Ignore:
Timestamp:
2003-06-16T10:47:06+12:00 (21 years ago)
Author:
jmt12
Message:

Sunday's work

Location:
trunk/gli/src/org/greenstone/gatherer/file
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/gli/src/org/greenstone/gatherer/file/FileManager.java

    r4383 r4675  
    4646import org.greenstone.gatherer.util.DragComponent;
    4747import org.greenstone.gatherer.util.SynchronizedTreeModelTools;
    48 /** Manages the moving of files within a separate thread. 
     48/** Manages the moving of files within a separate thread.
    4949 * @author John Thompson, Greenstone Digital Library, University of Waikato
    5050 * @version 2.3
     
    7070    // If source and target are different
    7171    else {
    72                 // If target is the UndoManager, we're deleting
     72        // If target is the UndoManager, we're deleting
    7373        if(target instanceof UndoManager) {
    7474        // If the source is the workspace then display an error message. Workspace is read only.
     
    141141    public void run() {
    142142        // Reset, and calculate progress bar size.
    143         queue.calculateSize(source_nodes);
     143        boolean cancelled = queue.calculateSize(source_nodes);
    144144        // Now we queue the job(s). Note that this may fail if a read only file is encountered and we have been asked to delete.
    145         for(int i = 0; source_nodes != null && i < source_nodes.length; i++) {
     145        for(int i = 0; !cancelled && source_nodes != null && i < source_nodes.length; i++) {
    146146        queue.addJob(id, source, source_nodes[i], target, target_node, type, true, true, true);
    147147        }
  • trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java

    r4615 r4675  
    140140    }
    141141
    142     public void calculateSize(FileNode[] files) {
     142    /** Calculates the total deep file size of the selected file nodes.
     143    * @param files a FileNode[] of selected files
     144    * @return true if a cancel was signalled, false otherwise
     145    * @see org.greenstone.gatherer.file.FileManager.Task#run()
     146    */
     147    public boolean calculateSize(FileNode[] files) {
    143148    progress.reset();
    144149    progress.setString(get("FileActions.Calculating_Size"));
    145150    progress.setIndeterminate(true);
    146151    Vector remaining = new Vector();
    147     for(int i = 0; i < files.length; i++) {
     152    for(int i = 0; !cancel_action && i < files.length; i++) {
    148153        remaining.add(files[i]);
    149154    }
    150     while(remaining.size() > 0) {
     155    while(!cancel_action && remaining.size() > 0) {
    151156        FileNode node = (FileNode)remaining.remove(0);
    152157        if(node.isLeaf()) {
     
    154159        }
    155160        else {
    156         for(int i = 0; i < node.getChildCount(); i++) {
     161        for(int i = 0; !cancel_action && i < node.getChildCount(); i++) {
    157162            remaining.add(node.getChildAt(i));
    158163        }
     
    161166    progress.setString(get("No_Activity"));
    162167    progress.setIndeterminate(false);
     168    // Now we return if calculation was cancelled so that the FileManager's Task can skip the addJob phase correctly.
     169    if(cancel_action) {
     170        cancel_action = false; // reset
     171        return true;
     172    }
     173    else {
     174        return false;
     175    }
    163176    }
    164177
     
    171184    return file_status;
    172185    }
    173     /** Access to the job state label. */   
     186    /** Access to the job state label. */
    174187//      public JLabel getJobStatus() {
    175188//      return job_status;
     
    203216            ///ystem.err.println("Found job: " + job);
    204217            // The user can cancel this individual action at several places, so keep track if the state is 'ready' for the next step.
    205             boolean ready = true; 
     218            boolean ready = true;
    206219            FileNode origin_node = job.getOrigin();
    207220            FileNode destination_node = job.getDestination();
     
    394407                source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
    395408                }
    396                                      
     409
    397410                // We can't have been cancelled, and we must have created a new FileNode during the above phase, before we can handle metadata.
    398411                if(!cancel_action && new_node != null) {
     
    403416                    GDMManager gdm = Gatherer.c_man.getCollection().gdm;
    404417                    // we just retrieve the metadata attached to the origin node...
    405                     ArrayList existing_metadata = gdm.getMetadata(source_file);
    406                     ///ystem.err.println("Existing metadata for " + origin_node + ": " + gdm.toString(existing_metadata));
     418                    ArrayList existing_metadata = gdm.getMetadataOnly(source_file);
     419                    //Gatherer.println("Existing metadata for " + origin_node + ": " + gdm.toString(existing_metadata));
    407420                    // then assign this remainder to the new folder.
    408421                    ///ystem.err.println("New metadata: " + gdm.toString(existing_metadata));
     
    533546                // We add an entry to the complete mappings to ensure this directory isn't added again
    534547                completed_folder_mappings.put(origin_node, recycle_folder_record);
    535                 ///ystem.err.println("Added completed directories mapping " + origin_node); 
    536                 // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea why, per se; however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once. 
     548                ///ystem.err.println("Added completed directories mapping " + origin_node);
     549                // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea why, per se; however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once.
    537550                ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children.");
    538551                ///ystem.err.println("Directory actually has " + child_list.length + " children.");
     
    586599            progress.reset();
    587600            progress.setString(get("No_Activity"));
    588             yes_to_all = false;         
     601            yes_to_all = false;
    589602            completed_folder_mappings.clear();
    590603            recycle_folder_mappings.clear();
     
    599612        }
    600613        catch (Exception error) {
    601         Gatherer.printStackTrace(error); 
     614        Gatherer.printStackTrace(error);
    602615        }
    603616    }
     
    655668      * @see org.greenstone.gatherer.Gatherer
    656669      */
    657     public void copyFile(File source, File destination, LongProgressBar progress) 
     670    public void copyFile(File source, File destination, LongProgressBar progress)
    658671    throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, UnknownFileErrorException {
    659672    if(source.isDirectory()) {
Note: See TracChangeset for help on using the changeset viewer.