/** *######################################################################### * * A component of the Gatherer application, part of the Greenstone digital * library suite from the New Zealand Digital Library Project at the * University of Waikato, New Zealand. * * Author: John Thompson, Greenstone Digital Library, University of Waikato * * Copyright (C) 1999 New Zealand Digital Library Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *######################################################################## */ package org.greenstone.gatherer.file; import java.io.*; import java.util.*; import javax.swing.*; import javax.swing.event.*; import javax.swing.tree.*; import org.greenstone.gatherer.Configuration; import org.greenstone.gatherer.Dictionary; import org.greenstone.gatherer.Gatherer; import org.greenstone.gatherer.file.FileJob; import org.greenstone.gatherer.file.FileNode; import org.greenstone.gatherer.gui.LongProgressBar; import org.greenstone.gatherer.gui.tree.DragTree; import org.greenstone.gatherer.undo.UndoManager; import org.greenstone.gatherer.util.ArrayTools; import org.greenstone.gatherer.util.DragComponent; import org.greenstone.gatherer.util.SynchronizedTreeModelTools; import org.greenstone.gatherer.util.Utility; /** A threaded object which processes a queue of file actions such as copying and movement. 
* It also handles updating the various trees involved so they are an accurate
 * representation of the file system they are meant to match.
 * <p>
 * Threading note: jobs are appended/removed under the object monitor
 * (synchronized methods) and the worker thread blocks in {@code wait()} when
 * the queue is empty; producers wake it with {@code notify()}.
 * @author John Thompson, Greenstone Digital Library, University of Waikato
 * @version 2.3
 */
public class FileQueue extends Thread implements TreeSelectionListener {
    /** When someone requests the movement queue to be dumped this cancel flag is set to true. */
    private boolean cancel_action = false;
    /** A temporary mapping from currently existing FileNode folders to their equivalent FileNode folder within the undo manager's tree.
	NOTE(review): only cleared in run(); nothing in this class as shown ever populates it. */
    private HashMap completed_folder_mappings = new HashMap();
    /** The button which controls the stopping of the file queue. */
    private JButton stop_button = null;
    /** true to cause this file queue to return from run() as soon as there are no jobs left on the queue. Useful for undo jobs which must occur before a specific action. */
    private boolean return_immediately = false;
    /** We are only allowed to wait under specific circumstances. */
    /* private boolean wait_allowed = true; */
    /** true if the user has selected yes to all from a file 'clash' dialog. */
    private boolean yes_to_all = false;
    /** A temporary mapping from currently existing FileNodes to the potential FileNode folder within the undo manager's tree.
	NOTE(review): only cleared in run(); the code that populated it is commented out. */
    private HashMap recycle_folder_mappings = new HashMap();
    /** A label explaining the current moving files status. */
    private JLabel file_status = null;
    /** A list containing a queue of waiting movement jobs. */
    private ArrayList queue;
    /** A progress bar which shows how many bytes, out of the total size of bytes, has been moved. */
    private LongProgressBar progress = null;
    /** The last piece of text shown on the file status label, just in case we are displaying a very temporary message.
	NOTE(review): appears to be unused within this class as shown. */
    private String previous = null;

    /** Constructor.  Builds the status label and progress bar used to report
     * file-action progress, coloured from the collection-tree configuration.
     * @param return_immediately true to cause this file queue to return from run() as soon as there are no jobs left on the queue.
     * @see org.greenstone.gatherer.Configuration
     * @see org.greenstone.gatherer.gui.Coloring
     * @see org.greenstone.gatherer.gui.LongProgressBar
     */
    public FileQueue(boolean return_immediately) {
	this.return_immediately = return_immediately;
	this.queue = new ArrayList();
	// Initial status text is "0 of 0 selected".
	String args[] = new String[2];
	args[0] = "0";
	args[1] = "0";
	file_status = new JLabel();
	Dictionary.setText(file_status, "FileActions.Selected", args);
	progress = new LongProgressBar();
	progress.setBackground(Gatherer.config.getColor("coloring.collection_tree_background", false));
	progress.setForeground(Gatherer.config.getColor("coloring.collection_tree_foreground", false));
	progress.setString(Dictionary.get("FileActions.No_Activity"));
	progress.setStringPainted(true);
	args = null;
    }

    /** Requeue an existing job into the queue and wake the worker thread.
     * @param job A previously created FileJob.
     * @param position the index within the queue at which to reinsert the job.
     */
    synchronized public void addJob(FileJob job, int position) {
	job.done = true; // Ensure that the requeued job is marked as done.
	queue.add(position, job);
	notify();
    }

    /** Add a new job to the queue, specifying as many arguments as is necessary to complete this type of job (ie delete needs no target information).
     * @param id A long id unique to all jobs created by a single action.
     * @param source The DragComponent source of this file, most likely a DragTree.
     * @param child The FileNode you wish to move.
     * @param target The DragComponent to move the file to, again most likely a DragTree.
     * @param parent The file's new FileNode parent within the target.
     * @param type The type of this movement as a byte, either COPY or DELETE.
     * @param undo true if this job should generate undo jobs, false for redo ones.
     * @param undoable true if this job can generate undo or redo jobs at all, false otherwise.
     * @param folder_level passed through to the created FileJob's folder_level field.
     */
    public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level) {
	// Delegate with position -1, meaning "append to the end of the queue".
	addJob(id, source, child, target, parent, type, undo, undoable, folder_level, -1);
    }

    /** Add a new job to the queue at a specific position and wake the worker thread.
     * Parameters are as for the nine-argument overload.
     * @param position the queue index to insert at, or -1 (or an out-of-range value) to append.
     */
    synchronized public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level, int position) {
	FileJob job = new FileJob(id, source, child, target, parent, type, undo, undoable);
	job.folder_level = folder_level;
	Gatherer.println("Adding job: " + job);
	if(position != -1 && position <= queue.size() + 1) {
	    queue.add(position, job);
	}
	else {
	    queue.add(job);
	}
	notify();
    }

    /** Calculates the total deep file size of the selected file nodes and
     * adds it to the progress bar's maximum (leaf files only; directories are
     * traversed breadth-first via a work list).
     * @param files a FileNode[] of selected files
     * @return true if a cancel was signalled, false otherwise
     * @see org.greenstone.gatherer.file.FileManager.Task#run()
     */
    public boolean calculateSize(FileNode[] files) {
	file_status.setText(Dictionary.get("FileActions.Calculating_Size"));
	progress.setString(Dictionary.get("FileActions.Calculating_Size"));
	// Calculate the total file size of all the selected file nodes.
	// cancel_action is polled throughout so the user can abort mid-count.
	Vector remaining = new Vector();
	for (int i = 0; !cancel_action && i < files.length; i++) {
	    remaining.add(files[i]);
	}
	while (!cancel_action && remaining.size() > 0) {
	    FileNode node = (FileNode) remaining.remove(0);
	    if (node.isLeaf()) {
		progress.addMaximum(node.getFile().length());
	    }
	    else {
		for (int i = 0; !cancel_action && i < node.getChildCount(); i++) {
		    remaining.add(node.getChildAt(i));
		}
	    }
	}
	// Now we return if calculation was cancelled so that the FileManager's Task can skip the addJob phase correctly.
	if (cancel_action) {
	    cancel_action = false; // reset
	    return true;
	}
	else {
	    return false;
	}
    }

    /** This method is called to cancel the job queue at the next available moment.
     * The flag is polled by run() and calculateSize(). */
    public void cancelAction() {
	cancel_action = true;
    }

    /** Access to the file state label.
* @return the JLabel used to report file-action status. */
    public JLabel getFileStatus() {
	return file_status;
    }

    /** Access to the progress bar.
     * @return the LongProgressBar tracking bytes moved. */
    public LongProgressBar getProgressBar() {
	return progress;
    }

    /** Prevent the progress bar updating momentarily, while its size is re-adjusted. */
    public void pause() {
	progress.setIndeterminate(true);
    }

    /** The run method exists in every thread, and here it is used to work its way through the queue of Jobs. If no jobs are waiting and it can, it waits until a job arrives. If a job is present then it is either COPIED or DELETED, with the records being copied or removed as necessary, and directories being recursed through. Finally the user can press cancel to cause the loop to prematurely dump the job queue then wait.
     * <p>
     * Queue direction note: jobs are taken from the END of the queue
     * (position = size - 1), and child jobs are inserted back at that same
     * position, so children are processed before a requeued parent delete.
     * @see org.greenstone.gatherer.Gatherer
     * @see org.greenstone.gatherer.collection.CollectionManager
     * @see org.greenstone.gatherer.file.FileJob
     * @see org.greenstone.gatherer.file.FileNode
     * @see org.greenstone.gatherer.gui.LongProgressBar
     * @see org.greenstone.gatherer.undo.UndoManager
     * @see org.greenstone.gatherer.util.Utility
     */
    public void run() {
	super.setName("FileQueue");
	while (!Gatherer.self.exit) {
	    try {
		// Retrieve the next job from the tail of the queue.
		int position = queue.size() - 1;
		FileJob job = removeJob(position);
		if (job != null) {
		    // Enable stop button while a job is in flight.
		    // NOTE(review): unlike the idle branch below, this is not null-guarded;
		    // registerStopButton() must have been called first — confirm with callers.
		    stop_button.setEnabled(true);
		    // The user can cancel this individual action at several places, so keep track if the state is 'ready' for the next step.
		    boolean ready = true;
		    FileNode origin_node = job.getOrigin();
		    FileNode destination_node = job.getDestination();
		    FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel();
		    FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel();
		    if(destination_node == null) {
			// Retrieve the root node of the target model instead. A delete, of course, has no target file so all deleted files are added to the root of the Recycle Bin model.
			destination_node = (FileNode) target_model.getRoot();
		    }
		    // Extract common job details.
		    File source_file = origin_node.getFile();
		    File target_file = null;
		    // Determine the target file for a copy or move.
		    if(job.type == FileJob.COPY || job.type == FileJob.MOVE) {
			// Use the name of the FileNode instead of the name of the file - these should be the same except for the collection directories where we want the collection name to be used, not 'import' which is the underlying name.
			target_file = new File(destination_node.getFile(), origin_node.toString());
		    }
		    // To copy a file, copy it then add any metadata found at the source. To copy a directory simply create the directory at the destination, then add all of its children files as new jobs.
		    if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {
			// The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep we are.
			int max_folder_depth = Gatherer.config.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);
			boolean continue_over_depth = false;
			if(FileManager.countFolderDepth(source_file) > max_folder_depth) {
			    // Ask the user whether to continue, abort, or continue with an increased depth limit.
			    Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };
			    String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };
			    int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);
			    args = null;
			    options = null;
			    switch(result) {
			    case 0: // Yes
				continue_over_depth = true;
				break;
			    case 2: // Continue and increase depth
				continue_over_depth = true;
				Gatherer.config.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));
				break;
			    }
			}
			else {
			    continue_over_depth = true;
			}
			if(continue_over_depth) {
			    FileNode new_node = null;
			    // Check if file exists, and act as necessary. Be aware the user can choose to cancel the action altogether (whereupon ready becomes false).
			    if(target_file.exists()) {
				// We've previously been told to overwrite everything.
				if(yes_to_all) {
				    // Remove the old file and tree entry.
				    target_file.delete();
				    ready = true;
				}
				else {
				    // This filename already exists. Give the user some options.
				    Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };
				    int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);
				    switch(result) {
				    case 1: // Yes To All
					yes_to_all = true;
					// deliberate fall-through into the Yes case
				    case 0: // Yes
					// Remove the old file and tree entry.
					if(destination_node != null) {
					    TreePath destination_path = new TreePath(destination_node.getPath());
					    FileNode temp_target_node = new FileNode(target_file, target_model, true);
					    TreePath target_path = destination_path.pathByAddingChild(temp_target_node);
					    SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));
					    target_path = null;
					    temp_target_node = null;
					    destination_path = null;
					}
					target_file.delete();
					ready = true;
					break;
				    case 3: // No To All
					cancel_action = true;
					// deliberate fall-through into the No/default case
				    case 2: // No
				    default:
					ready = false;
					// Increment progress by size of potentially copied file
					progress.addValue(origin_node.getFile().length());
				    }
				}
			    }
			    // We proceed with the copy/move if the ready flag is still set. If it is, that means there is no longer any existing file of the same name.
			    if(ready) {
				// Update status area.
				String args[] = new String[1];
				args[0] = "" + (queue.size() + 1) + "";
				if(job.type == FileJob.COPY) {
				    args[0] = Utility.formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);
				    file_status.setText(Dictionary.get("FileActions.Copying", args));
				}
				else {
				    args[0] = Utility.formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);
				    file_status.setText(Dictionary.get("FileActions.Moving", args));
				}
				args = null;
				// If source is a file, copy it. If anything goes wrong, copyFile should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs.
				if(source_file.isFile()) {
				    try {
					copyFile(source_file, target_file, progress);
				    }
				    // If we can't find the source file, the most likely reason is that the file system has changed since the last time it was mapped. Warn the user, then force a refresh of the source folder involved.
				    catch(FileNotFoundException fnf_exception) {
					Gatherer.printStackTrace(fnf_exception);
					cancel_action = true;
					// Show warning.
					JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
					// Force refresh of source folder.
					source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
				    }
				    catch(FileAlreadyExistsException fae_exception) {
					Gatherer.printStackTrace(fae_exception);
					cancel_action = true;
					// Show warning. Nothing else can be done by the Gatherer.
					JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);
				    }
				    catch(InsufficientSpaceException is_exception) {
					Gatherer.printStackTrace(is_exception);
					cancel_action = true;
					// Show warning. The message body of the exception explains how much more space is required for this file copy.
					JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);
				    }
				    catch(UnknownFileErrorException ufe_exception) {
					Gatherer.printStackTrace(ufe_exception);
					cancel_action = true;
					// Show warning. Nothing else we can do.
					JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);
				    }
				    catch(WriteNotPermittedException wnp_exception) {
					Gatherer.printStackTrace(wnp_exception);
					cancel_action = true;
					// Show warning. Nothing else we can do.
					JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
				    }
				    catch(IOException exception) {
					// Can't really do much about this.
					Gatherer.printStackTrace(exception);
				    }
				    // If not cancelled, reflect the copy in the target tree model.
				    if(!cancel_action) {
					// Step one is to create a dummy FileNode. It's important it has the correct structure so getPath works.
					FileNode new_record = new FileNode(target_file);
					SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_record);
					new_node = new_record;
					// Create undo job.
					if(job.undoable) {
					    job.undoable = false;
					    if(job.type == FileJob.COPY) {
						// A copy is undone with a delete, so it doesn't really matter where the file originally came from (we're not moving it back there, but into the recycle bin). We don't make use of the target parent record because no undo action needs this information.
						Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, new_record, job.undo);
					    }
					    else {
						// Movements however do need a source and source parent so the file can be moved back to the correct place.
						Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode)origin_node.getParent(), job.target, new_record, job.undo);
					    }
					}
					new_record = null;
				    }
				}
				// Else the source is a directory: create it at the destination and queue its children.
				else if(source_file.isDirectory()) {
				    // Create new record.
				    FileNode directory_record = new FileNode(target_file);
				    SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);
				    directory_record.setParent(destination_node);
				    if(!target_file.exists()) {
					// Make the directory.
					target_file.mkdirs();
					new_node = directory_record;
					// Create undo job.
					if(job.undoable) {
					    job.undoable = false;
					    if(job.type == FileJob.COPY) {
						// A copy is undone with a delete; see the file-copy case above.
						Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, directory_record, job.undo);
					    }
					    else {
						// Movements need a source and source parent so the file can be moved back to the correct place.
						Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode)origin_node.getParent(), job.target, directory_record, job.undo);
					    }
					}
				    }
				    // Else inform the users that a directory already exists and files will be copied into it
				    //else {
				    //    JOptionPane.showMessageDialog(null, Dictionary.get("FileActions.Directory_Exists", target_file.toString()), Dictionary.get("General.Warning"), JOptionPane.WARNING_MESSAGE);
				    //}
				    // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed.
				    FileNode child_record = null;
				    // In order to have a sane copy procedure (rather than always copying last file first as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this:
				    // -- Starting queue     ...[a]
				    // remove(position) = 'a' ...
				    // add(position, 'b')     ...[b]
				    // add(position, 'c')     ...[c][b]
				    // add(position, 'd')     ...[d][c][b]
				    // Next loop
				    // remove(position) = 'b' ...[d][c]
				    for(int i = 0; i < origin_node.getChildCount(); i++) {
					child_record = (FileNode) origin_node.getChildAt(i);
					addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, job.undo, false, false, position);
				    }
				    child_record = null;
				    directory_record = null;
				}
				// The file wasn't found!
				else {
				    cancel_action = true;
				    // Show warning.
				    JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
				    // Force refresh of source folder.
				    source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
				}
				// We can't have been cancelled, and we must have created a new FileNode during the above phase, before we can handle metadata.
				if(!cancel_action && new_node != null) {
				    /* Time to handle any existing metadata. */
				    // If the directory came from inside our collection, retrieve the metadata attached to the origin node and assign it to the new node.
				    if (job.source.toString().equals("Collection")) {
					ArrayList existing_metadata = Gatherer.c_man.getCollection().gdm.getMetadataOnly(source_file);
					Gatherer.c_man.getCollection().gdm.addMetadata(new_node, existing_metadata);
					existing_metadata = null;
				    }
				    // If it came from the recycle bin retrieve the metadata from there, once again remembering to account for inherited metadata.
				    else if (job.source.toString().equals("Undo")) {
					ArrayList existing_metadata = Gatherer.c_man.undo.getMetadata(source_file);
					Gatherer.c_man.getCollection().gdm.addMetadata(new_node, existing_metadata);
					existing_metadata = null;
				    }
				    // Otherwise if it came from the workspace use the MSM's parsers to search for folder level metadata (such as metadata.xml or marc records).
				    else if (job.source.toString().equals("Workspace")) {
					cancel_action = !Gatherer.c_man.getCollection().msm.searchForMetadata(new_node, origin_node, job.folder_level);
				    }
				}
				new_node = null;
			    }
			}
		    }
		    // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata then deleting the file. When deleting a directory record from the tree we must ensure that all of the descendant records have already been removed, otherwise the delete will fail. Also be aware that if the user has cancelled just this action (say a name clash) then we shouldn't do any deleting of any sort.
		    if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {
			if (source_file.isFile()) {
			    progress.addValue(source_file.length());
			}
			// If the source is an empty directory or a file. Don't do anything to the root node of a tree.
			File[] child_list = source_file.listFiles();
			if(source_file.isFile() || (child_list != null && (child_list.length == 0 || (child_list.length == 1 && child_list[0].getName().equals(Utility.METADATA_XML))) && origin_node.getParent() != null)) {
			    // Delete any metadata.xml still in the directory.
			    if(child_list != null && child_list.length == 1) {
				child_list[0].delete();
			    }
			    // Update status area.
			    String args[] = new String[1];
			    args[0] = Utility.formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);
			    file_status.setText(Dictionary.get("FileActions.Deleting", args));
			    args = null;
			    // Remove its metadata.
			    ArrayList metadatum = null;
			    if(job.source == Gatherer.c_man.undo) {
				// NOTE(review): metadatum is still null here, so this stores null — confirm this is intended.
				Gatherer.c_man.undo.addMetadata(target_file, metadatum);
			    }
			    else {
				metadatum = Gatherer.c_man.getCollection().gdm.removeMetadata(origin_node.getFile());
			    }
			    // Determine its parent node.
			    FileNode parent_record = (FileNode)origin_node.getParent();
			    // Remove from model.
			    if(parent_record != null) {
				SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);
			    }
			    // If we are deleting
			    File recycled_file = null;
			    FileNode recycled_parent = null;
			    // Delete the source file.
			    Utility.delete(source_file);
			}
			// Else the source is a directory and it has children remaining.
			else if(child_list != null && child_list.length > 0) {
			    FileNode recycle_folder_record = null;
			    // Don't worry about all this for true file move actions.
			    if(job.type == FileJob.DELETE) {
				// Queue all of its children (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. The children within the origin node can be invalid during deletion (there are several copies of some nodes), so refresh first.
				origin_node.refresh();
				for(int i = 0; i < origin_node.size(); i++) {
				    FileNode child_record = (FileNode) origin_node.get(i);
				    addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position);
				    //if(recycle_folder_record != null) {
				    //    recycle_folder_mappings.put(child_record, recycle_folder_record);
				    //}
				}
			    }
			    // Requeue a delete job -after- the children have been dealt with. Remember the direction of the queue is reversed so sooner is later. Also remember we may have followed this path for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.
			    // One special case: do not requeue root nodes. Don't requeue jobs marked as done.
			    if(origin_node.getParent() != null && !job.done) {
				job.type = FileJob.DELETE; // Only requeue jobs that are deletes, as directories must be inspected before children, but deleted after.
				addJob(job, position);
			    }
			    else {
				Gatherer.println("I've already done this job twice. I refuse to requeue it again!!");
			    }
			}
		    }
		    job = null;
		    source_file = null;
		    target_file = null;
		    origin_node = null;
		    // If cancelled, dump the remainder of the queue.
		    if(cancel_action) {
			// Empty queue.
			clearJobs();
			cancel_action = false;
		    }
		}
		else {
		    // No jobs left: disable stop button, refresh trees, reset the status area, then wait (or return if return_immediately).
		    if(stop_button != null) {
			stop_button.setEnabled(false);
		    }
		    synchronized(this) {
			// Force both workspace and collection trees to refresh.
			if (Gatherer.g_man != null) {
			    Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);
			    Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);
			}
			// Reset status area.
			file_status.setText(Dictionary.get("FileActions.No_Activity"));
			progress.reset();
			progress.setString(Dictionary.get("FileActions.No_Activity"));
			yes_to_all = false;
			completed_folder_mappings.clear();
			recycle_folder_mappings.clear();
			// Reset whether we complain about no sets.
			if(Gatherer.f_man != null) {
			    Gatherer.f_man.complain_if_no_sets = true;
			}
			// Now wait if applicable.
			if(return_immediately) {
			    return;
			}
			wait();
		    }
		}
	    }
	    catch (Exception error) {
		Gatherer.printStackTrace(error);
	    }
	}
    }

    /** Register the button that will be responsible for stopping executing file actions.
     * @param stop_button a JButton
     */
    public void registerStopButton(JButton stop_button) {
	this.stop_button = stop_button;
    }

    /** Called when the user makes some selection in one of the trees we are listening to. From this we update the status details.
     * NOTE(review): file_count/dir_count are computed but never used here — presumably leftover status-update code; confirm before removing. */
    public void valueChanged(TreeSelectionEvent event) {
	JTree tree = (JTree) event.getSource();
	if(tree.getSelectionCount() > 0) {
	    TreePath selection[] = tree.getSelectionPaths();
	    int file_count = 0;
	    int dir_count = 0;
	    for(int i = 0; i < selection.length; i++) {
		TreeNode record = (TreeNode) selection[i].getLastPathComponent();
		if(record.isLeaf()) {
		    file_count++;
		}
		else {
		    dir_count++;
		}
		record = null;
	    }
	    selection = null;
	}
	tree = null;
    }

    /** Remove all pending jobs from the queue. */
    synchronized private void clearJobs() {
	queue.clear();
    }

    /** Copy the contents from the source directory to the destination
     * directory.
* @param source The source directory
     * @param destination The destination directory
     * @param progress A progress bar to monitor copying progress
     * @see org.greenstone.gatherer.Gatherer
     */
    public void copyDirectoryContents(File source, File destination, LongProgressBar progress)
	throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, UnknownFileErrorException, WriteNotPermittedException {
	if (!source.isDirectory()) return;
	// check that dest dirs exist
	destination.mkdirs();
	File [] src_files = source.listFiles();
	if (src_files.length == 0) return; // nothing to copy
	// NOTE(review): the source text from here to the InsufficientSpaceException
	// check below is truncated/garbled (text between a '<' and a later '>' was
	// lost in extraction, swallowing the rest of this loop and the start of
	// copyFile). Recover the missing span from version control before editing;
	// the surviving tokens are reproduced verbatim below.
	for (int i=0; i destination.length()) {
	    // Determine the difference (which I guess is in bytes).
	    long difference = (destination_size + (long) data_size) - destination.length();
	    // Transform that into a human readable string.
	    String message = Utility.formatFileLength(difference);
	    throw(new InsufficientSpaceException(message));
	} else {
	    throw(io_exception);
	}
	}
	if(progress != null) {
	    progress.addValue(data_size);
	}
	}
	// Flush and close the streams to ensure all bytes are written.
	f_in.close();
	f_out.close();
	// We have now, in theory, produced an exact copy of the source file. Check this by comparing sizes.
	if(!destination.exists() || (!cancel_action && source.length() != destination.length())) {
	    throw(new UnknownFileErrorException());
	}
	// If we were cancelled, ensure that none of the destination file exists.
	if(cancel_action) {
	    destination.delete();
	}
    }
    }

    /** Remove and return the job at the given queue position.
     * @param position the index to remove from
     * @return the FileJob removed, or null if the queue was empty
     */
    private FileJob removeJob(int position) {
	FileJob job = null;
	if(queue.size() > 0) {
	    job = (FileJob) queue.remove(position);
	}
	return job;
    }
}