source: trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java@ 7482

Last change on this file since 7482 was 7482, checked in by mdewsnip, 20 years ago

Some tidy ups in preparation for finalising the workspace and collection tree refreshing.

  • Property svn:keywords set to Author Date Id Revision
File size: 35.0 KB
/**
 *#########################################################################
 *
 * A component of the Gatherer application, part of the Greenstone digital
 * library suite from the New Zealand Digital Library Project at the
 * University of Waikato, New Zealand.
 *
 * Author: John Thompson, Greenstone Digital Library, University of Waikato
 *
 * Copyright (C) 1999 New Zealand Digital Library Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *########################################################################
 */
package org.greenstone.gatherer.file;

import java.io.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.tree.*;
import org.greenstone.gatherer.Configuration;
import org.greenstone.gatherer.Dictionary;
import org.greenstone.gatherer.Gatherer;
import org.greenstone.gatherer.file.FileJob;
import org.greenstone.gatherer.file.FileNode;
import org.greenstone.gatherer.gui.LongProgressBar;
import org.greenstone.gatherer.gui.tree.DragTree;
import org.greenstone.gatherer.msm.MetadataXMLFileManager;
import org.greenstone.gatherer.undo.UndoManager;
import org.greenstone.gatherer.util.ArrayTools;
import org.greenstone.gatherer.util.DragComponent;
import org.greenstone.gatherer.util.SynchronizedTreeModelTools;
import org.greenstone.gatherer.util.Utility;
/** A threaded object which processes a queue of file actions such as copying and moving. It also updates the various trees involved so they remain an accurate representation of the file system they are meant to match.
 * @author John Thompson, Greenstone Digital Library, University of Waikato
 * @version 2.3
 */
public class FileQueue
    extends Thread
    implements TreeSelectionListener {
    /** When someone requests the movement queue to be dumped this cancel flag is set to true. */
    private boolean cancel_action = false;
    /** A temporary mapping from currently existing FileNode folders to their equivalent FileNode folders within the undo manager's tree. */
    private HashMap completed_folder_mappings = new HashMap();

    /** The button which controls the stopping of the file queue. */
    private JButton stop_button = null;

    /** true to cause this file queue to return from run() as soon as there are no jobs left on the queue. Useful for undo jobs which must occur before a specific action. */
    private boolean return_immediately = false;
    /** We are only allowed to wait under specific circumstances. */
    /* private boolean wait_allowed = true; */
    /** true if the user has selected yes to all from a file 'clash' dialog. */
    private boolean yes_to_all = false;
    /** A temporary mapping from currently existing FileNodes to the potential FileNode folder within the undo manager's tree. */
    private HashMap recycle_folder_mappings = new HashMap();
    /** A label explaining the current moving files status. */
    private JLabel file_status = null;
    /** A list containing a queue of waiting movement jobs. */
    private ArrayList queue;
    /** A progress bar which shows how many bytes, out of the total size of bytes, have been moved. */
    private LongProgressBar progress = null;
    /** The last piece of text shown on the file status label, just in case we are displaying a very temporary message. */
    private String previous = null;

    /** Constructor.
     * @param return_immediately true to cause this file queue to return from run() as soon as there are no jobs left on the queue.
     * @see org.greenstone.gatherer.Configuration
     * @see org.greenstone.gatherer.gui.Coloring
     * @see org.greenstone.gatherer.gui.LongProgressBar
     */
    public FileQueue(boolean return_immediately) {
        this.return_immediately = return_immediately;
        this.queue = new ArrayList();
        String args[] = new String[2];
        args[0] = "0";
        args[1] = "0";
        file_status = new JLabel();
        Dictionary.setText(file_status, "FileActions.Selected", args);
        progress = new LongProgressBar();
        progress.setBackground(Gatherer.config.getColor("coloring.collection_tree_background", false));
        progress.setForeground(Gatherer.config.getColor("coloring.collection_tree_foreground", false));
        progress.setString(Dictionary.get("FileActions.No_Activity"));
        progress.setStringPainted(true);
        args = null;
    }
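
    // A minimal usage sketch (an assumption based on this class's public API, not code
    // taken from the Gatherer start-up sequence; the variable names and the id value are
    // placeholders): the queue is constructed once, started as a worker thread, and then
    // fed work via addJob(); run() blocks in wait() whenever the queue is empty and is
    // woken by the notify() inside addJob().
    //
    //     FileQueue file_queue = new FileQueue(false);
    //     file_queue.start();
    //     file_queue.addJob(System.currentTimeMillis(), source_tree, child_node,
    //                       target_tree, parent_node, FileJob.COPY, true, true, true);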

    /** Requeue an existing job into the queue.
     * @param job A previously created FileJob.
     */
    synchronized public void addJob(FileJob job, int position) {
        job.done = true; // Ensure that the requeued job is marked as done.
        queue.add(position, job);
        notify();
    }

    /** Add a new job to the queue, specifying as many arguments as are necessary to complete this type of job (i.e. a delete needs no target information).
     * @param id A long id unique to all jobs created by a single action.
     * @param source The DragComponent source of this file, most likely a DragTree.
     * @param child The FileNode you wish to move.
     * @param target The DragComponent to move the file to, again most likely a DragTree.
     * @param parent The file's new FileNode parent within the target.
     * @param type The type of this movement as a byte, either COPY or DELETE.
     * @param undo true if this job should generate undo jobs, false for redo ones.
     * @param undoable true if this job can generate undo or redo jobs at all, false otherwise.
     */
    public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level) {
        addJob(id, source, child, target, parent, type, undo, undoable, folder_level, -1);
    }

    synchronized public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level, int position) {
        FileJob job = new FileJob(id, source, child, target, parent, type, undo, undoable);
        job.folder_level = folder_level;
        Gatherer.println("Adding job: " + job);
        if(position != -1 && position <= queue.size() + 1) {
            queue.add(position, job);
        }
        else {
            queue.add(job);
        }
        notify();
    }

    /** Calculates the total deep file size of the selected file nodes.
     * @param files a FileNode[] of selected files
     * @return true if a cancel was signalled, false otherwise
     * @see org.greenstone.gatherer.file.FileManager.Task#run()
     */
    public boolean calculateSize(FileNode[] files)
    {
        file_status.setText(Dictionary.get("FileActions.Calculating_Size"));
        progress.setString(Dictionary.get("FileActions.Calculating_Size"));

        // Calculate the total file size of all the selected file nodes
        Vector remaining = new Vector();
        for (int i = 0; !cancel_action && i < files.length; i++) {
            remaining.add(files[i]);
        }
        while (!cancel_action && remaining.size() > 0) {
            FileNode node = (FileNode) remaining.remove(0);
            if (node.isLeaf()) {
                progress.addMaximum(node.getFile().length());
            }
            else {
                for (int i = 0; !cancel_action && i < node.getChildCount(); i++) {
                    remaining.add(node.getChildAt(i));
                }
            }
        }

        // Now we return whether the calculation was cancelled, so that the FileManager's Task can skip the addJob phase correctly.
        if (cancel_action) {
            cancel_action = false; // reset
            return true;
        }
        else {
            return false;
        }
    }
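
    // Presumed calling pattern (inferred from the @see reference above, not verified here):
    // FileManager's Task calls calculateSize(selected_nodes) first to prime the progress
    // bar's maximum, and only goes on to queue jobs with addJob(...) when this method
    // returns false, i.e. when the user did not cancel during the size calculation.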

    /** This method is called to cancel the job queue at the next available moment. */
    public void cancelAction() {
        cancel_action = true;
    }

    /** Access to the file status label. */
    public JLabel getFileStatus() {
        return file_status;
    }

    /** Access to the progress bar. */
    public LongProgressBar getProgressBar() {
        return progress;
    }

    /** Prevent the progress bar from updating momentarily, while its size is re-adjusted. */
    public void pause() {
        progress.setIndeterminate(true);
    }


    /** The run method exists in every thread, and here it is used to work its way through the queue of jobs. If no jobs are waiting and it can, it waits until a job arrives. If a job is present then it is either COPIED or DELETED, with the records being copied or removed as necessary, and directories being recursed through. Finally the user can press cancel to cause the loop to prematurely dump the job queue and then wait.
     * @see org.greenstone.gatherer.Gatherer
     * @see org.greenstone.gatherer.collection.CollectionManager
     * @see org.greenstone.gatherer.file.FileJob
     * @see org.greenstone.gatherer.file.FileNode
     * @see org.greenstone.gatherer.gui.LongProgressBar
     * @see org.greenstone.gatherer.msm.MetadataSetManager
     * @see org.greenstone.gatherer.undo.UndoManager
     * @see org.greenstone.gatherer.util.Utility
     */
    public void run()
    {
        super.setName("FileQueue");
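
        // Jobs are consumed from the tail of the queue (see removeJob below). When a
        // directory is copied its children are re-inserted at the position their parent
        // was removed from, so they are processed before the next queued sibling; the
        // worked example further down ("Starting queue ...[a]") illustrates the ordering.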

        while (!Gatherer.self.exit) {
            try {
                // Retrieve the next job
                int position = queue.size() - 1;
                FileJob job = removeJob(position);
                if (job != null) {
                    ///ystem.err.println("Found job: " + job);
                    // Enable the stop button
                    stop_button.setEnabled(true);
                    // The user can cancel this individual action at several places, so keep track of whether the state is 'ready' for the next step.
                    boolean ready = true;
                    FileNode origin_node = job.getOrigin();
                    FileNode destination_node = job.getDestination();
                    FileSystemModel source_model = (FileSystemModel) job.source.getTreeModel();
                    FileSystemModel target_model = (FileSystemModel) job.target.getTreeModel();
                    if(destination_node == null) {
                        // Retrieve the root node of the target model instead. A delete, of course, has no target file, so all deleted files are added to the root of the Recycle Bin model.
                        destination_node = (FileNode) target_model.getRoot();
                    }
                    // Extract common job details.
                    File source_file = origin_node.getFile();
                    File target_file = null;
                    // Determine the target file for a copy or move.
                    if(job.type == FileJob.COPY || job.type == FileJob.MOVE) {
                        //target_file = new File(destination_node.getFile(), source_file.getName());
                        // Use the name of the FileNode instead of the name of the file - these should be the same except for the collection directories, where we want the collection name to be used, not 'import' which is the underlying name.
                        target_file = new File(destination_node.getFile(), origin_node.toString());
                    }
                    // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the latest version of its metadata.xml has been saved to disk. To copy a directory, simply create the directory at the destination, then add all of its child files as new jobs.
                    if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {
                        ///ystem.err.println("Copy/Move: " + origin_node);

                        // The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep we are.
                        int max_folder_depth = Gatherer.config.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);
                        boolean continue_over_depth = false;
                        if(FileManager.countFolderDepth(source_file) > max_folder_depth) {
                            Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };
                            String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };
                            int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);
                            args = null;
                            options = null;
                            switch(result) {
                            case 0: // Yes
                                continue_over_depth = true;
                                break;
                            case 2: // Continue and increase depth
                                continue_over_depth = true;
                                Gatherer.config.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));
                                break;
                            }
                        }
                        else {
                            continue_over_depth = true;
                        }

                        if(continue_over_depth) {
                            FileNode new_node = null;
                            // Check if the file exists, and act as necessary. Be aware the user can choose to cancel the action altogether (whereupon ready becomes false).
                            if(target_file.exists()) {
                                // We've previously been told to overwrite existing files without asking.
                                if(yes_to_all) {
                                    // Remove the old file and tree entry.
                                    target_file.delete();
                                    ready = true;
                                }
                                else {
                                    ///atherer.println("Oops! This filename already exists. Give the user some options.");
                                    Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };
                                    int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);
                                    switch(result) {
                                    case 1: // Yes To All
                                        yes_to_all = true;
                                        // fall through to Yes
                                    case 0: // Yes
                                        // Remove the old file and tree entry.
                                        if(destination_node != null) {
                                            TreePath destination_path = new TreePath(destination_node.getPath());
                                            FileNode temp_target_node = new FileNode(target_file, target_model, true);
                                            TreePath target_path = destination_path.pathByAddingChild(temp_target_node);
                                            SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));
                                            target_path = null;
                                            temp_target_node = null;
                                            destination_path = null;
                                        }
                                        target_file.delete();
                                        ready = true;
                                        break;
                                    case 3: // Cancel
                                        cancel_action = true;
                                        // fall through to No
                                    case 2: // No
                                    default:
                                        ready = false;
                                        // Increment progress by the size of the file that would have been copied
                                        progress.addValue(origin_node.getFile().length());
                                    }
                                }
                            }
                            // We proceed with the copy/move if the ready flag is still set. If it is, that means there is no longer an existing file of the same name in the way.
                            if(ready) {
                                // Update the status area
                                String args[] = new String[1];
                                args[0] = "" + (queue.size() + 1) + "";
                                if(job.type == FileJob.COPY) {
                                    args[0] = Utility.formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);
                                    file_status.setText(Dictionary.get("FileActions.Copying", args));
                                }
                                else {
                                    args[0] = Utility.formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);
                                    file_status.setText(Dictionary.get("FileActions.Moving", args));
                                }
                                args = null;

                                // If the source is a file
                                if(source_file.isFile()) {
                                    // Copy the file. If anything goes wrong, copyFile should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. No point in being told you're out of hard drive space for each one of six thousand files, eh?
                                    try {
                                        copyFile(source_file, target_file, progress);
                                    }
                                    // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved.
                                    catch(FileNotFoundException fnf_exception) {
                                        Gatherer.printStackTrace(fnf_exception);
                                        cancel_action = true;
                                        // Show warning.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Force refresh of source folder.
                                        source_model.refresh(new TreePath(((FileNode) origin_node.getParent()).getPath()));
                                    }
                                    catch(FileAlreadyExistsException fae_exception) {
                                        Gatherer.printStackTrace(fae_exception);
                                        cancel_action = true;
                                        // Show warning.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else can be done by the Gatherer.
                                    }
                                    catch(InsufficientSpaceException is_exception) {
                                        Gatherer.printStackTrace(is_exception);
                                        cancel_action = true;
                                        // Show warning. The message body of the exception explains how much more space is required for this file copy.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else can be done by the Gatherer. In fact if we are really out of space I'm not even sure we can quit safely.
                                    }
                                    catch(UnknownFileErrorException ufe_exception) {
                                        Gatherer.printStackTrace(ufe_exception);
                                        cancel_action = true;
                                        // Show warning
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else we can do.
                                    }
                                    catch(WriteNotPermittedException wnp_exception) {
                                        Gatherer.printStackTrace(wnp_exception);
                                        cancel_action = true;
                                        // Show warning
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else we can do.
                                    }
                                    catch(IOException exception) {
                                        // Can't really do much about this.
                                        Gatherer.printStackTrace(exception);
                                    }
                                    // If not cancelled
                                    if(!cancel_action) {
                                        // Step one is to create a dummy FileNode. It's important that it has the correct structure so getPath works.
                                        FileNode new_record = new FileNode(target_file);
                                        SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_record);
                                        new_node = new_record;

                                        // Create the undo job
                                        if(job.undoable) {
                                            job.undoable = false;
                                            if(job.type == FileJob.COPY) {
                                                // A copy is undone with a delete, so it doesn't really matter where the file originally came from (we're not moving it back there, but into the recycle bin). You may also notice we don't make use of the target parent record. This is because no undo action needs this information, and even if it did it could simply ask for the record's parent!
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, new_record, job.undo);
                                            }
                                            else {
                                                // Movements however do need a source and source parent so the file can be moved back to the correct place.
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode) origin_node.getParent(), job.target, new_record, job.undo);
                                            }
                                        }
                                        new_record = null;
                                    }
                                }
                                // Else if the source is a directory
                                else if(source_file.isDirectory()) {
                                    // Create a new record
                                    FileNode directory_record = new FileNode(target_file);
                                    ///ystem.err.println("Directory record = " + directory_record + " (" + target_file.getAbsolutePath() + ")");
                                    SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);
                                    // Why is this not happening eh?
                                    directory_record.setParent(destination_node);
                                    if(!target_file.exists()) {
                                        // Make the directory
                                        target_file.mkdirs();
                                        new_node = directory_record;
                                        // Create the undo job
                                        if(job.undoable) {
                                            job.undoable = false;
                                            if(job.type == FileJob.COPY) {
                                                // A copy is undone with a delete, so it doesn't really matter where the file originally came from (we're not moving it back there, but into the recycle bin). You may also notice we don't make use of the target parent record. This is because no undo action needs this information, and even if it did it could simply ask for the record's parent!
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, directory_record, job.undo);
                                            }
                                            else {
                                                // Movements however do need a source and source parent so the file can be moved back to the correct place.
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode) origin_node.getParent(), job.target, directory_record, job.undo);
                                            }
                                        }
                                    }
                                    // Else inform the users that a directory already exists and files will be copied into it
                                    //else {
                                    //    JOptionPane.showMessageDialog(null, Dictionary.get("FileActions.Directory_Exists", target_file.toString()), Dictionary.get("General.Warning"), JOptionPane.WARNING_MESSAGE);
                                    //}
                                    // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed.
                                    FileNode child_record = null;
                                    // In order to have a sane copy procedure (rather than always copying the last file first, as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this:
                                    // -- Starting queue       ...[a]
                                    //    remove(position) = 'a'   ...
                                    //    add(position, 'b')       ...[b]
                                    //    add(position, 'c')       ...[c][b]
                                    //    add(position, 'd')       ...[d][c][b]
                                    //    Next loop
                                    //    remove(position) = 'b'   ...[d][c]
                                    for(int i = 0; i < origin_node.getChildCount(); i++) {
                                        child_record = (FileNode) origin_node.getChildAt(i);
                                        addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, job.undo, false, false, position);
                                    }
                                    child_record = null;
                                    directory_record = null;
                                }
                                // The file wasn't found!
                                else {
                                    cancel_action = true;
                                    // Show warning.
                                    JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
                                    // Force refresh of source folder.
                                    source_model.refresh(new TreePath(((FileNode) origin_node.getParent()).getPath()));
                                }

                                // We can't have been cancelled, and we must have created a new FileNode during the above phase, before we can handle metadata.
                                if(!cancel_action && new_node != null) {
                                    /* Time to handle any existing metadata. */
                                    // If the directory came from inside our collection...
                                    if (job.source.toString().equals("Collection")) {
                                        ///ystem.err.println("Move within collection...");
                                        MetadataXMLFileManager gdm = Gatherer.c_man.getCollection().gdm;
                                        // we just retrieve the metadata attached to the origin node...
                                        ArrayList existing_metadata = gdm.getMetadataOnly(source_file);
                                        ///atherer.println("Existing metadata for " + origin_node + ": " + gdm.toString(existing_metadata));
                                        // then assign this remainder to the new folder.
                                        ///ystem.err.println("New metadata: " + gdm.toString(existing_metadata));
                                        gdm.addMetadata(new_node, existing_metadata);
                                        existing_metadata = null;
                                        gdm = null;
                                    }
                                    // If it came from the recycle bin, retrieve the metadata from there, once again remembering to account for inherited metadata
                                    else if (job.source.toString().equals("Undo")) {
                                        MetadataXMLFileManager gdm = Gatherer.c_man.getCollection().gdm;
                                        // Retrieve metadata from the recycle bin
                                        ArrayList existing_metadata = Gatherer.c_man.undo.getMetadata(source_file);
                                        // then assign this remainder to the new folder.
                                        gdm.addMetadata(new_node, existing_metadata);
                                        existing_metadata = null;
                                        gdm = null;
                                    }
                                    // Otherwise if it came from the workspace, use the MSM's parsers to search for folder level metadata (such as metadata.xml or MARC records).
                                    else if (job.source.toString().equals("Workspace")) {
                                        cancel_action = !Gatherer.c_man.getCollection().msm.searchForMetadata(new_node, origin_node, job.folder_level);
                                    }
                                }
                                new_node = null;
                            }
                        }
                    }
                    // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycle bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the file system for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. Also be aware that if the user has cancelled just this action, because of say a name clash, then we shouldn't do any deleting of any sort.
                    if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {
                        ///atherer.println("Delete/Move: " + origin_node);
                        ///atherer.println(queue.size() + " jobs remain in queue");

                        if (source_file.isFile()) {
                            progress.addValue(source_file.length());
                        }

                        // If the source is an empty directory or a file. Don't do anything to the root node of a tree.
                        File[] child_list = source_file.listFiles();
                        if(source_file.isFile() || (child_list != null && (child_list.length == 0 || (child_list.length == 1 && child_list[0].getName().equals(Utility.METADATA_XML))) && origin_node.getParent() != null)) {
                            ///atherer.println("File or empty directory.");
                            // Delete any metadata.xml still in the directory.
                            if(child_list != null && child_list.length == 1) {
                                child_list[0].delete();
                            }

                            ///atherer.println("Origin is file or is directory and is empty.");
                            // Update the status area
                            String args[] = new String[1];
                            // args[0] = "" + (queue.size() + 1) + "";
                            args[0] = Utility.formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);
                            file_status.setText(Dictionary.get("FileActions.Deleting", args));
                            args = null;

                            // Remove its metadata
                            ArrayList metadatum = null;
                            if(job.source == Gatherer.c_man.undo) {
                                Gatherer.c_man.undo.addMetadata(target_file, metadatum);
                            }
                            else {
                                metadatum = Gatherer.c_man.getCollection().gdm.removeMetadata(origin_node.getFile());
                            }
                            // Determine its parent node
                            FileNode parent_record = (FileNode) origin_node.getParent();
                            // Remove from model
                            if(parent_record != null) {
                                SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);
                            }
                            // If we are deleting
                            File recycled_file = null;
                            FileNode recycled_parent = null;
                            // Delete the source file
                            Utility.delete(source_file);
                        }
                        // Else the source is a directory and it has children remaining
                        else if(child_list != null && child_list.length > 0) {
                            ///ystem.err.print("Nonempty directory -> ");
                            ///atherer.println("Directory is non-empty. Remove children first.");
                            FileNode recycle_folder_record = null;
                            // Don't worry about all this for true file move actions.
                            if(job.type == FileJob.DELETE) {
                                // Queue all of its children (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea why, per se, but the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once.
                                ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children.");
                                ///ystem.err.println("Directory actually has " + child_list.length + " children.");
                                origin_node.unmap();
                                origin_node.map();
                                ///atherer.println("Directory has " + origin_node.getChildCount() + " children.");
                                ///atherer.println("Directory actually has " + child_list.length + " children.");
                                for(int i = 0; i < origin_node.size(); i++) {
                                    FileNode child_record = (FileNode) origin_node.get(i);
                                    ///atherer.println("Queuing: " + child_record);
                                    addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position);
                                    //if(recycle_folder_record != null) {
                                    //    recycle_folder_mappings.put(child_record, recycle_folder_record);
                                    //}
                                }
                            }
                            // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue, so sooner is later. Also remember that we have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.
                            // One special case: do not requeue root nodes, and don't requeue jobs marked as done.
                            if(origin_node.getParent() != null && !job.done) {
                                ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath());
                                job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before their children, but deleted after them.
                                addJob(job, position);
                            }
                            else {
                                Gatherer.println("I've already done this job twice. I refuse to requeue it again!!!");
                            }
                        }
                    }
                    job = null;
                    source_file = null;
                    target_file = null;
                    origin_node = null;
                    // We can only break out of the while loop if we are out of files, or if the action was cancelled.
                    if(cancel_action) {
                        // Empty queue
                        clearJobs();
                        cancel_action = false;
                    }
                    // Debugging pause.
                    ///ystem.err.println("Job complete.");
                }
                else {
                    // Disable stop button
                    if(stop_button != null) {
                        stop_button.setEnabled(false);
                    }
                    synchronized(this) {
                        // Force both workspace and collection trees to refresh
                        if (Gatherer.g_man != null) {
                            Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);
                            Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);
                        }

                        // Reset status area
                        file_status.setText(Dictionary.get("FileActions.No_Activity"));
                        progress.reset();
                        progress.setString(Dictionary.get("FileActions.No_Activity"));
                        yes_to_all = false;
                        completed_folder_mappings.clear();
                        recycle_folder_mappings.clear();

                        // Reset whether we complain about no sets.
                        if(Gatherer.f_man != null) {
                            Gatherer.f_man.complain_if_no_sets = true;
                        }

                        // Now wait if applicable.
                        if(return_immediately) {
                            return;
                        }
                        ///ystem.err.println("Waiting");
                        wait();
                    }
                }
            }
            catch (Exception error) {
                Gatherer.printStackTrace(error);
            }
        }
    }

    /** Register the button that will be responsible for stopping executing file actions.
     * @param stop_button a JButton
     */
    public void registerStopButton(JButton stop_button) {
        this.stop_button = stop_button;
    }

    /** Called when the user makes some selection in one of the trees we are listening to. From this we update the status details. */
    public void valueChanged(TreeSelectionEvent event) {
        JTree tree = (JTree) event.getSource();
        if(tree.getSelectionCount() > 0) {
            TreePath selection[] = tree.getSelectionPaths();
            int file_count = 0;
            int dir_count = 0;
            for(int i = 0; i < selection.length; i++) {
                TreeNode record = (TreeNode) selection[i].getLastPathComponent();
                if(record.isLeaf()) {
                    file_count++;
                }
                else {
                    dir_count++;
                }
                record = null;
            }
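            // NOTE (editorial observation): file_count and dir_count are tallied above
            // but are not currently written to the status label; any update of the
            // selection summary presumably happens elsewhere or has been removed.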
            selection = null;
        }
        tree = null;
    }

    synchronized private void clearJobs() {
        queue.clear();
    }

    /** Copy a file from the source location to the destination location.
     * @param source The source File.
     * @param destination The destination File.
     * @see org.greenstone.gatherer.Gatherer
     */
    public void copyFile(File source, File destination, LongProgressBar progress)
        throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, UnknownFileErrorException, WriteNotPermittedException {
        if(source.isDirectory()) {
            destination.mkdirs();
        }
        else {
            // Check that the origin file exists.
            if(!source.exists()) {
                Gatherer.println("Couldn't find the source file.");
                throw(new FileNotFoundException());
            }
            // Check that the destination file does not already exist.
            if(destination.exists()) {
                throw(new FileAlreadyExistsException());
            }
            File dirs = destination.getParentFile();
            dirs.mkdirs();
            // Copy the file.
            FileInputStream f_in = new FileInputStream(source);
            FileOutputStream f_out = null;
            // This may throw a FileNotFoundException, but in this case it translates to a WriteNotPermittedException.
            try {
                f_out = new FileOutputStream(destination);
            }
            catch (FileNotFoundException exception) {
                throw new WriteNotPermittedException(exception.toString());
            }
            byte data[] = new byte[Utility.BUFFER_SIZE];
            int data_size = 0;
            while((data_size = f_in.read(data, 0, Utility.BUFFER_SIZE)) != -1 && !cancel_action) {
                long destination_size = destination.length();
                try {
                    f_out.write(data, 0, data_size);
                }
                // If an IO exception occurs, we can do some maths to determine whether the number of bytes written to the file was less than expected. If so we assume an InsufficientSpaceException; if not we just rethrow the exception.
                catch (IOException io_exception) {
                    if(destination_size + (long) data_size > destination.length()) {
                        // Determine the difference (in bytes).
                        long difference = (destination_size + (long) data_size) - destination.length();
                        // Transform that into a human readable string.
                        String message = Utility.formatFileLength(difference);
                        throw(new InsufficientSpaceException(message));
                    }
                    else {
                        throw(io_exception);
                    }
                }
                if(progress != null) {
                    progress.addValue(data_size);
                }
            }
            // Close the streams to ensure all bytes are written.
            f_in.close();
            f_out.close();
            // We have now, in theory, produced an exact copy of the source file. Check this by comparing sizes.
            if(!destination.exists() || (!cancel_action && source.length() != destination.length())) {
                throw(new UnknownFileErrorException());
            }
            // If we were cancelled, ensure that none of the destination file remains.
            if(cancel_action) {
                destination.delete();
            }
        }
    }

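    /** Pop the job at the given position off the queue, or return null if the queue is
     * empty. run() always passes queue.size() - 1, so jobs are taken from the tail of the
     * list; addJob() relies on this when it re-inserts a directory's child jobs at the
     * position their parent was removed from.
     */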
    private FileJob removeJob(int position) {
        FileJob job = null;
        if(queue.size() > 0) {
            job = (FileJob) queue.remove(position);
        }
        return job;
    }
}