source: trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java @ 8022

Last change on this file since 8022 was 8022, checked in by mdewsnip, 20 years ago

(Very) minor changes.

  • Property svn:keywords set to Author Date Id Revision
File size: 35.8 KB
/**
 *#########################################################################
 *
 * A component of the Gatherer application, part of the Greenstone digital
 * library suite from the New Zealand Digital Library Project at the
 * University of Waikato, New Zealand.
 *
 * Author: John Thompson, Greenstone Digital Library, University of Waikato
 *
 * Copyright (C) 1999 New Zealand Digital Library Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *########################################################################
 */
package org.greenstone.gatherer.file;

import java.io.*;
import java.util.*;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.tree.*;
import org.greenstone.gatherer.Configuration;
import org.greenstone.gatherer.Dictionary;
import org.greenstone.gatherer.Gatherer;
import org.greenstone.gatherer.file.FileJob;
import org.greenstone.gatherer.file.FileNode;
import org.greenstone.gatherer.gui.LongProgressBar;
import org.greenstone.gatherer.gui.tree.DragTree;
import org.greenstone.gatherer.undo.UndoManager;
import org.greenstone.gatherer.util.ArrayTools;
import org.greenstone.gatherer.util.DragComponent;
import org.greenstone.gatherer.util.SynchronizedTreeModelTools;
import org.greenstone.gatherer.util.Utility;

/** A threaded object which processes a queue of file actions such as copying and moving files. It also handles updating the various trees involved so they remain an accurate representation of the file system they are meant to match.
 * @author John Thompson, Greenstone Digital Library, University of Waikato
 * @version 2.3
 */
public class FileQueue
    extends Thread
    implements TreeSelectionListener {
    /** When someone requests the movement queue to be dumped, this cancel flag is set to true. */
    private boolean cancel_action = false;
    /** A temporary mapping from each currently existing FileNode folder to its equivalent FileNode folder within the undo manager's tree. */
    private HashMap completed_folder_mappings = new HashMap();

    /** The button which controls the stopping of the file queue. */
    private JButton stop_button = null;

    /** true to cause this file queue to return from run() as soon as there are no jobs left on the queue. Useful for undo jobs which must occur before a specific action. */
    private boolean return_immediately = false;
    /** We are only allowed to wait under specific circumstances. */
    /* private boolean wait_allowed = true; */
    /** true if the user has selected yes to all from a file 'clash' dialog. */
    private boolean yes_to_all = false;
    /** A temporary mapping from currently existing FileNodes to the potential FileNode folder within the undo manager's tree. */
    private HashMap recycle_folder_mappings = new HashMap();
    /** A label explaining the current moving files status. */
    private JLabel file_status = null;
    /** A list containing a queue of waiting movement jobs. */
    private ArrayList queue;
    /** A progress bar which shows how many bytes, out of the total number of bytes, have been moved. */
    private LongProgressBar progress = null;
    /** The last piece of text shown on the file status label, just in case we are displaying a very temporary message. */
    private String previous = null;
    /** Constructor.
     * @param return_immediately true to cause this file queue to return from run() as soon as there are no jobs left on the queue.
     * @see org.greenstone.gatherer.Configuration
     * @see org.greenstone.gatherer.gui.Coloring
     * @see org.greenstone.gatherer.gui.LongProgressBar
     */
    public FileQueue(boolean return_immediately) {
        this.return_immediately = return_immediately;
        this.queue = new ArrayList();
        String args[] = new String[2];
        args[0] = "0";
        args[1] = "0";
        file_status = new JLabel();
        Dictionary.setText(file_status, "FileActions.Selected", args);
        progress = new LongProgressBar();
        progress.setBackground(Gatherer.config.getColor("coloring.collection_tree_background", false));
        progress.setForeground(Gatherer.config.getColor("coloring.collection_tree_foreground", false));
        progress.setString(Dictionary.get("FileActions.No_Activity"));
        progress.setStringPainted(true);
        args = null;
    }
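
    /* A minimal usage sketch (an illustration only, not taken from the Gatherer
     * sources): the application would typically construct a single FileQueue,
     * register the GUI stop button and start it as a worker thread before
     * queueing any jobs. The identifier stop_button below is an assumption for
     * the example.
     *
     *     FileQueue file_queue = new FileQueue(false);
     *     file_queue.registerStopButton(stop_button);
     *     file_queue.start();
     *     // Jobs added via addJob() are then processed by run() as they arrive.
     */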

    /** Requeue an existing job into the queue.
     * @param job A previously created FileJob.
     * @param position The index in the queue at which the job should be reinserted.
     */
    synchronized public void addJob(FileJob job, int position) {
        job.done = true; // Ensure that the requeued job is marked as done.
        queue.add(position, job);
        notify();
    }

    /** Add a new job to the queue, specifying as many arguments as are necessary to complete this type of job (i.e. a delete needs no target information).
     * @param id A long id unique to all jobs created by a single action.
     * @param source The DragComponent source of this file, most likely a DragTree.
     * @param child The FileNode you wish to move.
     * @param target The DragComponent to move the file to, again most likely a DragTree.
     * @param parent The file's new FileNode parent within the target.
     * @param type The type of this movement as an int, either COPY or DELETE.
     * @param undo true if this job should generate undo jobs, false for redo ones.
     * @param undoable true if this job can generate undo or redo jobs at all, false otherwise.
     */
    public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level) {
        addJob(id, source, child, target, parent, type, undo, undoable, folder_level, -1);
    }

    synchronized public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level, int position) {
        FileJob job = new FileJob(id, source, child, target, parent, type, undo, undoable);
        job.folder_level = folder_level;
        Gatherer.println("Adding job: " + job);
        if(position != -1 && position <= queue.size() + 1) {
            queue.add(position, job);
        }
        else {
            queue.add(job);
        }
        notify();
    }
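
    /* For illustration only: a drag-and-drop copy from the workspace tree into
     * the collection tree might be queued roughly like this (workspace_tree,
     * collection_tree, dragged_node and target_folder are hypothetical
     * variables, not names used elsewhere in this class):
     *
     *     long id = System.currentTimeMillis();
     *     file_queue.addJob(id, workspace_tree, dragged_node,
     *                       collection_tree, target_folder,
     *                       FileJob.COPY, true, true, true);
     *
     * Passing no explicit position appends the job; run() consumes jobs from
     * the end of the queue, and directory jobs re-insert their children at the
     * position the parent was removed from, so copies proceed depth-first.
     */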

    /** Calculates the total deep file size of the selected file nodes.
     * @param files a FileNode[] of selected files
     * @return true if a cancel was signalled, false otherwise
     * @see org.greenstone.gatherer.file.FileManager.Task#run()
     */
    public boolean calculateSize(FileNode[] files)
    {
        file_status.setText(Dictionary.get("FileActions.Calculating_Size"));
        progress.setString(Dictionary.get("FileActions.Calculating_Size"));

        // Calculate the total file size of all the selected file nodes
        Vector remaining = new Vector();
        for (int i = 0; !cancel_action && i < files.length; i++) {
            remaining.add(files[i]);
        }
        while (!cancel_action && remaining.size() > 0) {
            FileNode node = (FileNode) remaining.remove(0);
            if (node.isLeaf()) {
                progress.addMaximum(node.getFile().length());
            }
            else {
                for (int i = 0; !cancel_action && i < node.getChildCount(); i++) {
                    remaining.add(node.getChildAt(i));
                }
            }
        }

        // Now we return whether the calculation was cancelled, so that the FileManager's Task can skip the addJob phase correctly.
        if (cancel_action) {
            cancel_action = false; // reset
            return true;
        }
        else {
            return false;
        }
    }
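
    /* Sketch of the intended call order (inferred from the @see reference above,
     * not shown in this file): the FileManager task sizes the selection first so
     * the progress bar maximum is known before any jobs are queued.
     *
     *     if (!file_queue.calculateSize(selected_nodes)) {   // false: not cancelled
     *         // ... queue the corresponding copy/move/delete jobs ...
     *     }
     */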

    /** This method is called to cancel the job queue at the next available moment. */
    public void cancelAction() {
        cancel_action = true;
    }
    /** Access to the file status label. */
    public JLabel getFileStatus() {
        return file_status;
    }

    /** Access to the progress bar. */
    public LongProgressBar getProgressBar() {
        return progress;
    }
    /** Prevent the progress bar from updating momentarily, while its size is re-adjusted. */
    public void pause() {
        progress.setIndeterminate(true);
    }


    /** The run method exists in every thread, and here it is used to work its way through the queue of jobs. If no jobs are waiting and it can, it waits until a job arrives. If a job is present then it is either COPIED or DELETED, with the records being copied or removed as necessary, and directories being recursed through. Finally, the user can press cancel to cause the loop to prematurely dump the job queue and then wait.
     * @see org.greenstone.gatherer.Gatherer
     * @see org.greenstone.gatherer.collection.CollectionManager
     * @see org.greenstone.gatherer.file.FileJob
     * @see org.greenstone.gatherer.file.FileNode
     * @see org.greenstone.gatherer.gui.LongProgressBar
     * @see org.greenstone.gatherer.undo.UndoManager
     * @see org.greenstone.gatherer.util.Utility
     */
    public void run()
    {
        super.setName("FileQueue");

        while (!Gatherer.self.exit) {
            try {
                // Retrieve the next job
                int position = queue.size() - 1;
                FileJob job = removeJob(position);
                if (job != null) {
                    ///ystem.err.println("Found job: " + job);
                    // Enable the stop button
                    stop_button.setEnabled(true);
                    // The user can cancel this individual action at several places, so keep track of whether the state is 'ready' for the next step.
                    boolean ready = true;
                    FileNode origin_node = job.getOrigin();
                    FileNode destination_node = job.getDestination();
                    FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel();
                    FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel();
                    if(destination_node == null) {
                        // Retrieve the root node of the target model instead. A delete, of course, has no target file, so all deleted files are added to the root of the Recycle Bin model.
                        destination_node = (FileNode) target_model.getRoot();
                    }
                    // Extract common job details.
                    File source_file = origin_node.getFile();
                    File target_file = null;
                    // Determine the target file for a copy or move.
                    if(job.type == FileJob.COPY || job.type == FileJob.MOVE) {
                        //target_file = new File(destination_node.getFile(), source_file.getName());
                        // Use the name of the FileNode instead of the name of the file - these should be the same except for the collection directories, where we want the collection name to be used, not 'import', which is the underlying name.
                        target_file = new File(destination_node.getFile(), origin_node.toString());
                    }
                    // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the latest version of its metadata.xml has been saved to disk. To copy a directory, simply create the directory at the destination, then add all of its child files as new jobs.
                    if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {
                        ///ystem.err.println("Copy/Move: " + origin_node);

                        // The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep we are.
                        int max_folder_depth = Gatherer.config.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);
                        boolean continue_over_depth = false;
                        if(FileManager.countFolderDepth(source_file) > max_folder_depth) {
                            Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };
                            String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };
                            int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);
                            args = null;
                            options = null;
                            switch(result) {
                            case 0: // Yes
                                continue_over_depth = true;
                                break;
                            case 2: // Continue and increase depth
                                continue_over_depth = true;
                                Gatherer.config.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));
                                break;
                            }
                        }
                        else {
                            continue_over_depth = true;
                        }

                        if(continue_over_depth) {
                            FileNode new_node = null;
                            // Check whether the file already exists, and act as necessary. Be aware the user can choose to cancel the action altogether (whereupon ready becomes false).
                            if(target_file.exists()) {
                                // We've previously been told to overwrite existing files without asking.
                                if(yes_to_all) {
                                    // Remove the old file and tree entry.
                                    target_file.delete();
                                    ready = true;
                                }
                                else {
                                    ///atherer.println("Oops! This filename already exists. Give the user some options.");
                                    Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };
                                    int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);
                                    switch(result) {
                                    case 1: // Yes To All
                                        yes_to_all = true;
                                    case 0: // Yes
                                        // Remove the old file and tree entry.
                                        if(destination_node != null) {
                                            TreePath destination_path = new TreePath(destination_node.getPath());
                                            FileNode temp_target_node = new FileNode(target_file, target_model, true);
                                            TreePath target_path = destination_path.pathByAddingChild(temp_target_node);
                                            SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));
                                            target_path = null;
                                            temp_target_node = null;
                                            destination_path = null;
                                        }
                                        target_file.delete();
                                        ready = true;
                                        break;
                                    case 3: // No To All
                                        cancel_action = true;
                                    case 2: // No
                                    default:
                                        ready = false;
                                        // Increment progress by the size of the potentially copied file
                                        progress.addValue(origin_node.getFile().length());
                                    }
                                }
                            }
                            // We proceed with the copy/move if the ready flag is still set. If it is, that means there is no longer any existing file of the same name.
                            if(ready) {
                                // Update the status area
                                String args[] = new String[1];
                                args[0] = "" + (queue.size() + 1) + "";
                                if(job.type == FileJob.COPY) {
                                    args[0] = Utility.formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);
                                    file_status.setText(Dictionary.get("FileActions.Copying", args));
                                }
                                else {
                                    args[0] = Utility.formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);
                                    file_status.setText(Dictionary.get("FileActions.Moving", args));
                                }
                                args = null;

                                // If the source is a file
                                if(source_file.isFile()) {
                                    // Copy the file. If anything goes wrong, copyFile should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. No point in being told you're out of hard drive space for each one of six thousand files, eh?
                                    try {
                                        copyFile(source_file, target_file, progress);
                                    }
                                    // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved.
                                    catch(FileNotFoundException fnf_exception) {
                                        Gatherer.printStackTrace(fnf_exception);
                                        cancel_action = true;
                                        // Show warning.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Force refresh of source folder.
                                        source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
                                    }
                                    catch(FileAlreadyExistsException fae_exception) {
                                        Gatherer.printStackTrace(fae_exception);
                                        cancel_action = true;
                                        // Show warning.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else can be done by the Gatherer.
                                    }
                                    catch(InsufficientSpaceException is_exception) {
                                        Gatherer.printStackTrace(is_exception);
                                        cancel_action = true;
                                        // Show warning. The message body of the exception explains how much more space is required for this file copy.
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else can be done by the Gatherer. In fact if we are really out of space I'm not even sure we can quit safely.
                                    }
                                    catch(UnknownFileErrorException ufe_exception) {
                                        Gatherer.printStackTrace(ufe_exception);
                                        cancel_action = true;
                                        // Show warning
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else we can do.
                                    }
                                    catch(WriteNotPermittedException wnp_exception) {
                                        Gatherer.printStackTrace(wnp_exception);
                                        cancel_action = true;
                                        // Show warning
                                        JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
                                        // Nothing else we can do.
                                    }
                                    catch(IOException exception) {
                                        // Can't really do much about this.
                                        Gatherer.printStackTrace(exception);
                                    }
                                    // If not cancelled
                                    if(!cancel_action) {
                                        // Step one is to create a dummy FileNode. It's important that it has the correct structure so getPath works.
                                        FileNode new_record = new FileNode(target_file);
                                        SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_record);
                                        new_node = new_record;

                                        // Create the undo job
                                        if(job.undoable) {
                                            job.undoable = false;
                                            if(job.type == FileJob.COPY) {
                                                // A copy is undone with a delete, so it doesn't really matter where the file originally came from (we're not moving it back there, but into the recycle bin). You may also notice we don't make use of the target parent record. This is because no undo action needs this information, and even if it did it could simply ask for the record's parent!
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, new_record, job.undo);
                                            }
                                            else {
                                                // Movements however do need a source and source parent so the file can be moved back to the correct place.
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode)origin_node.getParent(), job.target, new_record, job.undo);
                                            }
                                        }
                                        new_record = null;
                                    }
                                }
                                // Else if the source is a directory
                                else if(source_file.isDirectory()) {
                                    // Create a new record
                                    FileNode directory_record = new FileNode(target_file);
                                    ///ystem.err.println("Directory record = " + directory_record + " (" + target_file.getAbsolutePath() + ")");
                                    SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);
                                    // Why is this not happening eh?
                                    directory_record.setParent(destination_node);
                                    if(!target_file.exists()) {
                                        // Make the directory
                                        target_file.mkdirs();
                                        new_node = directory_record;
                                        // Create the undo job
                                        if(job.undoable) {
                                            job.undoable = false;
                                            if(job.type == FileJob.COPY) {
                                                // A copy is undone with a delete, so it doesn't really matter where the file originally came from (we're not moving it back there, but into the recycle bin). You may also notice we don't make use of the target parent record. This is because no undo action needs this information, and even if it did it could simply ask for the record's parent!
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_COPY, null, null, job.target, directory_record, job.undo);
                                            }
                                            else {
                                                // Movements however do need a source and source parent so the file can be moved back to the correct place.
                                                Gatherer.c_man.undo.addUndo(job.ID(), UndoManager.FILE_MOVE, job.source, (FileNode)origin_node.getParent(), job.target, directory_record, job.undo);
                                            }
                                        }
                                    }
                                    // Else inform the users that a directory already exists and files will be copied into it
                                    //else {
                                    //    JOptionPane.showMessageDialog(null, Dictionary.get("FileActions.Directory_Exists", target_file.toString()), Dictionary.get("General.Warning"), JOptionPane.WARNING_MESSAGE);
                                    //}
                                    // Queue the non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed.
                                    FileNode child_record = null;
                                    // In order to have a sane copy procedure (rather than always copying the last file first, as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this:
                                    // -- Starting queue          ...[a]
                                    //    remove(position) = 'a'  ...
                                    //    add(position, 'b')      ...[b]
                                    //    add(position, 'c')      ...[c][b]
                                    //    add(position, 'd')      ...[d][c][b]
                                    // -- Next loop
                                    //    remove(position) = 'b'  ...[d][c]
                                    for(int i = 0; i < origin_node.getChildCount(); i++) {
                                        child_record = (FileNode) origin_node.getChildAt(i);
                                        addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, job.undo, false, false, position);
                                    }
                                    child_record = null;
                                    directory_record = null;
                                }
                                // The file wasn't found!
                                else {
                                    cancel_action = true;
                                    // Show warning.
                                    JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
                                    // Force refresh of source folder.
                                    source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
                                }

                                // We can't have been cancelled, and we must have created a new FileNode during the above phase, before we can handle metadata.
                                if(!cancel_action && new_node != null) {
                                    /* Time to handle any existing metadata. */
                                    // If the directory came from inside our collection...
                                    if (job.source.toString().equals("Collection")) {
                                        ///ystem.err.println("Move within collection...");
                                        // ...we just retrieve the metadata attached to the origin node...
                                        ArrayList existing_metadata = Gatherer.c_man.getCollection().gdm.getMetadataOnly(source_file);
                                        ///atherer.println("Existing metadata for " + origin_node + ": " + gdm.toString(existing_metadata));
                                        // ...then assign this remainder to the new folder.
                                        ///ystem.err.println("New metadata: " + gdm.toString(existing_metadata));
                                        Gatherer.c_man.getCollection().gdm.addMetadata(new_node, existing_metadata);
                                        existing_metadata = null;
                                    }
                                    // If it came from the recycle bin, retrieve the metadata from there, once again remembering to account for inherited metadata
                                    else if (job.source.toString().equals("Undo")) {
                                        // Retrieve metadata from the recycle bin...
                                        ArrayList existing_metadata = Gatherer.c_man.undo.getMetadata(source_file);
                                        // ...then assign this remainder to the new folder.
                                        Gatherer.c_man.getCollection().gdm.addMetadata(new_node, existing_metadata);
                                        existing_metadata = null;
                                    }
                                    // Otherwise, if it came from the workspace, use the MSM's parsers to search for folder-level metadata (such as metadata.xml or MARC records).
                                    else if (job.source.toString().equals("Workspace")) {
                                        cancel_action = !Gatherer.c_man.getCollection().msm.searchForMetadata(new_node, origin_node, job.folder_level);
                                    }
                                }
                                new_node = null;
                            }
                        }
                    }
                    // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycle bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the file system for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. Also be aware that if the user has cancelled just this action, because of, say, a name clash, then we shouldn't do any deleting of any sort, dammit.
                    if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {
                        ///atherer.println("Delete/Move: " + origin_node);
                        ///atherer.println(queue.size() + " jobs remain in queue");

                        if (source_file.isFile()) {
                            progress.addValue(source_file.length());
                        }

                        // The source is handled here if it is a file or an empty directory. Don't do anything to the root node of a tree.
                        File[] child_list = source_file.listFiles();
                        if(source_file.isFile() || (child_list != null && (child_list.length == 0 || (child_list.length == 1 && child_list[0].getName().equals(Utility.METADATA_XML))) && origin_node.getParent() != null)) {
                            ///atherer.println("File or empty directory.");
                            // Delete any metadata.xml still in the directory.
                            if(child_list != null && child_list.length == 1) {
                                child_list[0].delete();
                            }

                            ///atherer.println("Origin is file or is directory and is empty.");
                            // Update the status area
                            String args[] = new String[1];
                            // args[0] = "" + (queue.size() + 1) + "";
                            args[0] = Utility.formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);
                            file_status.setText(Dictionary.get("FileActions.Deleting", args));
                            args = null;

                            // Remove its metadata
                            ArrayList metadatum = null;
                            if(job.source == Gatherer.c_man.undo) {
                                Gatherer.c_man.undo.addMetadata(target_file, metadatum);
                            }
                            else {
                                metadatum = Gatherer.c_man.getCollection().gdm.removeMetadata(origin_node.getFile());
                            }
                            // Determine its parent node
                            FileNode parent_record = (FileNode)origin_node.getParent();
                            // Remove from model
                            if(parent_record != null) {
                                SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);
                            }
                            // If we are deleting
                            File recycled_file = null;
                            FileNode recycled_parent = null;
                            // Delete the source file
                            Utility.delete(source_file);
                        }
                        // Else the source is a directory and it has children remaining
                        else if(child_list != null && child_list.length > 0) {
                            ///ystem.err.print("Nonempty directory -> ");
                            ///atherer.println("Directory is non-empty. Remove children first.");
                            FileNode recycle_folder_record = null;
                            // Don't worry about all this for true file move actions.
                            if(job.type == FileJob.DELETE) {
                                // Queue all of its children (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea why, per se, but the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?), so I'll check that each child is only added once.
                                ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children.");
                                ///ystem.err.println("Directory actually has " + child_list.length + " children.");
                                origin_node.refresh();
                                ///atherer.println("Directory has " + origin_node.getChildCount() + " children.");
                                ///atherer.println("Directory actually has " + child_list.length + " children.");
                                for(int i = 0; i < origin_node.size(); i++) {
                                    FileNode child_record = (FileNode) origin_node.get(i);
                                    ///atherer.println("Queuing: " + child_record);
                                    addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position);
                                    //if(recycle_folder_record != null) {
                                    //    recycle_folder_mappings.put(child_record, recycle_folder_record);
                                    //}
                                }
                            }
                            // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue, so sooner is later. Te-he. Also have to remember that we have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.
                            // Special cases: do not requeue root nodes, and don't requeue jobs marked as done.
                            if(origin_node.getParent() != null && !job.done) {
                                ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath());
                                job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after.
                                addJob(job, position);
                            }
                            else {
                                Gatherer.println("I've already done this job twice. I refuse to requeue it again!!");
                            }
                        }
                    }
                    job = null;
                    source_file = null;
                    target_file = null;
                    origin_node = null;
                    // We can only break out of the while loop if we are out of files, or if the action was cancelled.
                    if(cancel_action) {
                        // Empty queue
                        clearJobs();
                        cancel_action = false;
                    }
                    // Debugging pause.
                    ///ystem.err.println("Job complete.");
                }
                else {
                    // Disable stop button
                    if(stop_button != null) {
                        stop_button.setEnabled(false);
                    }
                    synchronized(this) {
                        // Force both workspace and collection trees to refresh
                        if (Gatherer.g_man != null) {
                            Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);
                            Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);
                        }

                        // Reset status area
                        file_status.setText(Dictionary.get("FileActions.No_Activity"));
                        progress.reset();
                        progress.setString(Dictionary.get("FileActions.No_Activity"));
                        yes_to_all = false;
                        completed_folder_mappings.clear();
                        recycle_folder_mappings.clear();

                        // Reset whether we complain about no sets.
                        if(Gatherer.f_man != null) {
                            Gatherer.f_man.complain_if_no_sets = true;
                        }

                        // Now wait if applicable.
                        if(return_immediately) {
                            return;
                        }
                        ///ystem.err.println("Waiting");
                        wait();
                    }
                }
            }
            catch (Exception error) {
                Gatherer.printStackTrace(error);
            }
        }
    }

    /** Register the button that will be responsible for stopping the currently executing file actions.
     * @param stop_button a JButton
     */
    public void registerStopButton(JButton stop_button) {
        this.stop_button = stop_button;
    }

    /** Called when the user makes some selection in one of the trees we are listening to. From this we update the status details. */
    public void valueChanged(TreeSelectionEvent event) {
        JTree tree = (JTree) event.getSource();
        if(tree.getSelectionCount() > 0) {
            TreePath selection[] = tree.getSelectionPaths();
            int file_count = 0;
            int dir_count = 0;
            for(int i = 0; i < selection.length; i++) {
                TreeNode record = (TreeNode) selection[i].getLastPathComponent();
                if(record.isLeaf()) {
                    file_count++;
                }
                else {
                    dir_count++;
                }
                record = null;
            }
            selection = null;
        }
        tree = null;
    }

    synchronized private void clearJobs() {
        queue.clear();
    }

    /** Copy the contents from the source directory to the destination
     * directory.
     * @param source The source directory
     * @param destination The destination directory
     * @param progress A progress bar to monitor copying progress
     * @see org.greenstone.gatherer.Gatherer
     */
    public void copyDirectoryContents(File source, File destination, LongProgressBar progress)
        throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, UnknownFileErrorException, WriteNotPermittedException
    {
        if (!source.isDirectory()) return;
        // check that dest dirs exist
        destination.mkdirs();

        File [] src_files = source.listFiles();
        if (src_files.length == 0) return; // nothing to copy
        for (int i=0; i<src_files.length; i++) {
            File f = src_files[i];
            String f_name = f.getName();
            File new_file = new File(destination, f_name);
            if (f.isDirectory()) {
                copyDirectoryContents(f, new_file, progress);
            } else if (f.isFile()) {
                copyFile(f, new_file, progress);
            }
        }

    }
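
    /* Illustrative call only (the directory names are made up): recursively
     * mirror a folder into a collection's import area, reusing the queue's
     * progress bar so byte counts are reflected in the GUI.
     *
     *     try {
     *         copyDirectoryContents(new File("/data/photos"),
     *                               new File(collect_dir, "import/photos"),
     *                               getProgressBar());
     *     }
     *     catch (Exception exception) {
     *         Gatherer.printStackTrace(exception);
     *     }
     *
     * Note that this method overwrites nothing itself; any
     * FileAlreadyExistsException comes from the per-file copyFile() calls.
     */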

    /** Copy a file from the source location to the destination location.
     * @param source The source File.
     * @param destination The destination File.
     * @see org.greenstone.gatherer.Gatherer
     */
    public void copyFile(File source, File destination, LongProgressBar progress)
        throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, UnknownFileErrorException, WriteNotPermittedException {
        if(source.isDirectory()) {
            destination.mkdirs();
        }
        else {
            // Check that the origin file exists.
            if(!source.exists()) {
                Gatherer.println("Couldn't find the source file.");
                throw(new FileNotFoundException());
            }
            // Check that the destination file does not already exist.
            if(destination.exists()) {
                throw(new FileAlreadyExistsException());
            }
            File dirs = destination.getParentFile();
            dirs.mkdirs();
            // Copy the file.
            FileInputStream f_in = new FileInputStream(source);
            FileOutputStream f_out = null;
            // This may throw a FileNotFoundException, but in this case it translates to a WriteNotPermittedException.
            try {
                f_out = new FileOutputStream(destination);
            }
            catch (FileNotFoundException exception) {
                throw new WriteNotPermittedException(exception.toString());
            }
            byte data[] = new byte[Utility.BUFFER_SIZE];
            int data_size = 0;
            while((data_size = f_in.read(data, 0, Utility.BUFFER_SIZE)) != -1 && !cancel_action) {
                long destination_size = destination.length();
                try {
                    f_out.write(data, 0, data_size);
                }
                // If an IO exception occurs, we can do some maths to determine whether the number of bytes written to the file was less than expected. If so, we assume an InsufficientSpaceException. If not, we just rethrow the exception.
                catch (IOException io_exception) {
                    if(destination_size + (long) data_size > destination.length()) {
                        // Determine the difference (which I guess is in bytes).
                        long difference = (destination_size + (long) data_size) - destination.length();
                        // Transform that into a human readable string.
                        String message = Utility.formatFileLength(difference);
                        throw(new InsufficientSpaceException(message));
                    }
                    else {
                        throw(io_exception);
                    }
                }
                if(progress != null) {
                    progress.addValue(data_size);
                }
            }
            // Flush and close the streams to ensure all bytes are written.
            f_in.close();
            f_out.close();
            // We have now, in theory, produced an exact copy of the source file. Check this by comparing sizes.
            if(!destination.exists() || (!cancel_action && source.length() != destination.length())) {
                throw(new UnknownFileErrorException());
            }
            // If we were cancelled, ensure that none of the destination file exists.
            if(cancel_action) {
                destination.delete();
            }
        }
    }
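
    /* A worked example of the insufficient-space check above (numbers invented):
     * if the destination held 1,000,000 bytes before a write of a 65,536-byte
     * buffer, but afterwards destination.length() reports only 1,040,000 bytes,
     * then the shortfall is (1,000,000 + 65,536) - 1,040,000 = 25,536 bytes,
     * which Utility.formatFileLength() turns into the human-readable figure
     * carried in the InsufficientSpaceException message.
     */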


    private FileJob removeJob(int position) {
        FileJob job = null;
        if(queue.size() > 0) {
            job = (FileJob) queue.remove(position);
        }
        return job;
    }
}