source: trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java@ 8597

Last change on this file since 8597 was 8596, checked in by mdewsnip, 20 years ago

Minor tidy ups.

  • Property svn:keywords set to Author Date Id Revision
File size: 31.5 KB
1/**
2 *#########################################################################
3 *
4 * A component of the Gatherer application, part of the Greenstone digital
5 * library suite from the New Zealand Digital Library Project at the
6 * University of Waikato, New Zealand.
7 *
8 * Author: John Thompson, Greenstone Digital Library, University of Waikato
9 *
10 * Copyright (C) 1999 New Zealand Digital Library Project
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *########################################################################
26 */
27package org.greenstone.gatherer.file;
28
29import java.io.*;
30import java.util.*;
31import javax.swing.*;
32import javax.swing.event.*;
33import javax.swing.tree.*;
34import org.greenstone.gatherer.Configuration;
35import org.greenstone.gatherer.DebugStream;
36import org.greenstone.gatherer.Dictionary;
37import org.greenstone.gatherer.Gatherer;
38import org.greenstone.gatherer.gui.LongProgressBar;
39import org.greenstone.gatherer.gui.tree.DragTree;
40import org.greenstone.gatherer.metadata.MetadataValue;
41import org.greenstone.gatherer.metadata.MetadataXMLFileManager;
42import org.greenstone.gatherer.util.DragComponent;
43import org.greenstone.gatherer.util.SynchronizedTreeModelTools;
44import org.greenstone.gatherer.util.Utility;
45
46/** A threaded object which processes a queue of file actions such as copying and moving. It also handles updating the various trees involved so that they remain an accurate representation of the file system they are meant to match.
47 * @author John Thompson, Greenstone Digital Library, University of Waikato
48 * @version 2.3
49 */
50public class FileQueue
51 extends Thread
52{
53 /** When someone requests the movement queue to be dumped, this cancel flag is set to true. */
54 private boolean cancel_action = false;
55
56 /** The button which controls the stopping of the file queue. */
57 private JButton stop_button = null;
58
59 /** true if the user has selected yes to all from a file 'clash' dialog. */
60 private boolean yes_to_all = false;
61 /** A label explaining the current moving files status. */
62 private JLabel file_status = null;
63 /** A list containing a queue of waiting movement jobs. */
64 private ArrayList queue = null;
65 /** A progress bar which shows how many bytes, out of the total number of bytes, have been moved. */
66 private LongProgressBar progress = null;
67
68
69 /** Constructor.
70 */
71 public FileQueue() {
72 this.queue = new ArrayList();
73 file_status = new JLabel();
74 Dictionary.setText(file_status, "FileActions.No_Activity");
75 progress = new LongProgressBar();
76 progress.setBackground(Configuration.getColor("coloring.collection_tree_background", false));
77 progress.setForeground(Configuration.getColor("coloring.collection_tree_foreground", false));
78 progress.setString(Dictionary.get("FileActions.No_Activity"));
79 progress.setStringPainted(true);
80 }
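 // --- Editor's illustrative sketch (not part of the original source) ---
 // A minimal sketch of how a FileQueue might be wired up by a caller, based only on the
 // public methods in this class; the surrounding Gatherer code may do this differently.
 // 'status_panel' and 'stop' are hypothetical names.
 //
 //   FileQueue file_queue = new FileQueue();
 //   JButton stop = new JButton("Stop");
 //   file_queue.registerStopButton(stop);           // enabled/disabled by run()
 //   status_panel.add(file_queue.getFileStatus());  // JLabel describing the current action
 //   status_panel.add(file_queue.getProgressBar()); // LongProgressBar of bytes moved
 //   file_queue.start();                            // FileQueue extends Thread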
81
82 /** Requeue an existing job into the queue.
83 * @param job A previously created FileJob.
84 */
85 synchronized public void addJob(FileJob job, int position) {
86 job.done = true; // Ensure that the requeued job is marked as done.
87 queue.add(position, job);
88 notify();
89 }
90
91 /** Add a new job to the queue, specifying as many arguments as are necessary to complete this type of job (i.e. a delete needs no target information).
92 * @param id A long id unique to all jobs created by a single action.
93 * @param source The DragComponent source of this file, most likely a DragTree.
94 * @param child The FileNode you wish to move.
95 * @param target The DragComponent to move the file to, again most likely a DragTree.
96 * @param parent The file's new FileNode parent within the target.
97 * @param type The type of this action as a byte: COPY, MOVE or DELETE.
98 * @param undo true if this job should generate undo jobs, false for redo ones.
99 * @param undoable true if this job can generate undo or redo jobs at all, false otherwise.
100 */
101 public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level) {
102 addJob(id, source, child, target, parent, type, undo, undoable, folder_level, -1);
103 }
104
105 synchronized public void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean undo, boolean undoable, boolean folder_level, int position) {
106 FileJob job = new FileJob(id, source, child, target, parent, type, undo, undoable);
107 job.folder_level = folder_level;
108 DebugStream.println("Adding job: " + job);
109 if(position != -1 && position <= queue.size() + 1) {
110 queue.add(position, job);
111 }
112 else {
113 queue.add(job);
114 }
115 notify();
116 }
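 // Sketch of a typical call (editor's illustration; the variable names and the use of
 // System.currentTimeMillis() as an id are hypothetical):
 //
 //   long id = System.currentTimeMillis();   // one id shared by all jobs of a single action
 //   file_queue.addJob(id, workspace_tree, source_node,
 //                     collection_tree, target_folder_node,
 //                     FileJob.COPY,          // or FileJob.MOVE / FileJob.DELETE
 //                     false, true, false);   // undo, undoable, folder_level
 //
 // The nine-argument overload passes position -1, which appends the job. run() consumes
 // jobs from the tail of the queue, and child jobs are re-inserted at the parent's
 // position so directories are copied in a sensible order.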
117
118 /** Calculates the total deep file size of the selected file nodes.
119 * @param files a FileNode[] of selected files
120 * @return true if a cancel was signalled, false otherwise
121 * @see org.greenstone.gatherer.file.FileManager.Task#run()
122 */
123 public boolean calculateSize(FileNode[] files)
124 {
125 file_status.setText(Dictionary.get("FileActions.Calculating_Size"));
126 progress.setString(Dictionary.get("FileActions.Calculating_Size"));
127
128 // Calculate the total file size of all the selected file nodes
129 Vector remaining = new Vector();
130 for (int i = 0; !cancel_action && i < files.length; i++) {
131 remaining.add(files[i]);
132 }
133 while (!cancel_action && remaining.size() > 0) {
134 FileNode node = (FileNode) remaining.remove(0);
135 if (node.isLeaf()) {
136 progress.addMaximum(node.getFile().length());
137 }
138 else {
139 for (int i = 0; !cancel_action && i < node.getChildCount(); i++) {
140 remaining.add(node.getChildAt(i));
141 }
142 }
143 }
144
145 // Now we return whether the calculation was cancelled so that the FileManager's Task can skip the addJob phase correctly.
146 if (cancel_action) {
147 cancel_action = false; // reset
148 return true;
149 }
150 else {
151 return false;
152 }
153 }
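 // Sketch of how a caller might use the return value (editor's illustration):
 //
 //   boolean was_cancelled = file_queue.calculateSize(selected_nodes);
 //   if (!was_cancelled) {
 //       // ... queue the actual copy/move/delete jobs with addJob() ...
 //   }
 //
 // calculateSize() only grows the progress bar's maximum; the matching addValue() calls
 // happen later, as each file is copied or deleted in run().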
154
155 /** This method is called to cancel the job queue at the next available moment. */
156 public void cancelAction() {
157 cancel_action = true;
158 }
159 /** Access to the file state label. */
160 public JLabel getFileStatus() {
161 return file_status;
162 }
163
164 /** Access to the progress bar. */
165 public LongProgressBar getProgressBar() {
166 return progress;
167 }
168 /** Prevent the progress bar updating momentarily, while its size is re-adjusted. */
169 public void pause() {
170 progress.setIndeterminate(true);
171 }
172
173
174 /** The run method exists in every thread, and here it is used to work its way through the queue of jobs. If no jobs are waiting, it waits until a job arrives. If a job is present then it is either COPIED or DELETED, with the tree records being copied or removed as necessary and directories being recursed through. Finally, the user can press cancel to cause the loop to prematurely dump the job queue and then wait.
175 * @see org.greenstone.gatherer.Gatherer
176 * @see org.greenstone.gatherer.collection.CollectionManager
177 * @see org.greenstone.gatherer.file.FileJob
178 * @see org.greenstone.gatherer.file.FileNode
179 * @see org.greenstone.gatherer.gui.LongProgressBar
180 * @see org.greenstone.gatherer.util.Utility
181 */
182 public void run()
183 {
184 super.setName("FileQueue");
185
186 while (!Gatherer.self.exit) {
187 try {
188 // Retrieve the next job
189 int position = queue.size() - 1;
190 FileJob job = removeJob(position);
191 if (job != null) {
192 ///ystem.err.println("Found job: " + job);
193 // Enable the stop button
194 stop_button.setEnabled(true);
195 // The user can cancel this individual action at several places, so keep track of whether the state is 'ready' for the next step.
196 boolean ready = true;
197 FileNode origin_node = job.getOrigin();
198 FileNode destination_node = job.getDestination();
199 FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel();
200 FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel();
201 if(destination_node == null) {
202 // Retrieve the root node of the target model instead. A delete, of course, has no target file so all deleted files are added to the root of the Recycle Bin model.
203 destination_node = (FileNode) target_model.getRoot();
204 }
205 // Extract common job details.
206 File source_file = origin_node.getFile();
207 File target_file = null;
208 // Determine the target file for a copy or move.
209 if(job.type == FileJob.COPY || job.type == FileJob.MOVE) {
210 //target_file = new File(destination_node.getFile(), source_file.getName());
211 // Use the name of the FileNode instead of the name of the file - these should be the same except for collection directories, where we want the collection name to be used rather than 'import', which is the underlying name
212 target_file = new File(destination_node.getFile(), origin_node.toString());
213 }
214 // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the latest version of its metadata.xml has been saved to disk. To copy a directory simply create the directory at the destination, then add all of its child files as new jobs.
215 if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {
216 ///ystem.err.println("Copy/Move: " + origin_node);
217
218 // The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep in the folder hierarchy we are.
219 int max_folder_depth = Configuration.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);
220 boolean continue_over_depth = false;
221 if(FileManager.countFolderDepth(source_file) > max_folder_depth) {
222 Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };
223 String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };
224 int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);
225 args = null;
226 options = null;
227 switch(result) {
228 case 0: // Yes
229 continue_over_depth = true;
230 break;
231 case 2: // Continue and increase depth
232 continue_over_depth = true;
233 Configuration.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));
234 break;
235 }
236 }
237 else {
238 continue_over_depth = true;
239 }
240
241 if(continue_over_depth) {
242 FileNode new_node = null;
243 // Check if file exists, and action as necessary. Be aware the user can choose to cancel the action all together (where upon ready becomes false).
244 if(target_file.exists()) {
245 // The user has previously chosen 'Yes to all', so existing files are overwritten without asking.
246 if(yes_to_all) {
247 // Remove the old file and tree entry.
248 target_file.delete();
249 ready = true;
250 }
251 else {
252 ///atherer.println("Opps! This filename already exists. Give the user some options.");
253 Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };
254 int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);
255 switch(result) {
256 case 1: // Yes To All
257 yes_to_all = true;
258 case 0: // Yes
259 // Remove the old file and tree entry.
260 if(destination_node != null) {
261 TreePath destination_path = new TreePath(destination_node.getPath());
262 FileNode temp_target_node = new FileNode(target_file, target_model, true);
263 TreePath target_path = destination_path.pathByAddingChild(temp_target_node);
264 SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));
265 target_path = null;
266 temp_target_node = null;
267 destination_path = null;
268 }
269 target_file.delete();
270 ready = true;
271 break;
272 case 3: // Cancel
273 cancel_action = true;
274 case 2: // No
275 default:
276 ready = false;
277 // Increment progress by size of potentially copied file
278 progress.addValue(origin_node.getFile().length());
279 }
280 }
281 }
282 // We proceed with the copy/move if the ready flag is still set. If it is, that means there is no longer an existing file of the same name.
283 if(ready) {
284 // update status area
285 String args[] = new String[1];
286 args[0] = "" + (queue.size() + 1) + "";
287 if(job.type == FileJob.COPY) {
288 args[0] = Utility.formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);
289 file_status.setText(Dictionary.get("FileActions.Copying", args));
290 }
291 else {
292 args[0] = Utility.formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);
293 file_status.setText(Dictionary.get("FileActions.Moving", args));
294 }
295 args = null;
296
297 // If source is a file
298 if(source_file.isFile()) {
299 // Copy the file. If anything goes wrong, copyFile should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. No point in being told you're out of hard drive space for each one of six thousand files, eh?
300 try {
301 copyFile(source_file, target_file, progress);
302 }
303 // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved.
304 catch(FileNotFoundException fnf_exception) {
305 DebugStream.printStackTrace(fnf_exception);
306 cancel_action = true;
307 // Show warning.
308 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
309 // Force refresh of source folder.
310 source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
311 }
312 catch(FileAlreadyExistsException fae_exception) {
313 DebugStream.printStackTrace(fae_exception);
314 cancel_action = true;
315 // Show warning.
316 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);
317 // Nothing else can be done by the Gatherer.
318 }
319 catch(InsufficientSpaceException is_exception) {
320 DebugStream.printStackTrace(is_exception);
321 cancel_action = true;
322 // Show warning. The message body of the exception explains how much more space is required for this file copy.
323 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);
324 // Nothing else can be done by the Gatherer. In fact if we are really out of space I'm not even sure we can quit safely.
325 }
326 catch (ReadNotPermittedException rnp_exception) {
327 if (DebugStream.isDebuggingEnabled()) {
328 DebugStream.printStackTrace(rnp_exception);
329 }
330 cancel_action = true;
331 // Show warning
332 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Read_Not_Permitted_Message", source_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
333 // Nothing else we can do.
334 }
335 catch(UnknownFileErrorException ufe_exception) {
336 DebugStream.printStackTrace(ufe_exception);
337 cancel_action = true;
338 // Show warning
339 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);
340 // Nothing else we can do.
341 }
342 catch(WriteNotPermittedException wnp_exception) {
343 if (DebugStream.isDebuggingEnabled()) {
344 DebugStream.printStackTrace(wnp_exception);
345 }
346 cancel_action = true;
347 // Show warning
348 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
349 // Nothing else we can do.
350 }
351 catch(IOException exception) {
352 // Can't really do much about this.
353 DebugStream.printStackTrace(exception);
354 }
355 // If not cancelled
356 if(!cancel_action) {
357 // Step one is to create a dummy FileNode. It's important that it has the correct structure so that getPath works.
358 FileNode new_record = new FileNode(target_file);
359 SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_record);
360 new_node = new_record;
361
362 // create undo job
363 if(job.undoable) {
364 job.undoable = false;
365 }
366 new_record = null;
367 }
368 }
369 // Else if the source is a directory
370 else if(source_file.isDirectory()) {
371 // create new record
372 FileNode directory_record = new FileNode(target_file);
373 ///ystem.err.println("Directory record = " + directory_record + " (" + target_file.getAbsolutePath() + ")");
374 SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);
375 // Why is this not happening eh?
376 directory_record.setParent(destination_node);
377 if(!target_file.exists()) {
378 // make the directory
379 target_file.mkdirs();
380 new_node = directory_record;
381 // create undo job
382 if(job.undoable) {
383 job.undoable = false;
384 }
385 }
386 // Else inform the users that a directory already exists and files will be copied into it
387 //else {
388 // JOptionPane.showMessageDialog(null, Dictionary.get("FileActions.Directory_Exists", target_file.toString()), Dictionary.get("General.Warning"), JOptionPane.WARNING_MESSAGE);
389 //}
390 // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to delete the entire directory on undo when it already existed.
391 FileNode child_record = null;
392 // In order to have a sane copy procedure (rather than always copying the last file first, as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this.
393 // -- Starting queue ...[a]
394 // remove(position) = 'a' ...
395 // add(position, 'b') ...[b]
396 // add(position, 'c') ...[c][b]
397 // add(position, 'd') ...[d][c][b]
398 // Next loop
399 // remove(position) = 'b' ...[d][c]
400 for(int i = 0; i < origin_node.getChildCount(); i++) {
401 child_record = (FileNode) origin_node.getChildAt(i);
402 addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, job.undo, false, false, position);
403 }
404 child_record = null;
405 directory_record = null;
406 }
407 // The file wasn't found!
408 else {
409 cancel_action = true;
410 // Show warning.
411 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
412 // Force refresh of source folder.
413 source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
414 }
415
416 // We must not have been cancelled, and we must have created a new FileNode during the above phase, before we can handle metadata.
417 if (!cancel_action && new_node != null) {
418 /* Time to handle any existing metadata. */
419 // If the directory came from inside our collection...
420 if (job.source.toString().equals("Collection")) {
421 // System.err.println("Move within collection...");
422
423 // Get the non-folder level metadata assigned to the origin node...
424 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file);
425 // ...and remove it from the original node and assign it to the new folder
426 for (int i = 0; i < assigned_metadata.size(); i++) {
427 MetadataValue metadata_value = (MetadataValue) assigned_metadata.get(i);
428 MetadataXMLFileManager.removeMetadata(origin_node, metadata_value);
429 MetadataXMLFileManager.addMetadata(new_node, metadata_value);
430 }
431 }
432 // If it came from the workspace, search for metadata assigned to the file
433 else if (job.source.toString().equals("Workspace")) {
434 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(origin_node.getFile());
435 for (int i = 0; i < assigned_metadata.size(); i++) {
436 MetadataValue metadata_value = (MetadataValue) assigned_metadata.get(i);
437 MetadataXMLFileManager.addMetadata(new_node, metadata_value);
438 }
439 }
440 }
441 new_node = null;
442 }
443 }
444 }
445 // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycle bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the filesystem for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. Also be aware that if the user has cancelled just this action, because of, say, a name clash, then we shouldn't do any deleting of any sort.
446 if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {
447 // Update the progress bar for this job
448 if (source_file.isFile()) {
449 progress.addValue(source_file.length());
450 }
451
452 // If the source is a file or an empty directory (but not the root node of a tree)
453 File[] child_list = source_file.listFiles();
454 if (source_file.isFile() || (child_list != null && child_list.length == 0 && origin_node.getParent() != null)) {
455 // Update status area
456 String args[] = new String[1];
457 args[0] = Utility.formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);
458 file_status.setText(Dictionary.get("FileActions.Deleting", args));
459
460 // If it is a metadata.xml file, we must unload it
461 if (source_file.getName().equals(Utility.METADATA_XML)) {
462 MetadataXMLFileManager.unloadMetadataXMLFile(source_file);
463 }
464
465 // Remove the metadata assigned directly to the file
466 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(origin_node.getFile());
467 for (int i = 0; i < assigned_metadata.size(); i++) {
468 MetadataValue metadata_value = (MetadataValue) assigned_metadata.get(i);
469 MetadataXMLFileManager.removeMetadata(origin_node, metadata_value);
470 }
471
472 // Remove from model
473 FileNode parent_record = (FileNode) origin_node.getParent();
474 if (parent_record != null) {
475 SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);
476 }
477
478 // Delete the source file
479 Utility.delete(source_file);
480 }
481 // Else the source is a directory and it has children remaining
482 else if(child_list != null && child_list.length > 0) {
483 // Don't worry about all this for true file move actions.
484 if(job.type == FileJob.DELETE) {
485 // Queue all of its children (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would already have been queued as part of the copying. I have no idea why, per se, but the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?), so I'll check that each child is only added once.
486 origin_node.refresh();
487 for(int i = 0; i < origin_node.size(); i++) {
488 FileNode child_record = (FileNode) origin_node.get(i);
489 ///atherer.println("Queuing: " + child_record);
490 addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position);
491 }
492 }
493 // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue so sooner is later. Te-he. Also have to remember that we have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.
494 // One special case. Do not requeue root nodes. Don't requeue jobs marked as done.
495 if(origin_node.getParent() != null && !job.done) {
496 ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath());
497 job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after.
498 addJob(job, position);
499 }
500 else {
501 DebugStream.println("I've already done this job twice. I refuse to requeue it again!!");
502 }
503 }
504 }
505 job = null;
506 source_file = null;
507 target_file = null;
508 origin_node = null;
509
510 // We only break out of the while loop if we are out of files or the action was cancelled
511 if (cancel_action) {
512 // Empty queue
513 clearJobs();
514 cancel_action = false;
515 }
516 }
517 else {
518 // Disable stop button
519 if (stop_button != null) {
520 stop_button.setEnabled(false);
521 }
522 synchronized(this) {
523 // Force both workspace and collection trees to refresh
524 if (Gatherer.g_man != null) {
525 Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);
526 Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);
527 }
528
529 // Reset status area
530 file_status.setText(Dictionary.get("FileActions.No_Activity"));
531 progress.reset();
532 progress.setString(Dictionary.get("FileActions.No_Activity"));
533 yes_to_all = false;
534 wait();
535 }
536 }
537 }
538 catch (Exception error) {
539 DebugStream.printStackTrace(error);
540 }
541 }
542 }
543
544
545 /** Register the button that will be responsible for stopping executing file actions.
546 * @param stop_button a JButton
547 */
548 public void registerStopButton(JButton stop_button) {
549 this.stop_button = stop_button;
550 }
551
552
553 synchronized private void clearJobs() {
554 queue.clear();
555 }
556
557 /** Copy the contents from the source directory to the destination
558 * directory.
559 * @param source The source directory
560 * @param destination The destination directory
561 * @param progress A progress bar to monitor copying progress
562 * @see org.greenstone.gatherer.Gatherer
563 */
564 public void copyDirectoryContents(File source, File destination, LongProgressBar progress)
565 throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, ReadNotPermittedException, UnknownFileErrorException, WriteNotPermittedException
566 {
567 if (!source.isDirectory()) return;
568 // check that dest dirs exist
569 destination.mkdirs();
570
571 File [] src_files = source.listFiles();
572 if (src_files.length == 0) return; // nothing to copy
573 for (int i=0; i<src_files.length; i++) {
574 File f = src_files[i];
575 String f_name = f.getName();
576 File new_file = new File(destination, f_name);
577 if (f.isDirectory()) {
578 copyDirectoryContents(f, new_file, progress);
579 } else if (f.isFile()) {
580 copyFile(f, new_file, progress);
581 }
582 }
583
584 }
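 // Note (editor's addition): copyDirectoryContents() recurses into sub-directories and
 // delegates every regular file to copyFile(), so the same LongProgressBar instance tracks
 // the whole sub-tree. It assumes source.listFiles() returns a non-null array, which holds
 // for a readable directory.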
585
586 /** Copy a file from the source location to the destination location.
587 * @param source The source File.
588 * @param destination The destination File.
589 * @see org.greenstone.gatherer.Gatherer
590 */
591 public void copyFile(File source, File destination, LongProgressBar progress)
592 throws FileAlreadyExistsException, FileNotFoundException, InsufficientSpaceException, IOException, ReadNotPermittedException, UnknownFileErrorException, WriteNotPermittedException {
593 if(source.isDirectory()) {
594 destination.mkdirs();
595 }
596 else {
597 // Check if the origin file exists.
598 if (!source.exists()) {
599 DebugStream.println("Couldn't find the source file.");
600 throw(new FileNotFoundException());
601 }
602
603 // Make sure the destination file does not exist.
604 if (destination.exists()) {
605 throw(new FileAlreadyExistsException());
606 }
607
608 // Open an input stream to the source file
609 FileInputStream f_in = null;
610 try {
611 f_in = new FileInputStream(source);
612 }
613 catch (FileNotFoundException exception) {
614 // A FileNotFoundException translates into a ReadNotPermittedException in this case
615 throw new ReadNotPermittedException(exception.toString());
616 }
617
618 // Create any necessary directories for the target file
619 File dirs = destination.getParentFile();
620 dirs.mkdirs();
621
622 // Open an output stream to the target file
623 FileOutputStream f_out = null;
624 try {
625 f_out = new FileOutputStream(destination);
626 }
627 catch (FileNotFoundException exception) {
628 // A FileNotFoundException translates into a WriteNotPermittedException in this case
629 throw new WriteNotPermittedException(exception.toString());
630 }
631
632 // Copy the file
633 byte data[] = new byte[Utility.BUFFER_SIZE];
634 int data_size = 0;
635 while((data_size = f_in.read(data, 0, Utility.BUFFER_SIZE)) != -1 && !cancel_action) {
636 long destination_size = destination.length();
637 try {
638 f_out.write(data, 0, data_size);
639 }
640 // If an IO exception occurs, we can do some maths to determine whether the number of bytes written to the file was less than expected. If so we assume insufficient space and throw an InsufficientSpaceException. If not we just rethrow the exception.
641 catch (IOException io_exception) {
642 if(destination_size + (long) data_size > destination.length()) {
643 // Determine the difference (which I guess is in bytes).
644 long difference = (destination_size + (long) data_size) - destination.length();
645 // Transform that into a human readable string.
646 String message = Utility.formatFileLength(difference);
647 throw(new InsufficientSpaceException(message));
648 }
649 else {
650 throw(io_exception);
651 }
652 }
653 if(progress != null) {
654 progress.addValue(data_size);
655 }
656 }
657 // Flush and close the streams to ensure all bytes are written.
658 f_in.close();
659 f_out.close();
660 // We have now, in theory, produced an exact copy of the source file. Check this by comparing sizes.
661 if(!destination.exists() || (!cancel_action && source.length() != destination.length())) {
662 throw(new UnknownFileErrorException());
663 }
664 // If we were cancelled, ensure that none of the destination file exists.
665 if(cancel_action) {
666 destination.delete();
667 }
668 }
669 }
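 // Sketch of calling copyFile() directly (editor's illustration; its real callers are
 // run() and copyDirectoryContents()):
 //
 //   try {
 //       copyFile(source, destination, progress);
 //   }
 //   catch (InsufficientSpaceException exception) {
 //       // exception.getMessage() holds the human-readable shortfall produced by
 //       // Utility.formatFileLength()
 //   }
 //   catch (Exception exception) {
 //       // the remaining checked exceptions (FileAlreadyExists, ReadNotPermitted,
 //       // WriteNotPermitted, UnknownFileError, FileNotFound, IOException) are warned
 //       // about and cancel the remaining jobs, as run() does
 //   }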
670
671
672 private FileJob removeJob(int position) {
673 FileJob job = null;
674 if(queue.size() > 0) {
675 job = (FileJob) queue.remove(position);
676 }
677 return job;
678 }
679}