Changeset 11085
- Timestamp:
- 2006-01-23T15:57:36+13:00 (18 years ago)
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java
r11080 r11085 84 84 * @param job A previously created FileJob. 85 85 */ 86 synchronized private void addJob(FileJob job, int position) {87 job.done = true; // Ensure that the requeued job is marked as done.88 queue.add(position, job);89 notify();90 }86 // synchronized private void addJob(FileJob job, int position) { 87 // job.done = true; // Ensure that the requeued job is marked as done. 88 // queue.add(position, job); 89 // notify(); 90 // } 91 91 92 92 /** Add a new job to the queue, specifiying as many arguments as is necessary to complete this type of job (ie delete needs no target information). … … 107 107 108 108 synchronized private void addJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type, boolean folder_level, int position) { 109 FileJob job = new FileJob(id, source, child, target, parent, type);110 job.folder_level = folder_level;111 112 if(position != -1 && position <= queue.size() + 1) {113 queue.add(position, job);114 }115 else {109 FileJob job = new FileJob(id, source, child, target, parent, type); 110 job.folder_level = folder_level; 111 DebugStream.println("Adding job: " + job); 112 if(position != -1 && position <= queue.size() + 1) { 113 queue.add(position, job); 114 } 115 else { 116 116 queue.add(job); 117 }118 notify();117 } 118 notify(); 119 119 } 120 120 … … 162 162 163 163 164 private int countFolderDepth(File file)165 {166 int depth = 0;167 while (file != null) {168 depth++;169 file = file.getParentFile();170 }171 return depth;172 }164 // private int countFolderDepth(File file) 165 // { 166 // int depth = 0; 167 // while (file != null) { 168 // depth++; 169 // file = file.getParentFile(); 170 // } 171 // return depth; 172 // } 173 173 174 174 … … 202 202 public GProgressBar getProgressBar() { 203 203 return progress; 204 } 205 206 207 synchronized private void addFileJob(long id, DragComponent source, FileNode child, DragComponent target, FileNode parent, byte type) 208 { 209 queue.add(new 
FileJob(id, source, child, target, parent, type)); 210 notify(); 211 } 212 213 214 private void doEmptyDirectoryDelete(FileJob file_job) 215 { 216 FileNode source_node = file_job.getOrigin(); 217 File source_directory = source_node.getFile(); 218 219 // If the directory isn't empty then this will fail 220 if (source_directory.delete() == false) { 221 // The source directory couldn't be deleted, so give the user the option of continuing or cancelling 222 if (showErrorDialog(Dictionary.get("FileActions.Could_Not_Delete", source_directory.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 223 clearJobs(); // Aborting action 224 } 225 return; 226 } 227 228 // Remove the node from the model 229 SynchronizedTreeModelTools.removeNodeFromParent(file_job.source.getTreeModel(), source_node); 230 } 231 232 233 private void doDirectoryDelete(FileJob file_job) 234 { 235 FileNode source_node = file_job.getOrigin(); 236 File source_directory = source_node.getFile(); 237 238 // The last thing we will do is delete this directory (which should be empty by then) 239 addFileJob(file_job.ID(), file_job.source, source_node, null, null, FileJob.DELETE_EMPTY_DIRECTORY); 240 241 // Add a new Delete job for each child of this directory (except metadata.xml files) 242 source_node.refresh(); 243 for (int i = 0; i < source_node.size(); i++) { 244 FileNode child_file_node = (FileNode) source_node.getChildAtUnfiltered(i); 245 if (!child_file_node.getFile().getName().equals(StaticStrings.METADATA_XML)) { 246 addFileJob(file_job.ID(), file_job.source, child_file_node, null, null, FileJob.DELETE); 247 } 248 } 249 250 // Treat metadata.xml files specially: delete them first 251 for (int i = 0; i < source_node.size(); i++) { 252 FileNode child_file_node = (FileNode) source_node.getChildAtUnfiltered(i); 253 if (child_file_node.getFile().getName().equals(StaticStrings.METADATA_XML)) { 254 addFileJob(file_job.ID(), file_job.source, child_file_node, null, null, FileJob.DELETE); 255 break; 256 } 257 } 
258 } 259 260 261 private void doDirectoryCopy(FileJob file_job) 262 { 263 FileNode source_node = file_job.getOrigin(); 264 FileNode target_node = file_job.getDestination(); 265 266 File source_directory = source_node.getFile(); 267 File target_directory = new File(target_node.getFile(), source_directory.getName()); 268 269 // The target directory shouldn't already exist 270 if (target_directory.exists()) { 271 if (showErrorDialog(Dictionary.get("FileActions.Folder_Already_Exists", target_directory.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 272 clearJobs(); // Aborting action 273 } 274 return; 275 } 276 target_directory.mkdirs(); 277 278 // Create a node for the new directory in the collection tree 279 FileSystemModel target_model = (FileSystemModel) file_job.target.getTreeModel(); 280 CollectionTreeNode new_target_node = new CollectionTreeNode(target_directory); 281 SynchronizedTreeModelTools.insertNodeInto(target_model, target_node, new_target_node); 282 new_target_node.setParent(target_node); 283 284 // Copy the non-folder level metadata assigned to the original directory to the new directory 285 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(source_directory); 286 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_target_node, assigned_metadata); 287 288 // Add a new Copy job for each child of this directory (except metadata.xml files) 289 source_node.refresh(); 290 for (int i = 0; i < source_node.size(); i++) { 291 FileNode child_file_node = (FileNode) source_node.getChildAtUnfiltered(i); 292 if (!child_file_node.getFile().getName().equals(StaticStrings.METADATA_XML)) { 293 addFileJob(file_job.ID(), file_job.source, child_file_node, file_job.target, new_target_node, FileJob.COPY); 294 } 295 } 296 } 297 298 299 private void doDirectoryMove(FileJob file_job) 300 { 301 FileNode source_node = file_job.getOrigin(); 302 FileNode target_node = file_job.getDestination(); 303 304 File source_directory = 
source_node.getFile(); 305 File target_directory = new File(target_node.getFile(), source_directory.getName()); 306 307 // Check the target directory isn't the source directory 308 if (target_directory.equals(source_directory)) { 309 DebugStream.println("Target directory is the source directory!"); 310 return; 311 } 312 313 // The target directory shouldn't already exist 314 if (target_directory.exists()) { 315 if (showErrorDialog(Dictionary.get("FileActions.Folder_Already_Exists", target_directory.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 316 clearJobs(); // Aborting action 317 } 318 return; 319 } 320 target_directory.mkdirs(); 321 322 // Create a node for the new directory in the collection tree 323 FileSystemModel target_model = (FileSystemModel) file_job.target.getTreeModel(); 324 CollectionTreeNode new_target_node = new CollectionTreeNode(target_directory); 325 SynchronizedTreeModelTools.insertNodeInto(target_model, target_node, new_target_node); 326 new_target_node.setParent(target_node); 327 328 // Move the folder level metadata assigned to the original directory to the new directory 329 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_directory); 330 MetadataXMLFileManager.removeMetadata((CollectionTreeNode) source_node, assigned_metadata); 331 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_target_node, assigned_metadata); 332 333 // The last thing we will do is delete this directory 334 addFileJob(file_job.ID(), file_job.source, source_node, null, null, FileJob.DELETE); 335 336 // Treat metadata.xml files specially: delete them last 337 source_node.refresh(); 338 for (int i = 0; i < source_node.size(); i++) { 339 FileNode child_file_node = (FileNode) source_node.getChildAtUnfiltered(i); 340 if (child_file_node.getFile().getName().equals(StaticStrings.METADATA_XML)) { 341 addFileJob(file_job.ID(), file_job.source, child_file_node, null, null, FileJob.DELETE); 342 break; 343 } 344 } 345 346 
// Add a new Move job for each child of this directory (except metadata.xml files) 347 for (int i = 0; i < source_node.size(); i++) { 348 FileNode child_file_node = (FileNode) source_node.getChildAtUnfiltered(i); 349 if (!child_file_node.getFile().getName().equals(StaticStrings.METADATA_XML)) { 350 addFileJob(file_job.ID(), file_job.source, child_file_node, file_job.target, new_target_node, FileJob.MOVE); 351 } 352 } 353 } 354 355 356 private void doFileDelete(FileJob file_job) 357 { 358 FileNode source_node = file_job.getOrigin(); 359 File source_file = source_node.getFile(); 360 361 // If we're deleting a metadata.xml file we must unload it 362 boolean metadata_xml_file = source_file.getName().equals(StaticStrings.METADATA_XML); 363 if (metadata_xml_file) { 364 MetadataXMLFileManager.unloadMetadataXMLFile(source_file); 365 } 366 // Otherwise remove any metadata assigned directly to the file 367 else { 368 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file); 369 MetadataXMLFileManager.removeMetadata((CollectionTreeNode) source_node, assigned_metadata); 370 } 371 372 // Delete the source file 373 if (!Utility.delete(source_file)) { 374 // The source file couldn't be deleted, so give the user the option of continuing or cancelling 375 if (showErrorDialog(Dictionary.get("FileActions.File_Not_Deleted_Message", source_file.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 376 clearJobs(); // Aborting action 377 } 378 return; 379 } 380 381 // Remove the node from the model 382 SynchronizedTreeModelTools.removeNodeFromParent(file_job.source.getTreeModel(), source_node); 383 } 384 385 386 private void doFileCopy(FileJob file_job) 387 { 388 FileNode source_node = file_job.getOrigin(); 389 FileNode target_node = file_job.getDestination(); 390 391 File source_file = source_node.getFile(); 392 File target_file = new File(target_node.getFile(), source_file.getName()); 393 394 // The target file shouldn't already exist 395 if 
(target_file.exists()) { 396 int result = showOverwriteDialog(target_file.getName()); 397 if (result == JOptionPane.NO_OPTION) { 398 // Don't overwrite 399 return; 400 } 401 if (result == JOptionPane.CANCEL_OPTION) { 402 clearJobs(); // Aborting action 403 return; 404 } 405 } 406 407 // Copy the file 408 try { 409 copyFile(source_file, target_file, true); 410 } 411 catch (FileAlreadyExistsException exception) { 412 // This should not ever happen, since we've called copyFile with overwrite set 413 DebugStream.printStackTrace(exception); 414 return; 415 } 416 catch (FileNotFoundException exception) { 417 DebugStream.printStackTrace(exception); 418 if (showErrorDialog(Dictionary.get("FileActions.File_Not_Found_Message", source_file.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 419 clearJobs(); // Aborting action 420 } 421 // Refresh the source tree model 422 FileSystemModel source_model = file_job.source.getTreeModel(); 423 source_model.refresh(new TreePath(((FileNode) file_job.getOrigin().getParent()).getPath())); 424 return; 425 } 426 catch (InsufficientSpaceException exception) { 427 DebugStream.printStackTrace(exception); 428 if (showErrorDialog(Dictionary.get("FileActions.Insufficient_Space_Message", exception.getMessage())) == JOptionPane.CANCEL_OPTION) { 429 clearJobs(); // Aborting action 430 } 431 return; 432 } 433 catch (IOException exception) { 434 DebugStream.printStackTrace(exception); 435 if (showErrorDialog(exception.getMessage()) == JOptionPane.CANCEL_OPTION) { 436 clearJobs(); // Aborting action 437 } 438 return; 439 } 440 catch (ReadNotPermittedException exception) { 441 DebugStream.printStackTrace(exception); 442 if (showErrorDialog(Dictionary.get("FileActions.Read_Not_Permitted_Message", source_file.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 443 clearJobs(); // Aborting action 444 } 445 return; 446 } 447 catch (UnknownFileErrorException exception) { 448 DebugStream.printStackTrace(exception); 449 if 
(showErrorDialog(Dictionary.get("FileActions.Unknown_File_Error_Message")) == JOptionPane.CANCEL_OPTION) { 450 clearJobs(); // Aborting action 451 } 452 return; 453 } 454 catch (WriteNotPermittedException exception) { 455 DebugStream.printStackTrace(exception); 456 if (showErrorDialog(Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 457 clearJobs(); // Aborting action 458 } 459 return; 460 } 461 462 CollectionTreeNode new_target_node = new CollectionTreeNode(target_file); 463 FileSystemModel target_model = file_job.target.getTreeModel(); 464 SynchronizedTreeModelTools.insertNodeInto(target_model, target_node, new_target_node); 465 Gatherer.c_man.fireFileAddedToCollection(target_file); 466 467 // Copy the non-folder level metadata assigned to the original file to the new file 468 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(source_file); 469 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_target_node, assigned_metadata); 470 } 471 472 473 private void doFileMove(FileJob file_job) 474 { 475 FileNode source_node = file_job.getOrigin(); 476 FileNode target_node = file_job.getDestination(); 477 478 File source_file = source_node.getFile(); 479 File target_file = new File(target_node.getFile(), source_file.getName()); 480 481 // Check the target file isn't the source file 482 if (target_file.equals(source_file)) { 483 DebugStream.println("Target file is the source file!"); 484 return; 485 } 486 487 // The target file shouldn't already exist 488 if (target_file.exists()) { 489 int result = showOverwriteDialog(target_file.getName()); 490 if (result == JOptionPane.NO_OPTION) { 491 // Don't overwrite 492 return; 493 } 494 if (result == JOptionPane.CANCEL_OPTION) { 495 clearJobs(); // Aborting action 496 return; 497 } 498 } 499 500 // Move the file by renaming it 501 if (!source_file.renameTo(target_file)) { 502 String args[] = { 
source_file.getName(), target_file.getAbsolutePath() }; 503 if (showErrorDialog(Dictionary.get("FileActions.File_Move_Error_Message", args)) == JOptionPane.CANCEL_OPTION) { 504 clearJobs(); // Aborting action 505 } 506 return; 507 } 508 509 // Remove the node from the source model and add it to the target model 510 SynchronizedTreeModelTools.removeNodeFromParent(file_job.source.getTreeModel(), source_node); 511 CollectionTreeNode new_target_node = new CollectionTreeNode(target_file); 512 FileSystemModel target_model = file_job.target.getTreeModel(); 513 SynchronizedTreeModelTools.insertNodeInto(target_model, target_node, new_target_node); 514 515 // Move the non-folder level metadata assigned to the original file to the new file 516 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file); 517 MetadataXMLFileManager.removeMetadata((CollectionTreeNode) source_node, assigned_metadata); 518 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_target_node, assigned_metadata); 519 } 520 521 522 private void processFileJob(FileJob file_job) 523 { 524 DebugStream.println("Processing file job " + file_job + "..."); 525 526 // Ensure that the source file exists 527 File source_file = file_job.getOrigin().getFile(); 528 if (!source_file.exists()) { 529 // The source file doesn't exist, so give the user the option of continuing or cancelling 530 if (showErrorDialog(Dictionary.get("FileActions.File_Not_Found_Message", source_file.getAbsolutePath())) == JOptionPane.CANCEL_OPTION) { 531 clearJobs(); // Aborting action 532 } 533 // Refresh the source tree model 534 FileSystemModel source_model = file_job.source.getTreeModel(); 535 source_model.refresh(new TreePath(((FileNode) file_job.getOrigin().getParent()).getPath())); 536 return; 537 } 538 539 // Enable the "Stop" button 540 stop_button.setEnabled(true); 541 542 // Delete empty directory job 543 if (file_job.type == FileJob.DELETE_EMPTY_DIRECTORY) { 544 
file_status.setText(Dictionary.get("FileActions.Deleting", formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width))); 545 doEmptyDirectoryDelete(file_job); 546 } 547 548 // Delete job 549 if (file_job.type == FileJob.DELETE) { 550 file_status.setText(Dictionary.get("FileActions.Deleting", formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width))); 551 if (source_file.isFile()) { 552 long source_file_size = source_file.length(); 553 doFileDelete(file_job); 554 progress.addValue(source_file_size); // Update progress bar 555 } 556 else { 557 doDirectoryDelete(file_job); 558 } 559 } 560 561 // Copy job 562 if (file_job.type == FileJob.COPY) { 563 file_status.setText(Dictionary.get("FileActions.Copying", formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width))); 564 if (source_file.isFile()) { 565 long source_file_size = source_file.length(); 566 doFileCopy(file_job); 567 progress.addValue(source_file_size); // Update progress bar 568 } 569 else { 570 doDirectoryCopy(file_job); 571 } 572 } 573 574 // Move job 575 if (file_job.type == FileJob.MOVE) { 576 file_status.setText(Dictionary.get("FileActions.Moving", formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width))); 577 if (source_file.isFile()) { 578 long source_file_size = source_file.length(); 579 doFileMove(file_job); 580 progress.addValue(source_file_size); // Update progress bar 581 } 582 else { 583 doDirectoryMove(file_job); 584 } 585 } 586 } 587 588 589 private int showErrorDialog(String error_message) 590 { 591 Object[] options = { Dictionary.get("General.OK"), Dictionary.get("General.Cancel") }; 592 int result = JOptionPane.showOptionDialog(Gatherer.g_man, error_message, Dictionary.get("General.Error"), JOptionPane.DEFAULT_OPTION, JOptionPane.ERROR_MESSAGE, null, options, options[0]); 593 if (result == 0) { 594 return JOptionPane.OK_OPTION; 595 } 596 else { 
597 return JOptionPane.CANCEL_OPTION; 598 } 599 } 600 601 602 private int showOverwriteDialog(String target_file_name) 603 { 604 // Has "yes to all" been set? 605 if (yes_to_all) { 606 return JOptionPane.YES_OPTION; 607 } 608 609 Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") }; 610 int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file_name), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]); 611 if (result == 0) { 612 return JOptionPane.YES_OPTION; 613 } 614 else if (result == 1) { 615 yes_to_all = true; 616 return JOptionPane.YES_OPTION; 617 } 618 else if (result == 2) { 619 return JOptionPane.NO_OPTION; 620 } 621 else { 622 return JOptionPane.CANCEL_OPTION; 623 } 624 } 625 626 627 public void run() 628 { 629 super.setName("FileQueue"); 630 631 while (!Gatherer.exit) { 632 // Retrieve the next job 633 int position = queue.size() - 1; 634 if (position >= 0) { 635 // We have a file job, so process it 636 processFileJob((FileJob) queue.remove(position)); 637 } 638 else { 639 // No jobs, so reset and wait until we are notified of one 640 synchronized(this) { 641 // Force both workspace and collection trees to refresh 642 if (Gatherer.g_man != null) { 643 Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED); 644 // Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED); 645 } 646 647 // Reset status area 648 file_status.setText(Dictionary.get("FileActions.No_Activity")); 649 progress.reset(); 650 progress.setString(Dictionary.get("FileActions.No_Activity")); 651 652 // Reset "yes to all" 653 yes_to_all = false; 654 655 // Wait for a new file job 656 try { 657 wait(); 658 } 659 catch (InterruptedException exception) {} 660 } 661 } 662 } 204 663 } 205 664 … … 213 672 * @see 
org.greenstone.gatherer.util.Utility 214 673 */ 215 public void run()216 {217 super.setName("FileQueue");218 219 while (!Gatherer.exit) {220 try {221 // Retrieve the next job222 int position = queue.size() - 1;223 FileJob job = null;224 if (position >= 0) {225 job = (FileJob) queue.remove(position);226 }227 228 if (job != null) {229 ///ystem.err.println("Found job: " + job);230 // Enabled stop button231 stop_button.setEnabled(true);232 // The user can cancel this individual action at several places, so keep track if the state is 'ready' for the next step.233 boolean ready = true;234 FileNode origin_node = job.getOrigin();235 FileNode destination_node = job.getDestination();236 FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel();237 FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel();238 if(destination_node == null) {239 // Retrieve the root node of the target model instead. A delete, or course, has no target file so all deleted files are added to the root of the Recycle Bin model.240 destination_node = (FileNode) target_model.getRoot();241 }242 243 // Extract common job details.244 File source_file = origin_node.getFile();245 File target_file = null;246 // Determine the target file for a copy or move.247 if (job.type == FileJob.COPY || job.type == FileJob.MOVE) {248 // use the name of the filenode instead of the name of the file - these should be the same except for the collection directories where we want the collection name to be used, not 'import' which is the underlying name249 target_file = new File(destination_node.getFile(), origin_node.toString());250 }251 // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the lastest version of its metadata.xml has been saved to disk. 
To copy a directory simply create the directory at the destination, then add all of its children files as new jobs.252 if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {253 ///ystem.err.println("Copy/Move: " + origin_node);254 255 // The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep we are256 int max_folder_depth = Configuration.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);257 boolean continue_over_depth = false;258 if (countFolderDepth(source_file) > max_folder_depth) {259 Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };260 String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };261 int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);262 args = null;263 options = null;264 switch(result) {265 case 0: // Yes266 continue_over_depth = true;267 break;268 case 2: // Continue and increase depth269 continue_over_depth = true;270 Configuration.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));271 break;272 }273 }274 else {275 continue_over_depth = true;276 }674 // public void run() 675 // { 676 // super.setName("FileQueue"); 677 678 // while (!Gatherer.exit) { 679 // try { 680 // // Retrieve the next job 681 // int position = queue.size() - 1; 682 // FileJob job = null; 683 // if (position >= 0) { 684 // job = (FileJob) queue.remove(position); 685 // } 686 687 // if (job != null) { 688 // ///ystem.err.println("Found job: " + job); 689 // // Enabled stop button 690 // stop_button.setEnabled(true); 691 // // The user can cancel this individual action at several places, so keep track if the state is 'ready' for 
the next step. 692 // boolean ready = true; 693 // FileNode origin_node = job.getOrigin(); 694 // FileNode destination_node = job.getDestination(); 695 // FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel(); 696 // FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel(); 697 // if(destination_node == null) { 698 // // Retrieve the root node of the target model instead. A delete, or course, has no target file so all deleted files are added to the root of the Recycle Bin model. 699 // destination_node = (FileNode) target_model.getRoot(); 700 // } 701 702 // // Extract common job details. 703 // File source_file = origin_node.getFile(); 704 // File target_file = null; 705 // // Determine the target file for a copy or move. 706 // if (job.type == FileJob.COPY || job.type == FileJob.MOVE) { 707 // // use the name of the filenode instead of the name of the file - these should be the same except for the collection directories where we want the collection name to be used, not 'import' which is the underlying name 708 // target_file = new File(destination_node.getFile(), origin_node.toString()); 709 // } 710 // // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the lastest version of its metadata.xml has been saved to disk. To copy a directory simply create the directory at the destination, then add all of its children files as new jobs. 711 // if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) { 712 // ///ystem.err.println("Copy/Move: " + origin_node); 713 714 // // The number one thing to check is whether we are in a cyclic loop. 
The easiest way is just to check how deep we are 715 // int max_folder_depth = Configuration.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC); 716 // boolean continue_over_depth = false; 717 // if (countFolderDepth(source_file) > max_folder_depth) { 718 // Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") }; 719 // String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() }; 720 // int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]); 721 // args = null; 722 // options = null; 723 // switch(result) { 724 // case 0: // Yes 725 // continue_over_depth = true; 726 // break; 727 // case 2: // Continue and increase depth 728 // continue_over_depth = true; 729 // Configuration.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1)); 730 // break; 731 // } 732 // } 733 // else { 734 // continue_over_depth = true; 735 // } 277 736 278 if(continue_over_depth) {279 FileNode new_node = null;280 // Check if file exists, and action as necessary. Be aware the user can choose to cancel the action all together (where upon ready becomes false).281 if(target_file.exists()) {282 // We've previously been told283 if(yes_to_all) {284 // Remove the old file and tree entry.285 target_file.delete();286 ready = true;287 }288 else {289 ///atherer.println("Opps! This filename already exists. 
Give the user some options.");290 Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };291 int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);292 switch(result) {293 case 1: // Yes To All294 yes_to_all = true;295 case 0: // Yes296 // Remove the old file and tree entry.297 if(destination_node != null) {298 TreePath destination_path = new TreePath(destination_node.getPath());299 CollectionTreeNode temp_target_node = new CollectionTreeNode(target_file); // !!! , target_model, true);300 TreePath target_path = destination_path.pathByAddingChild(temp_target_node);301 SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));302 target_path = null;303 temp_target_node = null;304 destination_path = null;305 }306 target_file.delete();307 ready = true;308 break;309 case 3: // No To All310 cancel_action = true;311 case 2: // No312 default:313 ready = false;314 // Increment progress by size of potentially copied file315 progress.addValue(origin_node.getFile().length());316 }317 }318 }319 // We proceed with the copy/move if the ready flag is still set. 
If it is that means there is no longer any existing file of the same name.320 if(ready) {321 // update status area322 String args[] = new String[1];323 args[0] = "" + (queue.size() + 1) + "";324 if(job.type == FileJob.COPY) {325 args[0] = formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);326 file_status.setText(Dictionary.get("FileActions.Copying", args));327 }328 else {329 args[0] = formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);330 file_status.setText(Dictionary.get("FileActions.Moving", args));331 }332 args = null;737 // if(continue_over_depth) { 738 // FileNode new_node = null; 739 // // Check if file exists, and action as necessary. Be aware the user can choose to cancel the action all together (where upon ready becomes false). 740 // if(target_file.exists()) { 741 // // We've previously been told 742 // if(yes_to_all) { 743 // // Remove the old file and tree entry. 744 // target_file.delete(); 745 // ready = true; 746 // } 747 // else { 748 // ///atherer.println("Opps! This filename already exists. Give the user some options."); 749 // Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") }; 750 // int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]); 751 // switch(result) { 752 // case 1: // Yes To All 753 // yes_to_all = true; 754 // case 0: // Yes 755 // // Remove the old file and tree entry. 756 // if(destination_node != null) { 757 // TreePath destination_path = new TreePath(destination_node.getPath()); 758 // CollectionTreeNode temp_target_node = new CollectionTreeNode(target_file); // !!! 
, target_model, true); 759 // TreePath target_path = destination_path.pathByAddingChild(temp_target_node); 760 // SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path)); 761 // target_path = null; 762 // temp_target_node = null; 763 // destination_path = null; 764 // } 765 // target_file.delete(); 766 // ready = true; 767 // break; 768 // case 3: // No To All 769 // cancel_action = true; 770 // case 2: // No 771 // default: 772 // ready = false; 773 // // Increment progress by size of potentially copied file 774 // progress.addValue(origin_node.getFile().length()); 775 // } 776 // } 777 // } 778 // // We proceed with the copy/move if the ready flag is still set. If it is that means there is no longer any existing file of the same name. 779 // if(ready) { 780 // // update status area 781 // String args[] = new String[1]; 782 // args[0] = "" + (queue.size() + 1) + ""; 783 // if(job.type == FileJob.COPY) { 784 // args[0] = formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width); 785 // file_status.setText(Dictionary.get("FileActions.Copying", args)); 786 // } 787 // else { 788 // args[0] = formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width); 789 // file_status.setText(Dictionary.get("FileActions.Moving", args)); 790 // } 791 // args = null; 333 792 334 // If source is a file335 if(source_file.isFile()) {336 // copy the file. If anything goes wrong the copy file should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. 
No point in being told your out of hard drive space for each one of six thousand files eh?337 try {338 copyFile(source_file, target_file, false);339 progress.addValue(source_file.length());340 }341 // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved.342 catch(FileNotFoundException fnf_exception) {343 DebugStream.printStackTrace(fnf_exception);344 cancel_action = true;345 // Show warning.346 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);347 // Force refresh of source folder.348 source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));349 }350 catch(FileAlreadyExistsException fae_exception) {351 DebugStream.printStackTrace(fae_exception);352 cancel_action = true;353 // Show warning.354 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);355 // Nothing else can be done by the Gatherer.356 }357 catch(InsufficientSpaceException is_exception) {358 DebugStream.printStackTrace(is_exception);359 cancel_action = true;360 // Show warning. The message body of the expection explains how much more space is required for this file copy.361 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);362 // Nothing else can be done by the Gatherer. 
In fact if we are really out of space I'm not even sure we can quit safely.363 }364 catch (ReadNotPermittedException rnp_exception) {365 if (DebugStream.isDebuggingEnabled()) {366 DebugStream.printStackTrace(rnp_exception);367 }368 cancel_action = true;369 // Show warning370 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Read_Not_Permitted_Message", source_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);371 // Nothing else we can do.372 }373 catch(UnknownFileErrorException ufe_exception) {374 DebugStream.printStackTrace(ufe_exception);375 cancel_action = true;376 // Show warning377 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);378 // Nothing else we can do.379 }380 catch(WriteNotPermittedException wnp_exception) {381 if (DebugStream.isDebuggingEnabled()) {382 DebugStream.printStackTrace(wnp_exception);383 }384 cancel_action = true;385 // Show warning386 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);387 // Nothing else we can do.388 }389 catch(IOException exception) {390 // Can't really do much about this.391 DebugStream.printStackTrace(exception);392 }393 // If not cancelled394 if (!cancel_action) {395 // Create a dummy FileNode with the correct structure (so getPath works)396 new_node = new CollectionTreeNode(target_file);397 SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_node);398 }399 }400 // Else401 else if(source_file.isDirectory()) {402 // create new record403 CollectionTreeNode directory_record = new CollectionTreeNode(target_file);404 SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);405 // 
Why is this not happening eh?406 directory_record.setParent(destination_node);407 if(!target_file.exists()) {408 // make the directory409 target_file.mkdirs();410 new_node = directory_record;411 }412 413 // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed.414 FileNode child_record = null;415 // In order to have a sane copy proceedure (rather than always copying last file first as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this.416 // -- Starting queue ...[a]417 // remove(position) = 'a' ...418 // add(position, 'b') ...[b]419 // add(position, 'c') ...[c][b]420 // add(position, 'd') ...[d][c][b]421 // Next loop422 // remove(position) = 'b' ...[d][c]423 //for(int i = 0; i < origin_node.getChildCount(); i++) {424 for (int i=origin_node.getChildCount()-1; i>=0; i--) {425 child_record = (FileNode) origin_node.getChildAt(i);426 addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, false, position);427 }428 child_record = null;429 directory_record = null;430 }431 // The file wasn't found!432 else {433 cancel_action = true;434 // Show warning.435 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);436 // Force refresh of source folder.437 source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));438 }439 440 // If we haven't been cancelled and we created a new FileNode during the above phase, now is the time to deal with metadata441 if (!cancel_action && new_node != null) {442 // If the file came from inside our collection...443 if 
(job.source.toString().equals("Collection")) {444 // Get the non-folder level metadata assigned to the origin node...445 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file);446 // ...and remove it from the original node and assign it to the new folder447 MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata);448 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata);449 }450 // If it came from the workspace search for metadata assigned to the file451 else if (job.source.toString().equals("Workspace")) {452 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(origin_node.getFile());453 MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata);454 }455 456 if (job.type == FileJob.COPY && new_node.getFile().isFile()) {457 Gatherer.c_man.fireFileAddedToCollection(new_node.getFile());458 }459 }460 new_node = null;461 }462 }463 }464 // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycled bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the filesystem for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. 
Also be aware that if the user has cancelled just this action, because of say a name clash, then we shouldn't do any deleting of any sort dammit.465 if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {466 // Update the progress bar for this job467 if (source_file.isFile()) {468 progress.addValue(source_file.length());469 }470 471 // If the source is a file or an empty directory (but not the root node of a tree)472 File[] child_list = source_file.listFiles();473 if (source_file.isFile() || (child_list != null && child_list.length == 0 && origin_node.getParent() != null)) {474 // Update status area475 String args[] = new String[1];476 args[0] = formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);477 file_status.setText(Dictionary.get("FileActions.Deleting", args));478 479 // If it is a metadata.xml file, we must unload it480 if (source_file.getName().equals(StaticStrings.METADATA_XML)) {481 MetadataXMLFileManager.unloadMetadataXMLFile(source_file);482 }483 484 // Remove the metadata assigned directly to the file485 ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(origin_node.getFile());486 MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata);487 488 // Remove from model489 FileNode parent_record = (FileNode) origin_node.getParent();490 if (parent_record != null) {491 SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);492 }493 494 // Delete the source file495 if (!Utility.delete(source_file)) {496 // Show message that we couldn't delete497 JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Deleted_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Deleted_Title"), JOptionPane.ERROR_MESSAGE);498 }499 }500 // Else the source is a directory and it has children remaining501 else if(child_list != null && child_list.length > 0) {502 // Don't 
worry about all this for true file move actions.503 if(job.type == FileJob.DELETE) {504 // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea way, per sec, however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once.505 origin_node.refresh();506 for(int i = 0; i < origin_node.size(); i++) {507 FileNode child_record = (FileNode) origin_node.getChildAtUnfiltered(i);508 ///atherer.println("Queuing: " + child_record);509 addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, false, position);510 }511 }512 // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue so sooner is later. Te-he. Also have to remember that we have have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.513 // One special case. Do not requeue root nodes. Don't requeue jobs marked as done.514 if(origin_node.getParent() != null && !job.done) {515 ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath());516 job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after.517 addJob(job, position);518 }519 else {520 DebugStream.println("I've already done this job twice. 
I refuse to requeue it again!");521 }522 }523 }524 job = null;525 source_file = null;526 target_file = null;527 origin_node = null;528 529 // We only break out of the while loop if we are out of files or the action was cancelled530 if (cancel_action) {531 // Empty queue532 clearJobs();533 cancel_action = false;534 }535 }536 else { // job == null537 // Disable stop button538 if (stop_button != null) {539 stop_button.setEnabled(false);540 }541 synchronized(this) {542 // Force both workspace and collection trees to refresh543 if (Gatherer.g_man != null) {544 Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);545 Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);546 }547 548 // Reset status area549 file_status.setText(Dictionary.get("FileActions.No_Activity"));550 progress.reset();551 progress.setString(Dictionary.get("FileActions.No_Activity"));552 yes_to_all = false;553 try {554 wait();555 }556 catch (InterruptedException exception) {}557 }558 }559 }560 catch (Exception error) {561 DebugStream.printStackTrace(error);562 }563 }564 }793 // // If source is a file 794 // if(source_file.isFile()) { 795 // // copy the file. If anything goes wrong the copy file should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. No point in being told your out of hard drive space for each one of six thousand files eh? 796 // try { 797 // copyFile(source_file, target_file, false); 798 // progress.addValue(source_file.length()); 799 // } 800 // // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved. 
801 // catch(FileNotFoundException fnf_exception) { 802 // DebugStream.printStackTrace(fnf_exception); 803 // cancel_action = true; 804 // // Show warning. 805 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE); 806 // // Force refresh of source folder. 807 // source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath())); 808 // } 809 // catch(FileAlreadyExistsException fae_exception) { 810 // DebugStream.printStackTrace(fae_exception); 811 // cancel_action = true; 812 // // Show warning. 813 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE); 814 // // Nothing else can be done by the Gatherer. 815 // } 816 // catch(InsufficientSpaceException is_exception) { 817 // DebugStream.printStackTrace(is_exception); 818 // cancel_action = true; 819 // // Show warning. The message body of the expection explains how much more space is required for this file copy. 820 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE); 821 // // Nothing else can be done by the Gatherer. In fact if we are really out of space I'm not even sure we can quit safely. 
822 // } 823 // catch (ReadNotPermittedException rnp_exception) { 824 // if (DebugStream.isDebuggingEnabled()) { 825 // DebugStream.printStackTrace(rnp_exception); 826 // } 827 // cancel_action = true; 828 // // Show warning 829 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Read_Not_Permitted_Message", source_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE); 830 // // Nothing else we can do. 831 // } 832 // catch(UnknownFileErrorException ufe_exception) { 833 // DebugStream.printStackTrace(ufe_exception); 834 // cancel_action = true; 835 // // Show warning 836 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE); 837 // // Nothing else we can do. 838 // } 839 // catch(WriteNotPermittedException wnp_exception) { 840 // if (DebugStream.isDebuggingEnabled()) { 841 // DebugStream.printStackTrace(wnp_exception); 842 // } 843 // cancel_action = true; 844 // // Show warning 845 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE); 846 // // Nothing else we can do. 847 // } 848 // catch(IOException exception) { 849 // // Can't really do much about this. 
850 // DebugStream.printStackTrace(exception); 851 // } 852 // // If not cancelled 853 // if (!cancel_action) { 854 // // Create a dummy FileNode with the correct structure (so getPath works) 855 // new_node = new CollectionTreeNode(target_file); 856 // SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_node); 857 // } 858 // } 859 // // Else 860 // else if(source_file.isDirectory()) { 861 // // create new record 862 // CollectionTreeNode directory_record = new CollectionTreeNode(target_file); 863 // SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record); 864 // // Why is this not happening eh? 865 // directory_record.setParent(destination_node); 866 // if(!target_file.exists()) { 867 // // make the directory 868 // target_file.mkdirs(); 869 // new_node = directory_record; 870 // } 871 872 // // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed. 873 // FileNode child_record = null; 874 // // In order to have a sane copy proceedure (rather than always copying last file first as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this. 875 // // -- Starting queue ...[a] 876 // // remove(position) = 'a' ... 
877 // // add(position, 'b') ...[b] 878 // // add(position, 'c') ...[c][b] 879 // // add(position, 'd') ...[d][c][b] 880 // // Next loop 881 // // remove(position) = 'b' ...[d][c] 882 // //for(int i = 0; i < origin_node.getChildCount(); i++) { 883 // for (int i=origin_node.getChildCount()-1; i>=0; i--) { 884 // child_record = (FileNode) origin_node.getChildAt(i); 885 // addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, false, position); 886 // } 887 // child_record = null; 888 // directory_record = null; 889 // } 890 // // The file wasn't found! 891 // else { 892 // cancel_action = true; 893 // // Show warning. 894 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE); 895 // // Force refresh of source folder. 896 // source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath())); 897 // } 898 899 // // If we haven't been cancelled and we created a new FileNode during the above phase, now is the time to deal with metadata 900 // if (!cancel_action && new_node != null) { 901 // // If the file came from inside our collection... 902 // if (job.source.toString().equals("Collection")) { 903 // // Get the non-folder level metadata assigned to the origin node... 
904 // ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file); 905 // // ...and remove it from the original node and assign it to the new folder 906 // MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata); 907 // MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata); 908 // } 909 // // If it came from the workspace search for metadata assigned to the file 910 // else if (job.source.toString().equals("Workspace")) { 911 // ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(origin_node.getFile()); 912 // MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata); 913 // } 914 915 // if (job.type == FileJob.COPY && new_node.getFile().isFile()) { 916 // Gatherer.c_man.fireFileAddedToCollection(new_node.getFile()); 917 // } 918 // } 919 // new_node = null; 920 // } 921 // } 922 // } 923 // // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycled bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the filesystem for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. Also be aware that if the user has cancelled just this action, because of say a name clash, then we shouldn't do any deleting of any sort dammit. 
924 // if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) { 925 // // Update the progress bar for this job 926 // if (source_file.isFile()) { 927 // progress.addValue(source_file.length()); 928 // } 929 930 // // If the source is a file or an empty directory (but not the root node of a tree) 931 // File[] child_list = source_file.listFiles(); 932 // if (source_file.isFile() || (child_list != null && child_list.length == 0 && origin_node.getParent() != null)) { 933 // // Update status area 934 // String args[] = new String[1]; 935 // args[0] = formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width); 936 // file_status.setText(Dictionary.get("FileActions.Deleting", args)); 937 938 // // If it is a metadata.xml file, we must unload it 939 // if (source_file.getName().equals(StaticStrings.METADATA_XML)) { 940 // MetadataXMLFileManager.unloadMetadataXMLFile(source_file); 941 // } 942 943 // // Remove the metadata assigned directly to the file 944 // ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(origin_node.getFile()); 945 // MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata); 946 947 // // Remove from model 948 // FileNode parent_record = (FileNode) origin_node.getParent(); 949 // if (parent_record != null) { 950 // SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node); 951 // } 952 953 // // Delete the source file 954 // if (!Utility.delete(source_file)) { 955 // // Show message that we couldn't delete 956 // JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Deleted_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Deleted_Title"), JOptionPane.ERROR_MESSAGE); 957 // } 958 // } 959 // // Else the source is a directory and it has children remaining 960 // else if(child_list != null && child_list.length > 0) { 961 // // Don't worry about all 
this for true file move actions. 962 // if(job.type == FileJob.DELETE) { 963 // // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea way, per sec, however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once. 964 // origin_node.refresh(); 965 // for(int i = 0; i < origin_node.size(); i++) { 966 // FileNode child_record = (FileNode) origin_node.getChildAtUnfiltered(i); 967 // ///atherer.println("Queuing: " + child_record); 968 // addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, false, position); 969 // } 970 // } 971 // // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue so sooner is later. Te-he. Also have to remember that we have have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory. 972 // // One special case. Do not requeue root nodes. Don't requeue jobs marked as done. 973 // if(origin_node.getParent() != null && !job.done) { 974 // ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath()); 975 // job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after. 976 // addJob(job, position); 977 // } 978 // else { 979 // DebugStream.println("I've already done this job twice. 
I refuse to requeue it again!"); 980 // } 981 // } 982 // } 983 // job = null; 984 // source_file = null; 985 // target_file = null; 986 // origin_node = null; 987 988 // // We only break out of the while loop if we are out of files or the action was cancelled 989 // if (cancel_action) { 990 // // Empty queue 991 // clearJobs(); 992 // cancel_action = false; 993 // } 994 // } 995 // else { // job == null 996 // // Disable stop button 997 // if (stop_button != null) { 998 // stop_button.setEnabled(false); 999 // } 1000 // synchronized(this) { 1001 // // Force both workspace and collection trees to refresh 1002 // if (Gatherer.g_man != null) { 1003 // Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED); 1004 // Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED); 1005 // } 1006 1007 // // Reset status area 1008 // file_status.setText(Dictionary.get("FileActions.No_Activity")); 1009 // progress.reset(); 1010 // progress.setString(Dictionary.get("FileActions.No_Activity")); 1011 // yes_to_all = false; 1012 // try { 1013 // wait(); 1014 // } 1015 // catch (InterruptedException exception) {} 1016 // } 1017 // } 1018 // } 1019 // catch (Exception error) { 1020 // DebugStream.printStackTrace(error); 1021 // } 1022 // } 1023 // } 565 1024 566 1025 … … 581 1040 * @param source The source directory 582 1041 * @param destination The destination directory 583 * @param progress A progress bar to monitor copying progress584 1042 * @see org.greenstone.gatherer.Gatherer 585 1043 */
Note:
See TracChangeset
for help on using the changeset viewer.