- Timestamp:
- 2003-10-29T12:15:36+13:00 (21 years ago)
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java
r5719 r5751 552 552 completed_folder_mappings.put(origin_node, recycle_folder_record); 553 553 ///ystem.err.println("Added completed directories mapping " + origin_node); 554 // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea way, per sec, however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once. 555 ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children."); 554 **/ 555 // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea way, per sec, however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once. 
556 ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children."); 556 557 ///ystem.err.println("Directory actually has " + child_list.length + " children."); 557 558 559 558 origin_node.unmap(); 559 origin_node.map(); 560 ///ystem.err.println("Directory has " + origin_node.getChildCount() + " children."); 560 561 ///ystem.err.println("Directory actually has " + child_list.length + " children."); 561 for(int i = 0; i < origin_node.getChildCount(); i++) {562 FileNode child_record = (FileNode) origin_node.getChildAt(i);563 addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position);564 if(recycle_folder_record != null) {565 recycle_folder_mappings.put(child_record, recycle_folder_record);566 }567 568 **/562 for(int i = 0; i < origin_node.getChildCount(); i++) { 563 FileNode child_record = (FileNode) origin_node.getChildAt(i); 564 addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, job.undo, false, false, position); 565 //if(recycle_folder_record != null) { 566 // recycle_folder_mappings.put(child_record, recycle_folder_record); 567 //} 568 } 569 569 570 } 570 571 // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue so sooner is later. Te-he. Also have to remember that we have have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory. 571 572 // One special case. Do not requeue root nodes. Don't requeue jobs marked as done. 572 573 if(origin_node.getParent() != null && !job.done) { 573 ///ystem.err.println("Requeue");574 System.err.println("Requeuing: " + origin_node.getFile().getAbsolutePath()); 574 575 job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after. 
575 576 addJob(job, position); 576 577 } 577 578 else { 578 ///ystem.err.println("I've already done this job twice. I refuse to requeue it again!!!");579 System.err.println("I've already done this job twice. I refuse to requeue it again!!!"); 579 580 } 580 581 }
Note:
See TracChangeset for help on using the changeset viewer.