Changeset 11621 for trunk


Timestamp:
2006-04-07T12:54:34+12:00
Author:
mdewsnip
Message:

Removed the monstrous old run() function, and a couple of other (now unused, since my rewrite of this class) functions.

File:
1 edited

  • trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java

--- trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java (r11598)
+++ trunk/gli/src/org/greenstone/gatherer/file/FileQueue.java (r11621)

@@ -81,12 +81,4 @@
     }

-    /** Requeue an existing job into the queue.
-     * @param job A previously created FileJob.
-     */
-//     synchronized private void addJob(FileJob job, int position) {
-//  job.done = true; // Ensure that the requeued job is marked as done.
-//  queue.add(position, job);
-//  notify();
-//     }

     /** Add a new job to the queue, specifiying as many arguments as is necessary to complete this type of job (ie delete needs no target information).
@@ -162,15 +154,4 @@


-//     private int countFolderDepth(File file)
-//     {
-//  int depth = 0;
-//  while (file != null) {
-//      depth++;
-//      file = file.getParentFile();
-//  }
-//  return depth;
-//     }
-
-
     /** Format the given filename path string so that it is no longer than the given width. If it is wider replace starting directories with ...
      * @param key The key <strong>String</Strong> used to retrieve a phrase from the dictionary for this item.
@@ -724,364 +705,4 @@
     }
     }
-
-
-    /** The run method exists in every thread, and here it is used to work its way through the queue of Jobs. If no jobs are waiting and it cans, it waits until a job arrives. If a job is present then it is either COPIED or DELETED, with the records being copied or removed as necessary, and directories being recursed through. Finally the user can press cancel to cause the loop to prematurely dump the job queue then wait.
-     * @see org.greenstone.gatherer.Gatherer
-     * @see org.greenstone.gatherer.collection.CollectionManager
-     * @see org.greenstone.gatherer.file.FileJob
-     * @see org.greenstone.gatherer.file.FileNode
-     * @see org.greenstone.gatherer.gui.GProgressBar
-     * @see org.greenstone.gatherer.util.Utility
-     */
-//     public void run()
-//     {
-//  super.setName("FileQueue");
-
-//  while (!Gatherer.exit) {
-//      try {
-//      // Retrieve the next job
-//      int position = queue.size() - 1;
-//      FileJob job = null;
-//      if (position >= 0) {
-//          job = (FileJob) queue.remove(position);
-//      }
-
-//      if (job != null) {
-//          ///ystem.err.println("Found job: " + job);
-//          // Enabled stop button
-//          stop_button.setEnabled(true);
-//          // The user can cancel this individual action at several places, so keep track if the state is 'ready' for the next step.
-//          boolean ready = true;
-//          FileNode origin_node = job.getOrigin();
-//          FileNode destination_node = job.getDestination();
-//          FileSystemModel source_model = (FileSystemModel)job.source.getTreeModel();
-//          FileSystemModel target_model = (FileSystemModel)job.target.getTreeModel();
-//          if(destination_node == null) {
-//          // Retrieve the root node of the target model instead. A delete, or course, has no target file so all deleted files are added to the root of the Recycle Bin model.
-//          destination_node = (FileNode) target_model.getRoot();
-//          }
-
-//          // Extract common job details.
-//          File source_file = origin_node.getFile();
-//          File target_file = null;
-//          // Determine the target file for a copy or move.
-//          if (job.type == FileJob.COPY || job.type == FileJob.MOVE) {
-//          // use the name of the filenode instead of the name of the file - these should be the same except for the collection directories where we want the collection name to be used, not 'import' which is the underlying name
-//          target_file = new File(destination_node.getFile(), origin_node.toString());
-//          }
-//          // To copy a file, copy it then add any metadata found at the source. If this file was already in our collection then we must ensure the lastest version of its metadata.xml has been saved to disk. To copy a directory simply create the directory at the destination, then add all of its children files as new jobs.
-//          if((job.type == FileJob.COPY || job.type == FileJob.MOVE) && !job.done) {
-//          ///ystem.err.println("Copy/Move: " + origin_node);
-
-//          // The number one thing to check is whether we are in a cyclic loop. The easiest way is just to check how deep we are
-//          int max_folder_depth = Configuration.getInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC);
-//          boolean continue_over_depth = false;
-//          if (countFolderDepth(source_file) > max_folder_depth) {
-//              Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("General.No"), Dictionary.get("FileActions.Increase_Depth") };
-//              String args[] = { String.valueOf(max_folder_depth), source_file.getAbsolutePath() };
-//              int result = JOptionPane.showOptionDialog(Gatherer.g_man, Utility.formatHTMLWidth(Dictionary.get("FileActions.Possible_Cyclic_Path", args), 80), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[1]);
-//              args = null;
-//              options = null;
-//              switch(result) {
-//              case 0: // Yes
-//              continue_over_depth = true;
-//              break;
-//              case 2: // Continue and increase depth
-//              continue_over_depth = true;
-//              Configuration.setInt("general.max_folder_depth", Configuration.COLLECTION_SPECIFIC, (max_folder_depth + 1));
-//              break;
-//              }
-//          }
-//          else {
-//              continue_over_depth = true;
-//          }
-
-//          if(continue_over_depth) {
-//              FileNode new_node = null;
-//              // Check if file exists, and action as necessary. Be aware the user can choose to cancel the action all together (where upon ready becomes false).
-//              if(target_file.exists()) {
-//              // We've previously been told
-//              if(yes_to_all) {
-//                  // Remove the old file and tree entry.
-//                  target_file.delete();
-//                  ready = true;
-//              }
-//              else {
-//                  ///atherer.println("Opps! This filename already exists. Give the user some options.");
-//                  Object[] options = { Dictionary.get("General.Yes"), Dictionary.get("FileActions.Yes_To_All"), Dictionary.get("General.No"), Dictionary.get("General.Cancel") };
-//                  int result = JOptionPane.showOptionDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Exists", target_file.getName()), Dictionary.get("General.Warning"), JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, options[0]);
-//                  switch(result) {
-//                  case 1: // Yes To All
-//                  yes_to_all = true;
-//                  case 0: // Yes
-//                  // Remove the old file and tree entry.
-//                  if(destination_node != null) {
-//                      TreePath destination_path = new TreePath(destination_node.getPath());
-//                      CollectionTreeNode temp_target_node = new CollectionTreeNode(target_file);  // !!! , target_model, true);
-//                      TreePath target_path = destination_path.pathByAddingChild(temp_target_node);
-//                      SynchronizedTreeModelTools.removeNodeFromParent(target_model, target_model.getNode(target_path));
-//                      target_path = null;
-//                      temp_target_node = null;
-//                      destination_path = null;
-//                  }
-//                  target_file.delete();
-//                  ready = true;
-//                  break;
-//                  case 3: // No To All
-//                  cancel_action = true;
-//                  case 2: // No
-//                  default:
-//                  ready = false;
-//                  // Increment progress by size of potentially copied file
-//                  progress.addValue(origin_node.getFile().length());
-//                  }
-//              }
-//              }
-//              // We proceed with the copy/move if the ready flag is still set. If it is that means there is no longer any existing file of the same name.
-//              if(ready) {
-//              // update status area
-//              String args[] = new String[1];
-//              args[0] = "" + (queue.size() + 1) + "";
-//              if(job.type == FileJob.COPY) {
-//                  args[0] = formatPath("FileActions.Copying", source_file.getAbsolutePath(), file_status.getSize().width);
-//                  file_status.setText(Dictionary.get("FileActions.Copying", args));
-//              }
-//              else {
-//                  args[0] = formatPath("FileActions.Moving", source_file.getAbsolutePath(), file_status.getSize().width);
-//                  file_status.setText(Dictionary.get("FileActions.Moving", args));
-//              }
-//              args = null;
-
-//              // If source is a file
-//              if(source_file.isFile()) {
-//                  // copy the file. If anything goes wrong the copy file should throw the appropriate exception. No matter what exception is thrown (bar an IOException) we display some message, perhaps take some action, then cancel the remainder of the pending file jobs. No point in being told your out of hard drive space for each one of six thousand files eh?
-//                  try {
-//                  copyFile(source_file, target_file, false);
-//                  progress.addValue(source_file.length());
-//                  }
-//                  // If we can't find the source file, then the most likely reason is that the file system has changed since the last time it was mapped. Warn the user that the requested file can't be found, then force a refresh of the source folder involved.
-//                  catch(FileNotFoundException fnf_exception) {
-//                  DebugStream.printStackTrace(fnf_exception);
-//                  cancel_action = true;
-//                  // Show warning.
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Force refresh of source folder.
-//                  source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
-//                  }
-//                  catch(FileAlreadyExistsException fae_exception) {
-//                  DebugStream.printStackTrace(fae_exception);
-//                  cancel_action = true;
-//                  // Show warning.
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Already_Exists_Message", target_file.getName()), Dictionary.get("FileActions.File_Already_Exists_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Nothing else can be done by the Gatherer.
-//                  }
-//                  catch(InsufficientSpaceException is_exception) {
-//                  DebugStream.printStackTrace(is_exception);
-//                  cancel_action = true;
-//                  // Show warning. The message body of the expection explains how much more space is required for this file copy.
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Insufficient_Space_Message", is_exception.getMessage()), Dictionary.get("FileActions.Insufficient_Space_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Nothing else can be done by the Gatherer. In fact if we are really out of space I'm not even sure we can quit safely.
-//                  }
-//                  catch (ReadNotPermittedException rnp_exception) {
-//                  if (DebugStream.isDebuggingEnabled()) {
-//                      DebugStream.printStackTrace(rnp_exception);
-//                  }
-//                  cancel_action = true;
-//                  // Show warning
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Read_Not_Permitted_Message", source_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Nothing else we can do.
-//                  }
-//                  catch(UnknownFileErrorException ufe_exception) {
-//                  DebugStream.printStackTrace(ufe_exception);
-//                  cancel_action = true;
-//                  // Show warning
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Unknown_File_Error_Message"), Dictionary.get("FileActions.Unknown_File_Error_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Nothing else we can do.
-//                  }
-//                  catch(WriteNotPermittedException wnp_exception) {
-//                  if (DebugStream.isDebuggingEnabled()) {
-//                      DebugStream.printStackTrace(wnp_exception);
-//                  }
-//                  cancel_action = true;
-//                  // Show warning
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.Write_Not_Permitted_Message", target_file.getAbsolutePath()), Dictionary.get("FileActions.Write_Not_Permitted_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Nothing else we can do.
-//                  }
-//                  catch(IOException exception) {
-//                  // Can't really do much about this.
-//                  DebugStream.printStackTrace(exception);
-//                  }
-//                  // If not cancelled
-//                  if (!cancel_action) {
-//                  // Create a dummy FileNode with the correct structure (so getPath works)
-//                  new_node = new CollectionTreeNode(target_file);
-//                  SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, new_node);
-//                  }
-//              }
-//              // Else
-//              else if(source_file.isDirectory()) {
-//                  // create new record
-//                  CollectionTreeNode directory_record = new CollectionTreeNode(target_file);
-//                  SynchronizedTreeModelTools.insertNodeInto(target_model, destination_node, directory_record);
-//                  // Why is this not happening eh?
-//                  directory_record.setParent(destination_node);
-//                  if(!target_file.exists()) {
-//                  // make the directory
-//                  target_file.mkdirs();
-//                  new_node = directory_record;
-//                  }
-
-//                  // Queue non-filtered child files for copying. If this directory already existed, the child records will have to generate the undo jobs, as we don't want to entirely delete this directory if it already existed.
-//                  FileNode child_record = null;
-//                  // In order to have a sane copy proceedure (rather than always copying last file first as it used to) we always add the child node at the position the parent was removed from. Consider the file job 'a' at the end of the queue which generates three new jobs 'b', 'c' and 'd'. The resulting flow should look like this.
-//                  // -- Starting queue                   ...[a]
-//                  // remove(position) = 'a'              ...
-//                  // add(position, 'b')                  ...[b]
-//                  // add(position, 'c')                  ...[c][b]
-//                  // add(position, 'd')                  ...[d][c][b]
-//                  // Next loop
-//                  // remove(position) = 'b'              ...[d][c]
-//                  //for(int i = 0; i < origin_node.getChildCount(); i++) {
-//                  for (int i=origin_node.getChildCount()-1; i>=0; i--) {
-//                  child_record = (FileNode) origin_node.getChildAt(i);
-//                  addJob(job.ID(), job.source, child_record, job.target, directory_record, job.type, false, position);
-//                  }
-//                  child_record = null;
-//                  directory_record = null;
-//              }
-//              // The file wasn't found!
-//              else {
-//                  cancel_action = true;
-//                  // Show warning.
-//                  JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Found_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Found_Title"), JOptionPane.ERROR_MESSAGE);
-//                  // Force refresh of source folder.
-//                  source_model.refresh(new TreePath(((FileNode)origin_node.getParent()).getPath()));
-//              }
-
-//              // If we haven't been cancelled and we created a new FileNode during the above phase, now is the time to deal with metadata
-//              if (!cancel_action && new_node != null) {
-//                  // If the file came from inside our collection...
-//                  if (job.source.toString().equals("Collection")) {
-//                  // Get the non-folder level metadata assigned to the origin node...
-//                  ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(source_file);
-//                  // ...and remove it from the original node and assign it to the new folder
-//                  MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata);
-//                  MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata);
-//                  }
-//                  // If it came from the workspace search for metadata assigned to the file
-//                  else if (job.source.toString().equals("Workspace")) {
-//                  ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToExternalFile(origin_node.getFile());
-//                  MetadataXMLFileManager.addMetadata((CollectionTreeNode) new_node, assigned_metadata);
-//                  }
-
-//                  if (job.type == FileJob.COPY && new_node.getFile().isFile()) {
-//                  Gatherer.c_man.fireFileAddedToCollection(new_node.getFile());
-//                  }
-//              }
-//              new_node = null;
-//              }
-//          }
-//          }
-//          // If we haven't been cancelled, and we've been asked to delete a directory/file, or perhaps as part of a move, we delete the file. This involves removing any existing metadata and then copying the file to the recycled bin (for a delete only), then deleting the file. When deleting a directory record from the tree (or from the filesystem for that matter) we must ensure that all of the descendant records have already been removed. If we fail to do this the delete will fail, or you will be bombarded with hundreds of 'Parent node of null not allowed' error messages. Also be aware that if the user has cancelled just this action, because of say a name clash, then we shouldn't do any deleting of any sort dammit.
-//          if(!cancel_action && ready && (job.type == FileJob.DELETE || job.type == FileJob.MOVE)) {
-//          // Update the progress bar for this job
-//          if (source_file.isFile()) {
-//              progress.addValue(source_file.length());
-//          }
-
-//          // If the source is a file or an empty directory (but not the root node of a tree)
-//          File[] child_list = source_file.listFiles();
-//          if (source_file.isFile() || (child_list != null && child_list.length == 0 && origin_node.getParent() != null)) {
-//              // Update status area
-//              String args[] = new String[1];
-//              args[0] = formatPath("FileActions.Deleting", source_file.getAbsolutePath(), file_status.getSize().width);
-//              file_status.setText(Dictionary.get("FileActions.Deleting", args));
-
-//              // If it is a metadata.xml file, we must unload it
-//              if (source_file.getName().equals(StaticStrings.METADATA_XML)) {
-//              MetadataXMLFileManager.unloadMetadataXMLFile(source_file);
-//              }
-
-//              // Remove the metadata assigned directly to the file
-//              ArrayList assigned_metadata = MetadataXMLFileManager.getMetadataAssignedDirectlyToFile(origin_node.getFile());
-//              MetadataXMLFileManager.removeMetadata((CollectionTreeNode) origin_node, assigned_metadata);
-
-//              // Remove from model
-//              FileNode parent_record = (FileNode) origin_node.getParent();
-//              if (parent_record != null) {
-//              SynchronizedTreeModelTools.removeNodeFromParent(source_model, origin_node);
-//              }
-
-//              // Delete the source file
-//              if (!Utility.delete(source_file)) {
-//              // Show message that we couldn't delete
-//              JOptionPane.showMessageDialog(Gatherer.g_man, Dictionary.get("FileActions.File_Not_Deleted_Message", source_file.getName()), Dictionary.get("FileActions.File_Not_Deleted_Title"), JOptionPane.ERROR_MESSAGE);
-//              }
-//          }
-//          // Else the source is a directory and it has children remaining
-//          else if(child_list != null && child_list.length > 0) {
-//              // Don't worry about all this for true file move actions.
-//              if(job.type == FileJob.DELETE) {
-//              // queue all of its children, (both filtered and non-filtered), but for deleting only. Don't queue jobs for a current move event, as they would be queued as part of copying. I have no idea way, per sec, however the children within the origin node are always invalid during deletion (there are several copies of some nodes?!?). I'll check that each child is only added once.
-//              origin_node.refresh();
-//              for(int i = 0; i < origin_node.size(); i++) {
-//                  FileNode child_record = (FileNode) origin_node.getChildAtUnfiltered(i);
-//                  ///atherer.println("Queuing: " + child_record);
-//                  addJob(job.ID(), job.source, child_record, job.target, destination_node, FileJob.DELETE, false, position);
-//              }
-//              }
-//              // Requeue a delete job -after- the children have been dealt with. Remember I've reversed the direction of the queue so sooner is later. Te-he. Also have to remember that we have have followed this path to get here for a move job: Copy Directory -> Queue Child Files -> Delete Directory (must occur after child files) -> Queue Directory.
-//              // One special case. Do not requeue root nodes. Don't requeue jobs marked as done.
-//              if(origin_node.getParent() != null && !job.done) {
-//              ///atherer.println("Requeuing: " + origin_node.getFile().getAbsolutePath());
-//              job.type = FileJob.DELETE; // You only requeue jobs that are deletes, as directories must be inspected before children, but deleted after.
-//              addJob(job, position);
-//              }
-//              else {
-//              DebugStream.println("I've already done this job twice. I refuse to requeue it again!");
-//              }
-//          }
-//          }
-//          job = null;
-//          source_file = null;
-//          target_file = null;
-//          origin_node = null;
-
-//          // We only break out of the while loop if we are out of files or the action was cancelled
-//          if (cancel_action) {
-//          // Empty queue
-//          clearJobs();
-//          cancel_action = false;
-//          }
-//      }
-//      else { // job == null
-//          // Disable stop button
-//          if (stop_button != null) {
-//          stop_button.setEnabled(false);
-//          }
-//          synchronized(this) {
-//          // Force both workspace and collection trees to refresh
-//          if (Gatherer.g_man != null) {
-//              Gatherer.g_man.refreshWorkspaceTree(DragTree.COLLECTION_CONTENTS_CHANGED);
-//              Gatherer.g_man.refreshCollectionTree(DragTree.COLLECTION_CONTENTS_CHANGED);
-//          }
-
-//          // Reset status area
-//          file_status.setText(Dictionary.get("FileActions.No_Activity"));
-//          progress.reset();
-//          progress.setString(Dictionary.get("FileActions.No_Activity"));
-//          yes_to_all = false;
-//          try {
-//              wait();
-//          }
-//          catch (InterruptedException exception) {}
-//          }
-//      }
-//      }
-//      catch (Exception error) {
-//      DebugStream.printStackTrace(error);
-//      }
-//  }
-//     }


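For context, the removed run() method implemented a classic single-consumer work queue: addJob() appends a FileJob and calls notify(), while the worker thread pops jobs off the tail of the queue, processes them, and wait()s when the queue is empty. The sketch below illustrates only that wait()/notify() pattern as it appears in the removed code; the class and method names (JobQueueSketch, shutdown) are hypothetical and this is not the rewritten FileQueue introduced by this changeset.

    import java.util.ArrayList;

    // Minimal illustration of the wait()/notify() worker-queue pattern used by the
    // old FileQueue.run(): producers enqueue jobs and notify(), the worker thread
    // takes jobs from the tail of the list and wait()s when nothing is queued.
    // All names are illustrative; this is not the GLI implementation.
    public class JobQueueSketch extends Thread {

        private final ArrayList<Runnable> queue = new ArrayList<Runnable>();
        private volatile boolean exit = false;

        // Producer side: add a job and wake the worker (cf. the old addJob()).
        public synchronized void addJob(Runnable job) {
            queue.add(job);
            notify();
        }

        public synchronized void shutdown() {
            exit = true;
            notify();
        }

        // Consumer side: pop from the end of the queue, run the job, wait when idle.
        public void run() {
            while (!exit) {
                Runnable job = null;
                synchronized (this) {
                    int position = queue.size() - 1;
                    if (position >= 0) {
                        job = queue.remove(position);
                    }
                    else {
                        try {
                            wait();          // queue is empty: sleep until notify()
                        }
                        catch (InterruptedException e) {}
                        continue;
                    }
                }
                try {
                    job.run();               // process the job outside the lock
                }
                catch (Exception e) {
                    e.printStackTrace();     // the old loop logged errors and carried on
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            JobQueueSketch worker = new JobQueueSketch();
            worker.start();
            worker.addJob(new Runnable() { public void run() { System.out.println("job 1"); } });
            worker.addJob(new Runnable() { public void run() { System.out.println("job 2"); } });
            Thread.sleep(200);
            worker.shutdown();
            worker.join();
        }
    }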