[32807] | 1 | import xlrd
|
---|
| 2 | import csv
|
---|
| 3 | import os
|
---|
[33204] | 4 | import re
|
---|
[32807] | 5 |
|
---|
[33188] | 6 | import xlsutil
|
---|
| 7 |
|
---|
# Maps the lower-cased first letter of a report identifier to the name of
# the alphabetical sub-folder ("A - B", ..., "W - Z") holding its PDF.
letter_to_folder = {}
for _letters in ("ab", "cd", "ef", "gh", "ij", "kl",
                 "mn", "op", "qr", "st", "uv", "wxyz"):
    _folder = _letters[0].upper() + " - " + _letters[-1].upper()
    for _letter in _letters:
        letter_to_folder[_letter] = _folder
def id_to_relative_pdf_file(id):
    """Return the relative path to the PDF for the given report identifier.

    The path has the form:
        Digital-Library-PDFs/Reports <letter-range>/<id>.pdf
    where <letter-range> is looked up from the identifier's first letter.
    """
    folder_tail = letter_to_folder[id[:1].lower()]
    return os.path.join("Digital-Library-PDFs",
                        "Reports " + folder_tail,
                        id + ".pdf")
# **** Written, but never used
# **** Col numbers refer to an older version of the spreadsheet
#
# There is now a mapping of header-name to index position which would be
# a better way to do things
def fixup_cell(col_num,cell_str):
    """Clean up a single spreadsheet cell value prior to CSV output.

    Column numbers (older spreadsheet layout):
      col == 1 => Author
      col == 2 => Title
      col == 4 => TLA
      col == 5 => AuthorityNo
      col == 6 => SiteNo

    Returns the (possibly modified) cell string; unrecognized columns
    are returned unchanged.
    """
    if col_num == 1: #Author
        # Strip boilerplate author suffixes ("et al", "Associates")
        cell_str = re.sub(r'\|?(et al)|(Associates)', '', cell_str)

    if col_num == 2: #Title
        # Detect any ALL-CAPS titles, and change to title-case.
        # BUG FIX: the original tested 'onlyletters.isupper' (the bound
        # method object, always truthy), so EVERY title was re-cased;
        # it must be called so only genuinely all-caps titles change.
        onlyletters = re.sub(r'[^A-Za-z]+', '', cell_str)
        if onlyletters.isupper():
            cell_str = cell_str.title()

    if col_num == 4: #TLA
        if cell_str == "All": cell_str = ""
        if cell_str == "n/a": cell_str = ""
        if cell_str == "various": cell_str = "" # not a meaningful TLA

    return cell_str
def excel_to_bound_pdf_csv(excel_filename):
    """Split an Excel spreadsheet of report metadata into two CSV files.

    Writes, alongside the input spreadsheet:
      * "PDF-BOUND <name>.csv" -- rows whose Report Identifier has a
        companion PDF on disk, with the PDF's relative path prepended
        as a 'Filename' column; and
      * "UNBOUND <name>.csv"   -- all remaining rows.

    NOTE(review): Python 2 code ('print' statements; csv files opened 'wb').
    """
    workbook = xlrd.open_workbook(excel_filename)

    # Worksheet selection comes from shared xlsutil configuration constants.
    worksheet = workbook.sheet_by_index(xlsutil.worksheet_index_pos)
##    worksheet = workbook.sheet_by_name(xlsutil.worksheet_name)

    # Derive the two output CSV names from the input file's directory + stem.
    excel_filename_split = os.path.split(excel_filename)
    excel_dirname = excel_filename_split[0]
    excel_file = excel_filename_split[1]
    excel_file_splitext = os.path.splitext(excel_file)
    excel_file_root = excel_file_splitext[0];

    pdfbound_filename = os.path.join(excel_dirname,"PDF-BOUND "+excel_file_root+".csv")
    unbound_filename = os.path.join(excel_dirname,"UNBOUND "+excel_file_root+".csv")

    csv_pdfbound_ofile = open(pdfbound_filename, 'wb')
    csv_unbound_ofile = open(unbound_filename, 'wb')

    # Quote every field so embedded commas/newlines in metadata stay safe.
    pdfbound_wr = csv.writer(csv_pdfbound_ofile, quoting=csv.QUOTE_ALL)
    unbound_wr = csv.writer(csv_unbound_ofile, quoting=csv.QUOTE_ALL)

    # 1. For header-line, build up hashmap of header-names to column number
    # 2. Write out header-line as Greenstone friendly Metadata terms
    #    => remove spaces, change "Report Identifier" to hnz.Identifier
    # 3. Process the rest of the file, checking if accompanying
    #    PDF file present or not and only printing out the header-names
    #    as long as it has a non-empty header-name
##    # specified in 'sanitized_headers'

    num_rows = worksheet.nrows

    # 1. Get header-line hashmap of header-names to column numbers.
    # Presumably maps each non-empty header cell to its column index and
    # returns None when the anchor header is missing -- TODO confirm in xlsutil.
    header_names_mapping = xlsutil.getHeaderMappingToColNum(worksheet)

    if header_names_mapping is None:
        print "Failed to find metadata label 'Report Identifier' in column 0"
        exit()
    else:
        # 2. Process Header into Greenstone friendly metadata terms

        entry_utf8_row = []
        num_header_cols = worksheet.row_len(xlsutil.header_row_pos)

        for col_j in range(num_header_cols):
##        for header_name in xlsutil.sanitized_headers:

            header_name = xlsutil.cell_value_tidy_unicode(worksheet,xlsutil.header_row_pos,col_j)

            # Skip columns whose header cell is empty
            if header_name:

                header_col_j = header_names_mapping[header_name]
##                if header_name in header_names_mapping:
##                    header_col_j = header_names_mapping[header_name]

##                header_name = xslutil.cell_value_tidy_unicode(worksheet,row_i,header_col_j)

                # Rename selected headers to their Greenstone metadata terms
                if header_name == u"Report Identifier":
                    header_name = u"hnz.Identifier"

                if header_name == u"Title":
                    header_name = u"dc.Title"

                if header_name == u"Relevant TLA's":
                    header_name = u"TLA"

                # Metadata names must contain no spaces
                header_name_utf8 = header_name.encode("utf-8")
                metadata_name_utf8 = header_name_utf8.replace(" ", "")

                entry_utf8_row.append(metadata_name_utf8)

                # Extra synthesized sort column (values filled in step 3)
                if header_name == u"Site No":
                    entry_utf8_row.append(u"SiteNoOrdering")

##            else:
##                print("Warning: Failed to column mapping in spreadsheet for header name \""+header_name+"\" => skipping")

        unbound_wr.writerow(entry_utf8_row)

        # Add in 'Filename' as first column in pdfbound CSV file
        entry_utf8_row.insert(0, "Filename")
        pdfbound_wr.writerow(entry_utf8_row)

        # 3. Process the rest of the file (metadata values) ...

        row_i = xlsutil.header_row_pos + 1
        while row_i<num_rows:
            num_cols = worksheet.row_len(row_i)
            formatted_utf8_row = []

            found_pdf = False

            for col_j in range(num_cols):
##            for header_name in xlsutil.sanitized_headers:
                header_name = xlsutil.cell_value_tidy_unicode(worksheet,xlsutil.header_row_pos,col_j)

                if header_name:
                    # NOTE(review): rebinds the loop variable 'col_j' from the
                    # header mapping; relies on the mapping agreeing with the
                    # left-to-right scan order -- confirm against xlsutil
                    col_j = header_names_mapping[header_name]
##                    if header_name in header_names_mapping:
##                        col_j = header_names_mapping[header_name]

                    formatted_cell_value = xlsutil.cell_value_tidy_unicode(worksheet,row_i,col_j)

                    if header_name == u"Report Identifier":
                        # Check to see if companion PDF file present,
                        # trying both ".pdf" and ".PDF" extensions
                        # pdf_filename = os.path.join(excel_dirname,"pdfs",formatted_cell_value+".pdf")
                        id = formatted_cell_value.replace(" ","-")

                        pdf_file = id_to_relative_pdf_file(id)
                        pdf_file_root, pdf_ext = os.path.splitext(pdf_file)
                        PDF_file = pdf_file_root + ".PDF"

                        pdf_filename = os.path.join(excel_dirname,pdf_file)
                        PDF_filename = os.path.join(excel_dirname,PDF_file)

                        if os.path.exists(pdf_filename):
                            found_pdf = True
                            formatted_utf8_row.insert(0, pdf_file)
                        elif os.path.exists(PDF_filename):
                            found_pdf = True
                            formatted_utf8_row.insert(0, PDF_file)
                        else:
                            print "Unbound id: '" + id + "'"

#                    if isinstance(formatted_cell_value, basestring):
#                        # Remove any trailing whitespace.
#                        # Newline at end particular harmful for a entry in the CSV file
#                        formatted_cell_value = formatted_cell_value.rstrip();

                    formatted_cell_value_utf8 = formatted_cell_value.encode("utf-8")

                    ## Perform any cell transformations to make DL used spreadsheet
                    ## cleaner to build
#                    formatted_cell_value_utf8 = fixup_cell(col_j,formatted_cell_value_utf8)

                    formatted_utf8_row.append(formatted_cell_value_utf8)

                    # Synthesize 'SiteNoOrdering': zero-pad the digits of a
                    # site code like "A12" to "A012" so lexicographic sorting
                    # of the CSV column matches numeric order
                    if header_name == u"Site No":
                        site_no = formatted_cell_value_utf8
                        site_no_ordering = None

                        if site_no and site_no.strip():
                            # Non-empty entry
                            pattern = re.compile('^([A-Z])(\d+)')
                            site_no_match = pattern.match(site_no)

                            if site_no_match:
                                site_no_primary_letter = site_no_match.group(1)
                                site_no_primary_num = int(site_no_match.group(2))

                                site_no_ordering = "%s%03d" % (site_no_primary_letter, site_no_primary_num)
                            else:
                                if site_no != "various":
                                    print "Warning: Site No '"+site_no+"' did not form Capital Letter followed by Digits"
                                    print "Leaving value unchanged for column 'Site No Ordering'"

                                site_no_ordering = site_no
                        else:
                            site_no_ordering = ""

                        formatted_utf8_row.append(site_no_ordering)

##                else:
##                    print "Warning: No column number mapping for header name \""+header_name+"\" => skipping"

            # Route the completed row to the appropriate CSV file
            if found_pdf:
                pdfbound_wr.writerow(formatted_utf8_row)
            else:
                unbound_wr.writerow(formatted_utf8_row)

            row_i = row_i + 1

    csv_pdfbound_ofile.close()
    csv_unbound_ofile.close()
if __name__ == "__main__":
    # Require the Excel spreadsheet path as the single command-line
    # argument; previously a missing argument crashed with an uncaught
    # IndexError instead of a usage message.
    if len(os.sys.argv) < 2:
        raise SystemExit("Usage: " + os.sys.argv[0] + " <excel-spreadsheet>")
    excel_to_bound_pdf_csv(os.sys.argv[1])