phyloforfun committed
Commit 1881d06
1 Parent(s): 3ef828a

mammal collage for Field Museum

app.py CHANGED

@@ -468,11 +468,11 @@ def content_input_images(col_left, col_right):
         pass
 
     with col_left:
-        # if st.session_state.is_hf:
-        st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads')
-        st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small')
-        uploaded_files = st.file_uploader("Upload Images", type=['jpg', 'jpeg','pdf'], accept_multiple_files=True, key=st.session_state['uploader_idk'])
-        st.button("Use Test Image",help="This will clear any uploaded images and load the 1 provided test image.",on_click=use_test_image)
+        if st.session_state.is_hf:
+            st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads')
+            st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small')
+            uploaded_files = st.file_uploader("Upload Images", type=['jpg', 'jpeg','pdf'], accept_multiple_files=True, key=st.session_state['uploader_idk'])
+            st.button("Use Test Image",help="This will clear any uploaded images and load the 1 provided test image.",on_click=use_test_image)
 
     with col_right:
         if st.session_state.is_hf:
@@ -2218,11 +2218,52 @@ def content_collage_overlay():
     st.markdown("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. Showing just the text labels to the OCR algorithms significantly improves performance. This runs slowly on the free Hugging Face Space, but runs quickly with a fast CPU or any GPU.")
     st.markdown("Images that are mostly text (like a scanned notecard, or already cropped images) do not require LM2 collage.")
 
-    if st.session_state.is_hf:
-        st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox(":rainbow[Use LeafMachine2 label collage for transcriptions]", st.session_state.config['leafmachine'].get('use_RGB_label_images', False), key='do make collage hf')
-    else:
-        st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox(":rainbow[Use LeafMachine2 label collage for transcriptions]", st.session_state.config['leafmachine'].get('use_RGB_label_images', True), key='do make collage local')
+    # if st.session_state.is_hf:
+    #     st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox(":rainbow[Use LeafMachine2 label collage for transcriptions]", st.session_state.config['leafmachine'].get('use_RGB_label_images', False), key='do make collage hf')
+    # else:
+    #     st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox(":rainbow[Use LeafMachine2 label collage for transcriptions]", st.session_state.config['leafmachine'].get('use_RGB_label_images', True), key='do make collage local')
+
+    # Set the options for the transcription method radio button
+    options = {
+        0: "Use LeafMachine2 label collage for transcriptions",
+        1: "Use original images for transcriptions",
+        2: "Use specimen collage for transcriptions"
+    }
+
+    # Determine the default index based on the current configuration
+    default_index = st.session_state.config['leafmachine'].get('use_RGB_label_images', 0)
+
+    # Create the radio button for transcription method selection
+    selected_option = st.radio(
+        "Select the transcription method:",
+        options=list(options.values()),
+        index=default_index
+    )
+
+    # Update the session state based on the selected option
+    selected_index = list(options.values()).index(selected_option)
+    st.session_state.config['leafmachine']['use_RGB_label_images'] = selected_index
+
+    # If "Use specimen collage for transcriptions" is selected, show another radio button for rotation options
+    if selected_index == 2:
+        rotation_options = {
+            True: "Rotate clockwise",
+            False: "Rotate counterclockwise"
+        }
+
+        # Determine the default rotation direction
+        default_rotation = st.session_state.config['leafmachine']['project'].get('specimen_rotate', True)
+
+        # Create the radio button for rotation direction selection
+        selected_rotation = st.radio(
+            "Select the rotation direction:",
+            options=list(rotation_options.values()),
+            index=0 if default_rotation else 1
+        )
+
+        # Update the configuration based on the selected rotation direction
+        st.session_state.config['leafmachine']['project']['specimen_rotate'] = selected_rotation == "Rotate clockwise"
 
     option_selected_crops = st.multiselect(label="Components to crop",
         options=['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',
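
Note on the new setting: use_RGB_label_images changes from a boolean to an integer in this commit, with 0 = LeafMachine2 label collage, 1 = original images, and 2 = specimen collage (per the options dict above). A minimal standalone sketch of how downstream code can branch on that integer; the helper name and return strings below are illustrative assumptions, not part of this commit:

    # Hypothetical dispatch helper illustrating the 0/1/2 convention of use_RGB_label_images
    def resolve_transcription_source(cfg):
        mode = cfg['leafmachine']['use_RGB_label_images']
        if mode == 0:
            return 'lm2_label_collage'   # LeafMachine2 label collage
        if mode == 1:
            return 'original_images'     # unmodified input images
        if mode == 2:
            return 'specimen_collage'    # stacked collage built by create_specimen_collage
        raise ValueError(f"Unexpected use_RGB_label_images value: {mode}")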
vouchervision/VoucherVision_Config_Builder.py CHANGED

@@ -53,7 +53,7 @@ def build_VV_config(loaded_cfg=None):
 
     LLM_version_user = 'Gemini 1.5 Flash' # 'Azure GPT 4' #'Azure GPT 4 Turbo 1106-preview'
     prompt_version = 'SLTPvM_long.yaml' # from ["Version 1", "Version 1 No Domain Knowledge", "Version 2"]
-    use_LeafMachine2_collage_images = True # Use LeafMachine2 collage images
+    use_LeafMachine2_collage_images = 0 # Use LeafMachine2 collage images [0, 1, 2]
     do_create_OCR_helper_image = True
 
     batch_size = 500
@@ -65,6 +65,8 @@ def build_VV_config(loaded_cfg=None):
     path_domain_knowledge = '' #os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')
     embeddings_database_name = '' #os.path.splitext(os.path.basename(path_domain_knowledge))[0]
 
+    specimen_rotate = False # False = counterclockwise, True = clockwise
+
     #############################################
     #############################################
     ########## DO NOT EDIT BELOW HERE ###########
@@ -75,7 +77,7 @@ def build_VV_config(loaded_cfg=None):
                 path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,
                 prompt_version, do_create_OCR_helper_image, do_use_trOCR, do_use_florence, trOCR_model_path, florence_model_path, OCR_option, OCR_option_llava,
                 OCR_option_llava_bit, OCR_GPT_4o_mini_resolution, double_OCR, save_cropped_annotations,
-                tool_GEO, tool_WFO, tool_wikipedia,
+                tool_GEO, tool_WFO, tool_wikipedia, specimen_rotate,
                 check_for_illegal_filenames, skip_vertical, pdf_conversion_dpi, use_domain_knowledge=False)
     else:
         dir_home = os.path.dirname(os.path.dirname(__file__))
@@ -103,6 +105,8 @@ def build_VV_config(loaded_cfg=None):
         tool_WFO = loaded_cfg['leafmachine']['project']['tool_WFO']
         tool_wikipedia = loaded_cfg['leafmachine']['project']['tool_wikipedia']
 
+        specimen_rotate = loaded_cfg['leafmachine']['project']['specimen_rotate']
+
         pdf_conversion_dpi = loaded_cfg['leafmachine']['project']['pdf_conversion_dpi']
 
         LLM_version_user = loaded_cfg['leafmachine']['LLM_version']
@@ -125,7 +129,7 @@ def build_VV_config(loaded_cfg=None):
                 path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,
                 prompt_version, do_create_OCR_helper_image, do_use_trOCR, do_use_florence, trOCR_model_path, florence_model_path, OCR_option, OCR_option_llava,
                 OCR_option_llava_bit, OCR_GPT_4o_mini_resolution, double_OCR, save_cropped_annotations,
-                tool_GEO, tool_WFO, tool_wikipedia,
+                tool_GEO, tool_WFO, tool_wikipedia, specimen_rotate,
                 check_for_illegal_filenames, skip_vertical, pdf_conversion_dpi, use_domain_knowledge=False)
 
 
@@ -134,7 +138,7 @@ def assemble_config(dir_home, run_name, dir_images_local,dir_output,
                 path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images,
                 prompt_version, do_create_OCR_helper_image_user, do_use_trOCR, do_use_florence, trOCR_model_path, florence_model_path, OCR_option, OCR_option_llava,
                 OCR_option_llava_bit, OCR_GPT_4o_mini_resolution, double_OCR, save_cropped_annotations,
-                tool_GEO, tool_WFO, tool_wikipedia,
+                tool_GEO, tool_WFO, tool_wikipedia, specimen_rotate,
                 check_for_illegal_filenames, skip_vertical, pdf_conversion_dpi, use_domain_knowledge=False):
 
 
@@ -191,6 +195,7 @@ def assemble_config(dir_home, run_name, dir_images_local,dir_output,
         'tool_GEO': tool_GEO,
        'tool_WFO': tool_WFO,
        'tool_wikipedia': tool_wikipedia,
+       'specimen_rotate': specimen_rotate,
    }
 
    modules_section = {
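
Note on loading older configs: build_VV_config now reads the new key with loaded_cfg['leafmachine']['project']['specimen_rotate'], so a config file saved before this commit would raise a KeyError at that line. A more defensive variant (not what this commit does, shown only as a sketch) would fall back to the same default used in the non-loaded branch:

    # Hypothetical fallback for configs written before 'specimen_rotate' existed
    specimen_rotate = loaded_cfg['leafmachine']['project'].get('specimen_rotate', False)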
vouchervision/VoucherVision_GUI.py CHANGED

@@ -1576,8 +1576,22 @@ def content_collage_overlay():
     with col_cropped_1:
         default_crops = st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations']
         st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. (Requires GPU)")
-        st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False))
+        # Set the options for the radio button
+        options = {
+            "Use LeafMachine2 label collage for transcriptions": "use_RGB_label_images",
+            "Use specimen collage for transcriptions": "use_specimen_collage"
+        }
+
+        # Create the radio button with the available options
+        selected_option = st.radio(
+            "Select the transcription method:",
+            options=list(options.keys()),
+            index=0 if st.session_state.config['leafmachine'].get('use_RGB_label_images', False) else 1
+        )
+
+        # Update the session state based on the selected option
+        st.session_state.config['leafmachine']['use_RGB_label_images'] = (selected_option == "Use LeafMachine2 label collage for transcriptions")
+        st.session_state.config['leafmachine']['project']['use_specimen_collage'] = (selected_option == "Use specimen collage for transcriptions")
 
         option_selected_crops = st.multiselect(label="Components to crop",
             options=['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',
vouchervision/data_project.py CHANGED

@@ -116,14 +116,24 @@ class Project_Info():
         n_total = len(os.listdir(dir_images))
         for file in tqdm(os.listdir(dir_images), desc=f'{bcolors.HEADER} Copying images to working directory{bcolors.ENDC}', colour="white", position=0, total=n_total):
             source = os.path.join(dir_images, file)
+
+            # Split the filename and the extension
+            filename, ext = os.path.splitext(file)
+
+            # Convert the extension to lower case
+            ext_lower = ext.lower()
+
+            # Reconstruct the file name with the lower-cased extension
+            file_with_lower_ext = f"{filename}{ext_lower}"
+
             # Check if file is a PDF
-            if file.lower().endswith('.pdf'):
+            if ext_lower == '.pdf':
                 # Convert PDF pages to JPG images
                 n_pages = convert_pdf_to_jpg(source, Dirs.save_original)
                 self.logger.info(f"Converted {n_pages} pages to JPG from PDF: {file}")
             else:
-                # Copy non-PDF files directly
-                destination = os.path.join(Dirs.save_original, file)
+                # Copy non-PDF files directly, using the filename with the lower-cased extension
+                destination = os.path.join(Dirs.save_original, file_with_lower_ext)
                 shutil.copy(source, destination)
 
     def make_file_names_custom(self, dir_images, cfg, Dirs):
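
A standalone sketch of the extension normalization added above, runnable on its own; the filenames are hypothetical and only illustrate the behavior:

    import os

    for file in ["FMNH_6238_a.JPG", "FMNH_6238_b.jpeg", "scan_0001.PDF"]:
        filename, ext = os.path.splitext(file)
        ext_lower = ext.lower()
        file_with_lower_ext = f"{filename}{ext_lower}"
        # PDFs go to convert_pdf_to_jpg; everything else is copied with the lower-cased extension
        print(file_with_lower_ext, "-> PDF conversion" if ext_lower == '.pdf' else "-> direct copy")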
vouchervision/general_utils.py CHANGED

@@ -9,6 +9,7 @@ import numpy as np
 import concurrent.futures
 from time import perf_counter
 import torch
+from collections import defaultdict
 
 try:
     from vouchervision.model_maps import ModelMaps
@@ -928,6 +929,7 @@ def crop_detections_from_images(cfg, logger, dir_home, Project, Dirs, batch_size
 def crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs, batch_size=50):
     t2_start = perf_counter()
     logger.name = 'Crop Components'
+
 
     if cfg['leafmachine']['cropped_components']['do_save_cropped_annotations']:
         detections = cfg['leafmachine']['cropped_components']['save_cropped_annotations']
@@ -1152,6 +1154,7 @@ def crop_component_from_yolo_coords_VV(anno_type, Dirs, analysis, all_detections
                 # print(f'detection_class: {detection_class} not in save_list: {save_list}')
                 pass
 
+    ### Below creates the LM2 Label Collage image
     # Initialize a list to hold all the acceptable cropped images
     acceptable_cropped_images = []
 
@@ -1243,7 +1246,68 @@ def crop_component_from_yolo_coords_VV(anno_type, Dirs, analysis, all_detections
         original_image_name = '.'.join([filename,'jpg'])
         cv2.imwrite(os.path.join(Dirs.save_original, original_image_name), full_image)
 
+def create_specimen_collage(cfg, logger, dir_home, Project, Dirs):
+    if cfg['leafmachine']['use_RGB_label_images'] == 2:
+        # Get all filenames in the save_original directory that end with .jpg or .jpeg
+        filenames = [f for f in os.listdir(Dirs.save_original) if f.lower().endswith(('.jpg', '.jpeg'))]
+
+        # Dictionary to group filenames by their file stem (e.g., FMNH_6238)
+        grouped_filenames = defaultdict(list)
+
+        for filename in filenames:
+            parts = filename.rsplit('_', 1)
+            if len(parts) == 2 and parts[1][0].isalnum():
+                file_stem = parts[0]
+                grouped_filenames[file_stem].append(filename)
+            else:
+                logger.warning(f"Filename {filename} does not match expected pattern. Skipping.")
+
+        # Process each group of images
+        for file_stem, group in grouped_filenames.items():
+            # Load all cropped images for the current group
+            cropped_images = [cv2.imread(os.path.join(Dirs.save_original, filename)) for filename in group]
+
+            if not cropped_images:
+                logger.error(f"No images found for {file_stem}. Skipping collage creation.")
+                continue
+
+            # Rotate images so that width is greater than height
+            for i, img in enumerate(cropped_images):
+                if img.shape[0] > img.shape[1]:  # height > width
+                    if cfg['leafmachine']['project']['specimen_rotate']:
+                        cropped_images[i] = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
+                    else:
+                        cropped_images[i] = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+
+            # Calculate the maximum width and total height required for the collage
+            max_width = max(img.shape[1] for img in cropped_images)
+            total_height = sum(img.shape[0] for img in cropped_images)
+
+            # Create a black image with the required dimensions
+            collage_image = np.zeros((total_height, max_width, 3), dtype=np.uint8)
+
+            # Stack images on top of each other
+            y_offset = 0
+            for img in cropped_images:
+                collage_image[y_offset:y_offset+img.shape[0], :img.shape[1]] = img
+                y_offset += img.shape[0]
+
+            # Generate the combined filename from the file stem
+            collage_filename = f"{file_stem}_collage.jpg"
+
+            # Save the collage image
+            collage_destination = os.path.join(Dirs.save_per_annotation_class, 'label', collage_filename)
+            validate_dir(os.path.dirname(collage_destination))
+            cv2.imwrite(collage_destination, collage_image)
+            logger.info(f"Saved collage image: {collage_destination}")
+
+            # Save each individual image separately
+            for filename in group:
+                original_image_name = os.path.basename(filename)
+                save_destination = os.path.join(Dirs.save_original, original_image_name)
+                validate_dir(os.path.dirname(save_destination))
+                cv2.imwrite(save_destination, cv2.imread(os.path.join(Dirs.save_original, filename)))
+                logger.info(f"Saved original image: {save_destination}")
 
 def crop_component_from_yolo_coords(anno_type, Dirs, analysis, all_detections, full_image, filename, save_per_image, save_per_class, save_list):
     height = analysis['height']
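
A small standalone sketch of the filename-grouping rule that create_specimen_collage uses (everything before the last underscore becomes the stem); the filenames here are hypothetical:

    from collections import defaultdict

    grouped = defaultdict(list)
    for filename in ["FMNH_6238_1.jpg", "FMNH_6238_2.jpg", "FMNH_7741_1.jpg"]:
        parts = filename.rsplit('_', 1)
        if len(parts) == 2 and parts[1][0].isalnum():
            grouped[parts[0]].append(filename)

    print(dict(grouped))
    # {'FMNH_6238': ['FMNH_6238_1.jpg', 'FMNH_6238_2.jpg'], 'FMNH_7741': ['FMNH_7741_1.jpg']}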
vouchervision/vouchervision_main.py CHANGED

@@ -8,7 +8,7 @@ from time import perf_counter
 # sys.path.append(parentdir)
 # sys.path.append(currentdir)
 from vouchervision.component_detector.component_detector import detect_plant_components, detect_archival_components
-from vouchervision.general_utils import save_token_info_as_csv, print_main_start, check_for_subdirs_VV, load_config_file, load_config_file_testing, report_config, save_config_file, crop_detections_from_images_VV
+from vouchervision.general_utils import create_specimen_collage, save_token_info_as_csv, print_main_start, check_for_subdirs_VV, load_config_file, load_config_file_testing, report_config, save_config_file, crop_detections_from_images_VV
 from vouchervision.directory_structure_VV import Dir_Structure
 from vouchervision.data_project import Project_Info
 from vouchervision.LM2_logger import start_logging
@@ -62,6 +62,10 @@ def voucher_vision(cfg_file_path, dir_home, path_custom_prompts, cfg_test, progr
     # Save cropped detections
     crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)
 
+
+    create_specimen_collage(cfg, logger, dir_home, Project, Dirs)
+
+
     # Process labels
     Voucher_Vision = VoucherVision(cfg, logger, dir_home, path_custom_prompts, Project, Dirs, is_hf)
     n_images = len(Voucher_Vision.img_paths)