phyloforfun committed
Commit d0291ae
1 Parent(s): a145e37

fixing pdfs

api_cost/api_cost.yaml CHANGED
@@ -61,8 +61,8 @@ GEMINI_PRO:
   in: 0.0005
   out: 0.0015
 GEMINI_1_5_FLASH:
-  in: 0.00035
-  out: 0.00105
+  in: 0.000075
+  out: 0.00030
 GEMINI_1_5_PRO:
   in: 0.0035
   out: 0.0105
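A minimal sketch of how these rates translate into a per-call cost, assuming api_cost.yaml stores USD per 1,000 tokens and each model is a top-level key with `in`/`out` values; the helper name is illustrative, not part of the repo.

    import yaml

    def estimate_call_cost(yaml_path: str, model_key: str, tokens_in: int, tokens_out: int) -> float:
        # Load the rate table and compute prompt + completion cost in USD
        with open(yaml_path, 'r') as f:
            rates = yaml.safe_load(f)
        model = rates[model_key]
        return (tokens_in / 1000.0) * model['in'] + (tokens_out / 1000.0) * model['out']

    # Example: 2,000 prompt tokens + 500 output tokens with the new GEMINI_1_5_FLASH rates
    # -> 2 * 0.000075 + 0.5 * 0.00030 = 0.0003 USD
    print(estimate_call_cost('api_cost/api_cost.yaml', 'GEMINI_1_5_FLASH', 2000, 500))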
app.py CHANGED
@@ -254,7 +254,6 @@ def load_gallery(converted_files, uploaded_file):
 
 
 
-
 def handle_image_upload_and_gallery_hf(uploaded_files):
     SAFE = SafetyCheck(st.session_state['is_hf'])
     if uploaded_files:
@@ -271,37 +270,46 @@ def handle_image_upload_and_gallery_hf(uploaded_files):
                 st.error("Warning: You uploaded an image that violates our terms of service.")
                 return True
 
+            # Save the uploaded file (PDF or image)
+            file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file)
+
+            if not file_path:
+                st.error(f"Failed to process the file: {uploaded_file.name}")
+                continue # Skip to the next file
 
             # Determine the file type
             if uploaded_file.name.lower().endswith('.pdf'):
-                # Handle PDF files
-                file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file)
-                # Convert each page of the PDF to an image
-                n_pages = convert_pdf_to_jpg(file_path, st.session_state['dir_uploaded_images'], dpi=200)#st.session_state.config['leafmachine']['project']['dir_images_local'])
-                # Update the input list for each page image
-                converted_files = os.listdir(st.session_state['dir_uploaded_images'])
-                for file_name in converted_files:
-                    if file_name.split('.')[1].lower() in ['jpg','jpeg']:
-                        ind_small += 1
-                        jpg_file_path = os.path.join(st.session_state['dir_uploaded_images'], file_name)
-                        st.session_state['input_list'].append(jpg_file_path)
-
-                        if ind_small < MAX_GALLERY_IMAGES +5:
-                            # Optionally, create a thumbnail for the gallery
-                            img = Image.open(jpg_file_path)
-                            img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS)
-                            try:
-                                file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], file_name, img)
-                            except:
-                                file_path_small = save_uploaded_file_local(st.session_state['dir_uploaded_images_small'],st.session_state['dir_uploaded_images_small'], file_name, img)
-                            st.session_state['input_list_small'].append(file_path_small)
-
+                try:
+                    # Convert each page of the PDF to an image
+                    n_pages = convert_pdf_to_jpg(file_path, st.session_state['dir_uploaded_images'], dpi=200)
+                    if n_pages == 0:
+                        st.error(f"No pages were converted from the PDF: {uploaded_file.name}")
+                        continue # Skip to the next file
+
+                    # Update the input list for each page image
+                    converted_files = os.listdir(st.session_state['dir_uploaded_images'])
+                    for file_name in converted_files:
+                        if file_name.split('.')[1].lower() in ['jpg', 'jpeg']:
+                            ind_small += 1
+                            jpg_file_path = os.path.join(st.session_state['dir_uploaded_images'], file_name)
+                            st.session_state['input_list'].append(jpg_file_path)
+
+                            if ind_small < MAX_GALLERY_IMAGES + 5:
+                                # Create a thumbnail for the gallery
+                                img = Image.open(jpg_file_path)
+                                img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS)
+                                file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], jpg_file_path, img)
+                                st.session_state['input_list_small'].append(file_path_small)
+
+                except Exception as e:
+                    st.error(f"Failed to process PDF file {uploaded_file.name}. Error: {e}")
+                    continue # Skip to the next file
+
             else:
-                ind_small += 1
                 # Handle JPG/JPEG files (existing process)
-                file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file)
+                ind_small += 1
                 st.session_state['input_list'].append(file_path)
-                if ind_small < MAX_GALLERY_IMAGES +5:
+                if ind_small < MAX_GALLERY_IMAGES + 5:
                     img = Image.open(file_path)
                     img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS)
                     file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], uploaded_file, img)
@@ -313,15 +321,80 @@ def handle_image_upload_and_gallery_hf(uploaded_files):
 
         if st.session_state['input_list_small']:
             if len(st.session_state['input_list_small']) > MAX_GALLERY_IMAGES:
-                # Only take the first 100 images from the list
                 images_to_display = st.session_state['input_list_small'][:MAX_GALLERY_IMAGES]
             else:
-                # If there are less than 100 images, take them all
                 images_to_display = st.session_state['input_list_small']
             show_gallery_small_hf(images_to_display)
 
        return False
 
+# def handle_image_upload_and_gallery_hf(uploaded_files): # not working with pdfs
+#     SAFE = SafetyCheck(st.session_state['is_hf'])
+#     if uploaded_files:
+
+#         # Clear input image gallery and input list
+#         clear_image_uploads()
+
+#         ind_small = 0
+#         for uploaded_file in uploaded_files:
+
+#             if SAFE.check_for_inappropriate_content(uploaded_file):
+#                 clear_image_uploads()
+#                 report_violation(uploaded_file.name, is_hf=st.session_state['is_hf'])
+#                 st.error("Warning: You uploaded an image that violates our terms of service.")
+#                 return True
+
+
+#             # Determine the file type
+#             if uploaded_file.name.lower().endswith('.pdf'):
+#                 # Handle PDF files
+#                 file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file)
+#                 # Convert each page of the PDF to an image
+#                 n_pages = convert_pdf_to_jpg(file_path, st.session_state['dir_uploaded_images'], dpi=200)#st.session_state.config['leafmachine']['project']['dir_images_local'])
+#                 # Update the input list for each page image
+#                 converted_files = os.listdir(st.session_state['dir_uploaded_images'])
+#                 for file_name in converted_files:
+#                     if file_name.split('.')[1].lower() in ['jpg','jpeg']:
+#                         ind_small += 1
+#                         jpg_file_path = os.path.join(st.session_state['dir_uploaded_images'], file_name)
+#                         st.session_state['input_list'].append(jpg_file_path)
+
+#                         if ind_small < MAX_GALLERY_IMAGES +5:
+#                             # Optionally, create a thumbnail for the gallery
+#                             img = Image.open(jpg_file_path)
+#                             img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS)
+#                             try:
+#                                 file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], file_name, img)
+#                             except:
+#                                 file_path_small = save_uploaded_file_local(st.session_state['dir_uploaded_images_small'],st.session_state['dir_uploaded_images_small'], file_name, img)
+#                             st.session_state['input_list_small'].append(file_path_small)
+
+#             else:
+#                 ind_small += 1
+#                 # Handle JPG/JPEG files (existing process)
+#                 file_path = save_uploaded_file(st.session_state['dir_uploaded_images'], uploaded_file)
+#                 st.session_state['input_list'].append(file_path)
+#                 if ind_small < MAX_GALLERY_IMAGES +5:
+#                     img = Image.open(file_path)
+#                     img.thumbnail((GALLERY_IMAGE_SIZE, GALLERY_IMAGE_SIZE), Image.Resampling.LANCZOS)
+#                     file_path_small = save_uploaded_file(st.session_state['dir_uploaded_images_small'], uploaded_file, img)
+#                     st.session_state['input_list_small'].append(file_path_small)
+
+#         # After processing all files
+#         st.session_state.config['leafmachine']['project']['dir_images_local'] = st.session_state['dir_uploaded_images']
+#         st.info(f"Processing images from {st.session_state.config['leafmachine']['project']['dir_images_local']}")
+
+#         if st.session_state['input_list_small']:
+#             if len(st.session_state['input_list_small']) > MAX_GALLERY_IMAGES:
+#                 # Only take the first 100 images from the list
+#                 images_to_display = st.session_state['input_list_small'][:MAX_GALLERY_IMAGES]
+#             else:
+#                 # If there are less than 100 images, take them all
+#                 images_to_display = st.session_state['input_list_small']
+#             show_gallery_small_hf(images_to_display)
+
+#         return False
+
 
 def handle_image_upload_and_gallery():
 
@@ -371,6 +444,7 @@ def handle_image_upload_and_gallery():
 
 
 def content_input_images(col_left, col_right):
+
     st.write('---')
     # col1, col2 = st.columns([2,8])
     with col_left:
@@ -385,11 +459,11 @@ def content_input_images(col_left, col_right):
         pass
 
     with col_left:
-        if st.session_state.is_hf:
-            st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads')
-            st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small')
-            uploaded_files = st.file_uploader("Upload Images", type=['jpg', 'jpeg','pdf'], accept_multiple_files=True, key=st.session_state['uploader_idk'])
-            st.button("Use Test Image",help="This will clear any uploaded images and load the 1 provided test image.",on_click=use_test_image)
+        # if st.session_state.is_hf:
+        st.session_state['dir_uploaded_images'] = os.path.join(st.session_state.dir_home,'uploads')
+        st.session_state['dir_uploaded_images_small'] = os.path.join(st.session_state.dir_home,'uploads_small')
+        uploaded_files = st.file_uploader("Upload Images", type=['jpg', 'jpeg','pdf'], accept_multiple_files=True, key=st.session_state['uploader_idk'])
+        st.button("Use Test Image",help="This will clear any uploaded images and load the 1 provided test image.",on_click=use_test_image)
 
     with col_right:
         if st.session_state.is_hf:
@@ -1007,7 +1081,8 @@ def create_private_file():
 
     cfg_private['open_cage_geocode'] = {}
     cfg_private['open_cage_geocode']['API_KEY'] =''
-
+
+    cfg_private['huggingface'] = {}
 
     with col_private:
         st.header("Set API keys")
@@ -1060,7 +1135,7 @@ def create_private_file():
                             fullpath=os.path.join(st.session_state.dir_home, 'demo','google','google_api_5.PNG'))
 
        st.subheader("Getting a Google JSON authentication key")
-        st.write("Google uses a JSON file to store additional authentication information. Save this file in a safe, private location and assign the `GOOGLE_APPLICATION_CREDENTIALS` value to the file path. For Hugging Face, copy the contents of the JSON file including the `\{\}` and paste it as the secret value.")
+        st.write(f"Google uses a JSON file to store additional authentication information. Save this file in a safe, private location and assign the `GOOGLE_APPLICATION_CREDENTIALS` value to the file path. For Hugging Face, copy the contents of the JSON file including the curly brackets and paste it as the secret value.")
        st.write("To download your JSON key...")
        blog_text_and_image(text="Open the navigation menu. Click on the hamburger menu (three horizontal lines) in the top left corner. Go to IAM & Admin. ",
                            fullpath=os.path.join(st.session_state.dir_home, 'demo','google','google_api_7.PNG'),width=300)
@@ -1958,11 +2033,20 @@ def content_ocr_method():
     OCR_option_llava_bit = st.session_state.config['leafmachine']['project']['OCR_option_llava_bit']
     double_OCR = st.session_state.config['leafmachine']['project']['double_OCR']
 
-    # Map the OCR option to the index in options list
-    # You need to define the mapping based on your application's logic
     default_index = 0 # Default to 0 if option not found
     default_index_llava = 0 # Default to 0 if option not found
     default_index_llava_bit = 0
+
+    # Map the OCR option to the index in options list
+    # You need to define the mapping for multiple OCR options
+    # based on your application's logic
+    if len(OCR_option) == 1:
+        OCR_option = OCR_option[0]
+        try:
+            default_index = options.index(OCR_option)
+        except ValueError:
+            pass
+
     with c1:
         st.subheader("API Methods (Google Vision)")
         st.write("Using APIs for OCR allows VoucherVision to run on most computers. You can use multiple OCR engines simultaneously.")
@@ -1999,7 +2083,7 @@ def content_ocr_method():
 
     # Map selected options to their corresponding internal representations
     selected_OCR_options = [OCR_options[option] for option in OCR_option_select]
-
+    print('Selected OCR options:',selected_OCR_options)
    # Assuming you need to use these mapped values elsewhere in your application
    st.session_state.config['leafmachine']['project']['OCR_option'] = selected_OCR_options
 
@@ -2043,7 +2127,7 @@ def content_ocr_method():
        st.session_state.config['leafmachine']['project']['OCR_GPT_4o_mini_resolution'] = st.radio(
            "Select level of detail for :violet[GPT-4o-mini] OCR. We only recommend 'high' detail in most scenarios.",
            ["high", "low", ],
-            captions=["$0.50 per 1,000", "\$5 - \$10 per 1,000"])
+            captions=[f"$0.50 per 1,000", f"$5 - $10 per 1,000"])
 
 
    if 'LLaVA' in selected_OCR_options:
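The new PDF branch expects convert_pdf_to_jpg() to return the number of pages written to the uploads directory. A minimal stand-in is sketched below, assuming the pdf2image package (and its poppler dependency) is available; the real helper lives elsewhere in the VoucherVision codebase, and the function and file names here are illustrative only.

    import os
    from pdf2image import convert_from_path

    def pdf_to_jpgs(pdf_path: str, out_dir: str, dpi: int = 200) -> int:
        # Render each PDF page to a JPEG so the rest of the pipeline can treat it like a normal image
        os.makedirs(out_dir, exist_ok=True)
        pages = convert_from_path(pdf_path, dpi=dpi)  # one PIL.Image per page
        stem = os.path.splitext(os.path.basename(pdf_path))[0]
        for i, page in enumerate(pages, start=1):
            page.save(os.path.join(out_dir, f"{stem}__page_{i}.jpg"), "JPEG")
        return len(pages)  # 0 lets the caller skip the file, matching the new n_pages == 0 guard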
vouchervision/LLM_GoogleGemini.py CHANGED
@@ -101,7 +101,10 @@ class GoogleGeminiHandler:
         #                         top_p=self.config.get('top_p'))
 
         # Set up the retry parser with the runnable
-        self.retry_parser = RetryWithErrorOutputParser.from_llm(parser=self.parser, llm=self.llm_model, max_retries=self.MAX_RETRIES)
+        self.retry_parser = RetryWithErrorOutputParser.from_llm(
+            parser=self.parser,
+            llm=self.llm_model,
+            max_retries=self.MAX_RETRIES)
         # Prepare the chain
         self.chain = self.prompt | self.call_google_gemini
 
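For context, a minimal sketch of the retry-parser wiring used above, assuming valid Google credentials, the langchain-google-genai package, and an illustrative model name; it is not the handler's full call path.

    from langchain.output_parsers import RetryWithErrorOutputParser
    from langchain_core.output_parsers import JsonOutputParser
    from langchain_core.prompts import PromptTemplate
    from langchain_google_genai import ChatGoogleGenerativeAI

    parser = JsonOutputParser()
    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
    retry_parser = RetryWithErrorOutputParser.from_llm(parser=parser, llm=llm, max_retries=3)

    prompt = PromptTemplate.from_template("Return a JSON object answering: {query}")
    prompt_value = prompt.format_prompt(query="List two colors")
    completion = llm.invoke(prompt_value).content
    # parse_with_prompt re-asks the LLM with the error message if the first parse fails
    result = retry_parser.parse_with_prompt(completion, prompt_value)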
vouchervision/LLM_GooglePalm2.py CHANGED
@@ -1,17 +1,17 @@
 import os, time, json, typing
+from dataclasses import dataclass
 # import vertexai
 from vertexai.language_models import TextGenerationModel
 from vertexai.generative_models._generative_models import HarmCategory, HarmBlockThreshold
 from vertexai.language_models import TextGenerationModel
 # from vertexai.preview.generative_models import GenerativeModel
-from langchain.output_parsers import RetryWithErrorOutputParser
+from langchain.output_parsers.retry import RetryWithErrorOutputParser
 # from langchain.schema import HumanMessage
 from langchain.prompts import PromptTemplate
 from langchain_core.output_parsers import JsonOutputParser
 # from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_google_vertexai import VertexAI
 from langchain_core.messages import BaseMessage, HumanMessage
-from langchain_core.prompt_values import PromptValue as BasePromptValue
 
 from vouchervision.utils_LLM import SystemLoadMonitor, run_tools, count_tokens, save_individual_prompt, sanitize_prompt
 from vouchervision.utils_LLM_JSON_validation import validate_and_align_JSON_keys_with_template
@@ -25,6 +25,18 @@ from vouchervision.utils_LLM_JSON_validation import validate_and_align_JSON_keys_with_template
 # gcloud config set project XXXXXXXXX
 # https://cloud.google.com/docs/authentication
 
+from pydantic import BaseModel
+from langchain_core.prompt_values import PromptValue as BasePromptValue
+
+class PromptValueWrapper(BaseModel):
+    prompt_str: str
+
+    def to_string(self) -> str:
+        return self.prompt_str
+
+    def to_messages(self):
+        return [HumanMessage(content=self.prompt_str)]
+
 class GooglePalm2Handler:
 
     RETRY_DELAY = 10 # Wait 10 seconds before retrying
@@ -45,8 +57,6 @@ class GooglePalm2Handler:
 
         self.config_vals_for_permutation = config_vals_for_permutation
 
-
-
         self.monitor = SystemLoadMonitor(logger)
 
         self.parser = JsonOutputParser()
@@ -104,19 +114,35 @@ class GooglePalm2Handler:
         self.adjust_temp = self.starting_temp
         self.config['temperature'] = self.starting_temp
 
+    # def _build_model_chain_parser(self):
+    #     # Instantiate the parser and the retry parser
+    #     # self.llm_model = ChatGoogleGenerativeAI(model=self.model_name)
+    #     self.llm_model = VertexAI(model=self.model_name,
+    #                               max_output_tokens=self.config.get('max_output_tokens'),
+    #                               temperature=self.config.get('temperature'),
+    #                               top_k=self.config.get('top_k'),
+    #                               top_p=self.config.get('top_p'))
+
+    #     self.retry_parser = RetryWithErrorOutputParser.from_llm(
+    #         parser=self.parser,
+    #         llm=self.llm_model,
+    #         max_retries=self.MAX_RETRIES)
+    #     # Prepare the chain
+    #     self.chain = self.prompt | self.call_google_palm2
     def _build_model_chain_parser(self):
         # Instantiate the parser and the retry parser
-        # self.llm_model = ChatGoogleGenerativeAI(model=self.model_name)
         self.llm_model = VertexAI(model=self.model_name,
                                   max_output_tokens=self.config.get('max_output_tokens'),
                                   temperature=self.config.get('temperature'),
                                   top_k=self.config.get('top_k'),
                                   top_p=self.config.get('top_p'))
-
+
         self.retry_parser = RetryWithErrorOutputParser.from_llm(
-            parser=self.parser,
-            llm=self.llm_model,
-            max_retries=self.MAX_RETRIES)
+            llm=self.llm_model,
+            parser=self.parser,
+            max_retries=self.MAX_RETRIES
+            )
+
         # Prepare the chain
         self.chain = self.prompt | self.call_google_palm2
 
@@ -148,22 +174,27 @@ class GooglePalm2Handler:
         while ind < self.MAX_RETRIES:
             ind += 1
             try:
-                # model_kwargs = {"temperature": self.adjust_temp}
+                model_kwargs = {"temperature": self.adjust_temp}
                 # Invoke the chain to generate prompt text
-                response = self.chain.invoke({"query": prompt_template})#, "model_kwargs": model_kwargs})
-
-                # Use retry_parser to parse the response with retry logic
-                try:
-                    output = self.retry_parser.parse_with_prompt(response, prompt_value=PromptValue(prompt_template))
-                except:
-                    try:
-                        output = self.retry_parser.parse_with_prompt(response, prompt_value=prompt_template)
-                    except:
-                        try:
-                            output = json.loads(response)
-                        except Exception as e:
-                            print(e)
-                            output = None
+                response = self.chain.invoke({"query": prompt_template, "model_kwargs": model_kwargs})
+
+                # Clean up the response by removing any Markdown formatting
+                response_text = response.strip('```JSON\n').strip('\n```')
+
+                output = json.loads(response_text)
+
+                # # Use retry_parser to parse the response with retry logic
+                # try:
+                #     output = self.retry_parser.parse_with_prompt(response, prompt_value=PromptValue(prompt_template))
+                # except:
+                #     try:
+                #         output = self.retry_parser.parse_with_prompt(response, prompt_value=prompt_template)
+                #     except:
+                #         try:
+                #             output = json.loads(response)
+                #         except Exception as e:
+                #             print(e)
+                #             output = None
 
 
                 if output is None:
@@ -215,8 +246,3 @@ class GooglePalm2Handler:
             self.json_report.set_text(text_main=f'LLM call failed')
             return None, nt_in, nt_out, None, None, usage_report
 
-class PromptValue(BasePromptValue):
-    prompt_str: str
-
-    def to_string(self) -> str:
-        return self.prompt_str
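The new call path above parses the model output directly after stripping Markdown code fences. A small sketch of that cleanup step is shown below with illustrative data; the regex variant at the end is an alternative, more explicit fence removal and is not part of this commit.

    import json, re

    raw = '```JSON\n{"status": "ok"}\n```'

    # Behavior of the committed approach: str.strip() removes the fence characters from both ends
    cleaned = raw.strip('```JSON\n').strip('\n```')
    print(json.loads(cleaned))      # {'status': 'ok'}

    # Equivalent, more explicit fence removal (illustrative only)
    cleaned_re = re.sub(r'^```[A-Za-z]*\n?|\n?```$', '', raw.strip())
    print(json.loads(cleaned_re))   # {'status': 'ok'}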
vouchervision/OCR_GPT4oMini.py CHANGED
@@ -74,7 +74,7 @@ def main():
     img_path = 'D:/D_Desktop/BR_1839468565_Ochnaceae_Campylospermum_reticulatum_label.jpg'
 
     # $env:OPENAI_API_KEY="KEY"
-    API_KEY = "sk-proj-DxHlMH1H6jZzs8V12qbLT3BlbkFJIJnAVzt4kquOfhGURGW0"
+    API_KEY = ""
 
 
     ocr = GPT4oMiniOCR(API_KEY)
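The commit replaces a hard-coded key with an empty string; the existing `$env:OPENAI_API_KEY` comment in this test script suggests reading the key from the environment. A minimal sketch of that, with the final line only illustrating how it would feed the repo's GPT4oMiniOCR class:

    import os

    API_KEY = os.getenv("OPENAI_API_KEY", "")
    if not API_KEY:
        raise SystemExit("Set OPENAI_API_KEY before running this test script.")
    # ocr = GPT4oMiniOCR(API_KEY)  # as in main() above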
vouchervision/OCR_google_cloud_vision.py CHANGED
@@ -99,12 +99,14 @@ class OCREngine:
 
 
     def set_client(self):
-        if self.is_hf:
-            self.client_beta = vision_beta.ImageAnnotatorClient(credentials=self.get_google_credentials())
-            self.client = vision.ImageAnnotatorClient(credentials=self.get_google_credentials())
-        else:
-            self.client_beta = vision_beta.ImageAnnotatorClient(credentials=self.get_google_credentials())
-            self.client = vision.ImageAnnotatorClient(credentials=self.get_google_credentials())
+        # Only init Google Vision if it is needed
+        if 'hand' in self.OCR_option or 'normal' in self.OCR_option:
+            if self.is_hf:
+                self.client_beta = vision_beta.ImageAnnotatorClient(credentials=self.get_google_credentials())
+                self.client = vision.ImageAnnotatorClient(credentials=self.get_google_credentials())
+            else:
+                self.client_beta = vision_beta.ImageAnnotatorClient(credentials=self.get_google_credentials())
+                self.client = vision.ImageAnnotatorClient(credentials=self.get_google_credentials())
 
 
     def get_google_credentials(self):
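A short sketch of the lazy-initialization idea behind this change, assuming OCR_option is a collection such as ['normal', 'LLaVA']; the function name is illustrative and the client is only built when a Google Vision OCR mode is actually requested.

    from google.cloud import vision

    def build_vision_client(ocr_options, credentials):
        # Skip credential loading entirely when no Google Vision OCR mode is selected
        if 'hand' in ocr_options or 'normal' in ocr_options:
            return vision.ImageAnnotatorClient(credentials=credentials)
        return None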
vouchervision/model_maps.py CHANGED
@@ -1,5 +1,5 @@
 class ModelMaps:
-    PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1", "Version 1 PaLM 2"]
+    PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ['Version 1', 'Version 1 PaLM 2']
     COLORS_EXPENSE_REPORT = {
         'GPT_4': '#32CD32', # Lime Green
         'GPT_3_5': '#008000', # Green
@@ -41,49 +41,49 @@ class ModelMaps:
     }
 
     MODELS_OPENAI = [
-        "GPT 4o 2024-05-13", #GPT_4o_2024_05_13
-        "GPT 4o mini 2024-07-18",
-        "GPT 4 Turbo 2024-04-09",#GPT_4_TURBO_2024_04_09
-        "GPT 4",
-        "GPT 4 32k",
-        "GPT 4 Turbo 0125-preview",
-        "GPT 4 Turbo 1106-preview",
-        "GPT 3.5 Turbo",
-        "GPT 3.5 Instruct",
+        'GPT 4o 2024-05-13', #GPT_4o_2024_05_13
+        'GPT 4o mini 2024-07-18',
+        'GPT 4 Turbo 2024-04-09',#GPT_4_TURBO_2024_04_09
+        'GPT 4',
+        'GPT 4 32k',
+        'GPT 4 Turbo 0125-preview',
+        'GPT 4 Turbo 1106-preview',
+        'GPT 3.5 Turbo',
+        'GPT 3.5 Instruct',
     ]
 
 
     MODELS_OPENAI_AZURE = [
-        "Azure GPT 4",
-        # "Azure GPT 4 32k",
-        # "Azure GPT 4 Turbo 0125-preview",
-        # "Azure GPT 4 Turbo 1106-preview",
-        # "Azure GPT 3.5 Turbo",
-        # "Azure GPT 3.5 Instruct",
+        'Azure GPT 4',
+        # 'Azure GPT 4 32k',
+        # 'Azure GPT 4 Turbo 0125-preview',
+        # 'Azure GPT 4 Turbo 1106-preview',
+        # 'Azure GPT 3.5 Turbo',
+        # 'Azure GPT 3.5 Instruct',
     ]
 
     MODELS_GOOGLE = [
-        # "PaLM 2 text-bison@001",
-        "PaLM 2 text-bison@002",
-        "PaLM 2 text-unicorn@001",
-        "Gemini 1.0 Pro ",
-        "Gemini 1.5 Flash",
-        "Gemini 1.5 Pro",
+        # 'PaLM 2 text-bison@001',
+        'PaLM 2 text-bison@002',
+        'PaLM 2 text-unicorn@001',
+        'Gemini 1.0 Pro',
+        'Gemini 1.5 Flash',
+        'Gemini 1.5 Pro',
     ]
 
-    MODELS_MISTRAL = ["Mistral Small",
-                      "Mistral Medium",
-                      "Mistral Large",
-                      "Open Mixtral 8x7B",
-                      "Open Mistral 7B",
+    MODELS_MISTRAL = ['Mistral Small',
+                      'Mistral Medium',
+                      'Mistral Large',
+                      'Open Mixtral 8x7B',
+                      'Open Mistral 7B',
                       ]
 
-    MODELS_LOCAL = ["LOCAL Mixtral 8x7B Instruct v0.1",
-                    "LOCAL Mistral 7B Instruct v0.2",
-                    "LOCAL CPU Mistral 7B Instruct v0.2 GGUF",
+    MODELS_LOCAL = ['LOCAL Mixtral 8x7B Instruct v0.1',
+                    'LOCAL Mistral 7B Instruct v0.2',
+                    'LOCAL CPU Mistral 7B Instruct v0.2 GGUF',
                     'phyloforfun/mistral-7b-instruct-v2-bnb-4bit__HLT_MICH_Angiospermae_SLTPvC_v1-0_medium_OCR-C25-L25-E50-R05']
 
-    MODELS_GUI_DEFAULT = "Gemini 1.5 Flash" #"Azure GPT 4" # "GPT 4 Turbo 1106-preview"
+    MODELS_GUI_DEFAULT = 'Gemini 1.5 Flash' #'Azure GPT 4' # 'GPT 4 Turbo 1106-preview'
 
     MODEL_FAMILY = {
         'OpenAI': MODELS_OPENAI,
@@ -95,9 +95,9 @@ class ModelMaps:
         version_mapping_cost = {
             'GPT 4 32k': 'GPT_4_32K',
             'GPT 4': 'GPT_4',
-            "GPT 4o 2024-05-13": 'GPT_4o_2024_05_13',
-            "GPT 4o mini 2024-07-18": 'GPT_4o_mini_2024_07_18',
-            "GPT 4 Turbo 2024-04-09": 'GPT_4_TURBO_2024_04_09',
+            'GPT 4o 2024-05-13': 'GPT_4o_2024_05_13',
+            'GPT 4o mini 2024-07-18': 'GPT_4o_mini_2024_07_18',
+            'GPT 4 Turbo 2024-04-09': 'GPT_4_TURBO_2024_04_09',
             'GPT 4 Turbo 0125-preview': 'GPT_4_TURBO_0125',
             'GPT 4 Turbo 1106-preview': 'GPT_4_TURBO_1106',
             'GPT 3.5 Instruct': 'GPT_3_5_INSTRUCT',
@@ -110,7 +110,7 @@ class ModelMaps:
             # 'Azure GPT 3.5 Instruct': 'AZURE_GPT_3_5_INSTRUCT',
             # 'Azure GPT 3.5 Turbo': 'AZURE_GPT_3_5',
 
-            'Gemini 1.0 Pro ': 'GEMINI_PRO',
+            'Gemini 1.0 Pro': 'GEMINI_PRO',
             'Gemini 1.5 Flash': 'GEMINI_1_5_FLASH', # gemini-1.5-flash
             'Gemini 1.5 Pro': 'GEMINI_1_5_PRO', # gemini-1.5-pro
 
@@ -136,12 +136,12 @@ class ModelMaps:
     def get_version_has_key(cls, key, has_key_openai, has_key_azure_openai, has_key_google_application_credentials, has_key_mistral):
         # Define the mapping for 'has_key' values
         version_has_key = {
-            "GPT 4 Turbo 2024-04-09": has_key_openai,
+            'GPT 4 Turbo 2024-04-09': has_key_openai,
             'GPT 4 Turbo 1106-preview': has_key_openai,
             'GPT 4 Turbo 0125-preview': has_key_openai,
             'GPT 4': has_key_openai,
-            "GPT 4o 2024-05-13": has_key_openai,
-            "GPT 4o mini 2024-07-18": has_key_openai,
+            'GPT 4o 2024-05-13': has_key_openai,
+            'GPT 4o mini 2024-07-18': has_key_openai,
             'GPT 4 32k': has_key_openai,
             'GPT 3.5 Turbo': has_key_openai,
             'GPT 3.5 Instruct': has_key_openai,
@@ -156,9 +156,9 @@ class ModelMaps:
             # 'PaLM 2 text-bison@001': has_key_google_application_credentials,
             'PaLM 2 text-bison@002': has_key_google_application_credentials,
             'PaLM 2 text-unicorn@001': has_key_google_application_credentials,
-            'Gemini 1.0 Pro ': has_key_google_application_credentials,
+            'Gemini 1.0 Pro': has_key_google_application_credentials,
             'Gemini 1.5 Flash': has_key_google_application_credentials,
-            'Gemini 1.5 Pro ': has_key_google_application_credentials,
+            'Gemini 1.5 Pro': has_key_google_application_credentials,
 
             'Mistral Small': has_key_mistral,
             'Mistral Medium': has_key_mistral,
@@ -178,11 +178,11 @@ class ModelMaps:
     @classmethod
     def get_version_mapping_is_azure(cls, key):
         version_mapping_is_azure = {
-            "GPT 4o 2024-05-13": False,
-            "GPT 4o mini 2024-07-18": False,
-            "GPT 4 Turbo 2024-04-09": False,
-            "GPT 4 Turbo 1106-preview": False,
-            "GPT 4 Turbo 0125-preview": False,
+            'GPT 4o 2024-05-13': False,
+            'GPT 4o mini 2024-07-18': False,
+            'GPT 4 Turbo 2024-04-09': False,
+            'GPT 4 Turbo 1106-preview': False,
+            'GPT 4 Turbo 0125-preview': False,
             'GPT 4': False,
             'GPT 4 32k': False,
             'GPT 3.5 Turbo': False,
@@ -198,9 +198,9 @@ class ModelMaps:
             # 'PaLM 2 text-bison@001': False,
             'PaLM 2 text-bison@002': False,
             'PaLM 2 text-unicorn@001': False,
-            'Gemini 1.0 Pro ': False,
+            'Gemini 1.0 Pro': False,
             'Gemini 1.5 Flash': False,
-            'Gemini 1.5 Pro ': False,
+            'Gemini 1.5 Pro': False,
 
             'Mistral Small': False,
             'Mistral Medium': False,
@@ -256,39 +256,39 @@ class ModelMaps:
             # return 'gpt-35-turbo-instruct'
 
         elif key == 'AZURE_GPT_4':
-            return "gpt-4"
+            return 'gpt-4'
 
         # elif key == 'AZURE_GPT_4_TURBO_1106':
-        #     return "gpt-4-1106-preview"
+        #     return 'gpt-4-1106-preview'
 
         # elif key == 'AZURE_GPT_4_TURBO_0125':
         #     return 'gpt-4-0125-preview'
 
         # elif key == 'AZURE_GPT_4_32K':
-        #     return "gpt-4-32k"
+        #     return 'gpt-4-32k'
 
         ### Google
         # elif key == 'PALM2_TB_1':
-        #     return "text-bison@001"
+        #     return 'text-bison@001'
 
         elif key == 'PALM2_TB_2':
-            return "text-bison@002"
+            return 'text-bison@002'
 
         elif key == 'PALM2_TU_1':
-            return "text-unicorn@001"
+            return 'text-unicorn@001'
 
         elif key == 'GEMINI_PRO':
-            return "gemini-1.0-pro"
+            return 'gemini-1.0-pro'
 
         elif key == 'GEMINI_1_5_FLASH':
-            return "gemini-1.5-flash"
+            return 'gemini-1.5-flash'
 
         elif key == 'GEMINI_1_5_PRO':
-            return "gemini-1.5-pro"
+            return 'gemini-1.5-pro'
 
         ### Mistral
         elif key == 'OPEN_MISTRAL_7B':
-            return "open-mistral-7b"
+            return 'open-mistral-7b'
 
         elif key == 'OPEN_MIXTRAL_8X7B':
             return 'open-mixtral-8x7b'
@@ -323,7 +323,7 @@ class ModelMaps:
 
 
         else:
-            raise ValueError(f"Invalid model name {key}. See model_maps.py")
+            raise ValueError(f'Invalid model name {key}. See model_maps.py')
 
     @classmethod
     def get_models_gui_list(cls):
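Beyond the quote-style cleanup, this hunk also drops the trailing space from the 'Gemini 1.0 Pro ' and 'Gemini 1.5 Pro ' keys. A tiny illustrative example of why a stray trailing space in a mapping key likely caused lookups from the GUI list to miss:

    mapping = {'Gemini 1.0 Pro ': 'GEMINI_PRO'}   # old key, note the trailing space
    selected = 'Gemini 1.0 Pro'                   # name as it appears in the GUI list
    print(mapping.get(selected))                  # None
    print(mapping.get(selected + ' '))            # 'GEMINI_PRO'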
vouchervision/utils_hf.py CHANGED
@@ -68,57 +68,106 @@ def save_uploaded_file_local(directory_in, directory_out, img_file_name, image=None):
     except:
         pass
 
-
-def save_uploaded_file(directory, img_file, image=None):
+def save_uploaded_file(directory, uploaded_file, image=None):
     if not os.path.exists(directory):
         os.makedirs(directory)
 
-    full_path = os.path.join(directory, img_file.name) ########## TODO THIS MUST BE MOVED TO conditional specific location
+    full_path = os.path.join(directory, uploaded_file.name)
 
-    # Assuming the uploaded file is an image
-    if img_file.name.lower().endswith('.pdf'):
-        with open(full_path, 'wb') as out_file:
-            # If img_file is a file-like object (e.g., Django's UploadedFile),
-            # you can use copyfileobj or read chunks.
-            # If it's a path, you'd need to open and then save it.
-            if hasattr(img_file, 'read'):
-                # This is a file-like object
-                copyfileobj(img_file, out_file)
-            else:
-                # If img_file is a path string
-                with open(img_file, 'rb') as fd:
-                    copyfileobj(fd, out_file)
+    # Handle PDF and Image files differently
+    if uploaded_file.name.lower().endswith('.pdf'):
+        # Save PDF file
+        try:
+            with open(full_path, 'wb') as out_file:
+                if hasattr(uploaded_file, 'read'):
+                    # This is a file-like object
+                    out_file.write(uploaded_file.read())
+                else:
+                    # If uploaded_file is a path string
+                    with open(uploaded_file, 'rb') as fd:
+                        out_file.write(fd.read())
+            if os.path.getsize(full_path) == 0:
+                raise ValueError(f"The file {uploaded_file.name} is empty.")
             return full_path
+        except Exception as e:
+            st.error(f"Failed to save PDF file {uploaded_file.name}. Error: {e}")
+            return None
     else:
+        # Handle image files
         if image is None:
             try:
-                with Image.open(img_file) as image:
-                    full_path = os.path.join(directory, img_file.name)
+                with Image.open(uploaded_file) as image:
                     image.save(full_path, "JPEG")
-                    # Return the full path of the saved image
-                    return full_path
-            except:
-                try:
-                    with Image.open(os.path.join(directory,img_file)) as image:
-                        full_path = os.path.join(directory, img_file)
-                        image.save(full_path, "JPEG")
-                        # Return the full path of the saved image
-                        return full_path
-                except:
-                    with Image.open(img_file.name) as image:
-                        full_path = os.path.join(directory, img_file.name)
-                        image.save(full_path, "JPEG")
-                        # Return the full path of the saved image
-                        return full_path
+            except Exception as e:
+                st.error(f"Failed to save image file {uploaded_file.name}. Error: {e}")
+                return None
         else:
             try:
-                full_path = os.path.join(directory, img_file.name)
-                image.save(full_path, "JPEG")
-                return full_path
-            except:
-                full_path = os.path.join(directory, img_file)
                 image.save(full_path, "JPEG")
-                return full_path
+            except Exception as e:
+                st.error(f"Failed to save processed image file {uploaded_file.name}. Error: {e}")
+                return None
+
+        if os.path.getsize(full_path) == 0:
+            st.error(f"The image file {uploaded_file.name} is empty.")
+            return None
+
+        return full_path
+
+
+# def save_uploaded_file(directory, img_file, image=None): # not working with pdfs
+#     if not os.path.exists(directory):
+#         os.makedirs(directory)
+
+#     full_path = os.path.join(directory, img_file.name) ########## TODO THIS MUST BE MOVED TO conditional specific location
+
+#     # Assuming the uploaded file is an image
+#     if img_file.name.lower().endswith('.pdf'):
+#         with open(full_path, 'wb') as out_file:
+#             # If img_file is a file-like object (e.g., Django's UploadedFile),
+#             # you can use copyfileobj or read chunks.
+#             # If it's a path, you'd need to open and then save it.
+#             if hasattr(img_file, 'read'):
+#                 # This is a file-like object
+#                 copyfileobj(img_file, out_file)
+#             else:
+#                 # If img_file is a path string
+#                 with open(img_file, 'rb') as fd:
+#                     copyfileobj(fd, out_file)
+#         return full_path
+#     else:
+#         if image is None:
+#             try:
+#                 with Image.open(img_file) as image:
+#                     full_path = os.path.join(directory, img_file.name)
+#                     image.save(full_path, "JPEG")
+#                     # Return the full path of the saved image
+#                     return full_path
+#             except:
+#                 try:
+#                     with Image.open(os.path.join(directory,img_file)) as image:
+#                         full_path = os.path.join(directory, img_file)
+#                         image.save(full_path, "JPEG")
+#                         # Return the full path of the saved image
+#                         return full_path
+#                 except:
+#                     with Image.open(img_file.name) as image:
+#                         full_path = os.path.join(directory, img_file.name)
+#                         image.save(full_path, "JPEG")
+#                         # Return the full path of the saved image
+#                         return full_path
+#         else:
+#             try:
+#                 full_path = os.path.join(directory, img_file.name)
+#                 image.save(full_path, "JPEG")
+#                 return full_path
+#             except:
+#                 full_path = os.path.join(directory, img_file)
+#                 image.save(full_path, "JPEG")
+#                 return full_path
+
+
+
 # def save_uploaded_file(directory, uploaded_file, image=None):
 #     if not os.path.exists(directory):
 #         os.makedirs(directory)
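A minimal usage sketch for the reworked save_uploaded_file(), assuming it is imported from vouchervision.utils_hf and that the object comes from Streamlit's file uploader; the directory name is illustrative.

    import streamlit as st
    from vouchervision.utils_hf import save_uploaded_file

    uploaded = st.file_uploader("Upload a label", type=['jpg', 'jpeg', 'pdf'])
    if uploaded is not None:
        saved_path = save_uploaded_file('uploads', uploaded)
        if saved_path is None:
            # The function now returns None (instead of raising) when saving fails or the file is empty
            st.error(f"Could not save {uploaded.name}")
        else:
            st.success(f"Saved to {saved_path}")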