AdrienB134 committed
Commit affda77
1 Parent(s): 8fb0879
Files changed (1)
  1. app.py +7 -3
app.py CHANGED
@@ -28,7 +28,7 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
 
 @spaces.GPU
 def model_inference(
-    images, text, assistant_prefix= "Réfléchis step by step. Répond en faisant de belles phrases", decoding_strategy = "Greedy", temperature= 0.4, max_new_tokens=512,
+    images, text, assistant_prefix= "Réfléchis step by step. Répond en faisant de belles phrases uniquement avec les informations du document fourni.", decoding_strategy = "Greedy", temperature= 0.4, max_new_tokens=512,
     repetition_penalty=1.2, top_p=0.8
 ):
     ## Load idefics
@@ -199,7 +199,11 @@ def index_gpu(images, ds):
 
 
 def get_example():
-    return [[["RAPPORT_DEVELOPPEMENT_DURABLE_2019.pdf"], "Quels sont les axes de développement du service achats?"]]
+    return [
+        [["RAPPORT_DEVELOPPEMENT_DURABLE_2019.pdf"], "Quels sont les 4 axes majeurs des achats?"],
+        [["RAPPORT_DEVELOPPEMENT_DURABLE_2019.pdf"], "Quelles sont les actions entreprise en Afrique du Sud?"],
+        [["RAPPORT_DEVELOPPEMENT_DURABLE_2019.pdf"], "fais moi un tableau sur la répartition homme femme"],
+    ]
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("# ColPali + Idefics3: Efficient Document Retrieval with Vision Language Models 📚")
@@ -234,7 +238,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     search_button.click(search, inputs=[query, embeds, imgs, k], outputs=[output_gallery])
 
     answer_button = gr.Button("Answer", variant="primary")
-    output = gr.Textbox(label="Output")
+    output = gr.Markdown(label="Output")
     answer_button.click(model_inference, inputs=[output_gallery, query], outputs=output)
 
 if __name__ == "__main__":
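
In short, this commit extends the French assistant_prefix, replaces the single example query in get_example() with three, and switches the answer component from gr.Textbox to gr.Markdown so formatted answers (for instance the requested table) render instead of appearing as raw text. Below is a minimal, hypothetical sketch of how these touched pieces could fit together in a Gradio Blocks app, assuming component names not visible in the diff, a stubbed model_inference, and gr.Examples wiring; it is not the actual app.py.

```python
import gradio as gr

def get_example():
    # Same shape as in the commit: each entry is [list of file paths, query string].
    return [
        [["RAPPORT_DEVELOPPEMENT_DURABLE_2019.pdf"], "Quels sont les 4 axes majeurs des achats?"],
    ]

def model_inference(images, text):
    # Hypothetical stand-in for the real Idefics3 call; the actual function also
    # takes decoding parameters and the assistant_prefix changed in this commit.
    return f"**Query:** {text}\n\n(model answer rendered as Markdown here)"

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    files = gr.File(file_count="multiple", label="Documents")   # assumed component
    query = gr.Textbox(label="Query")                            # assumed component
    output_gallery = gr.Gallery(label="Retrieved pages")         # name as in the diff

    answer_button = gr.Button("Answer", variant="primary")
    # gr.Markdown instead of gr.Textbox: Markdown in the answer (headings, tables)
    # is rendered rather than displayed as plain text.
    output = gr.Markdown(label="Output")
    answer_button.click(model_inference, inputs=[output_gallery, query], outputs=output)

    # Assumed wiring of get_example() into clickable examples.
    gr.Examples(examples=get_example(), inputs=[files, query])

if __name__ == "__main__":
    demo.launch()
```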