Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -272,8 +272,7 @@ def img_to_bytes(image_path):
|
|
272 |
|
273 |
# Format user prompt with image history and system conditioning
|
274 |
def format_user_prompt_with_im_history_and_system_conditioning(
|
275 |
-
user_prompt, chat_history
|
276 |
-
) -> List[Dict[str, Union[List, str]]]:
|
277 |
"""
|
278 |
Produce the resulting list that needs to go inside the processor. It handles the potential image(s), the history, and the system conditioning.
|
279 |
"""
|
@@ -287,7 +286,7 @@ def format_user_prompt_with_im_history_and_system_conditioning(
|
|
287 |
# Format history
|
288 |
for turn in chat_history:
|
289 |
if not resulting_messages or (
|
290 |
-
|
291 |
):
|
292 |
resulting_messages.append(
|
293 |
{
|
@@ -422,13 +421,14 @@ def search(term, num_results=3, lang="en", advanced=True, sleep_interval=0, time
|
|
422 |
def format_prompt(user_prompt, chat_history):
|
423 |
prompt = "<s>"
|
424 |
for item in chat_history:
|
425 |
-
|
426 |
-
|
427 |
-
prompt += f" {item[
|
428 |
-
|
429 |
-
|
430 |
-
else:
|
431 |
-
|
|
|
432 |
prompt += f"[INST] {user_prompt} [/INST]"
|
433 |
return prompt
|
434 |
|
@@ -455,7 +455,7 @@ def model_inference(
|
|
455 |
web_results = search(user_prompt["text"])
|
456 |
web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
|
457 |
# Load the language model
|
458 |
-
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.
|
459 |
generate_kwargs = dict(
|
460 |
max_new_tokens=4000,
|
461 |
do_sample=True,
|
|
|
272 |
|
273 |
# Format user prompt with image history and system conditioning
|
274 |
def format_user_prompt_with_im_history_and_system_conditioning(
|
275 |
+
user_prompt, chat_history) -> List[Dict[str, Union[List, str]]]:
|
|
|
276 |
"""
|
277 |
Produce the resulting list that needs to go inside the processor. It handles the potential image(s), the history, and the system conditioning.
|
278 |
"""
|
|
|
286 |
# Format history
|
287 |
for turn in chat_history:
|
288 |
if not resulting_messages or (
|
289 |
+
resulting_messages and resulting_messages[-1]["role"] != "user"
|
290 |
):
|
291 |
resulting_messages.append(
|
292 |
{
|
|
|
421 |
def format_prompt(user_prompt, chat_history):
    """Assemble a Mistral-instruct style prompt from prior chat turns plus the new user message.

    Tuple entries in *chat_history* are treated as (user_text, bot_text)
    exchanges; any other entry is assumed to be image-related and is
    represented by a placeholder token.
    """
    pieces = ["<s>"]
    for entry in chat_history:
        if isinstance(entry, tuple):
            # Completed text exchange: user turn followed by bot reply.
            pieces.append(f"[INST] {entry[0]} [/INST]")
            pieces.append(f" {entry[1]}</s> ")
        else:
            # Non-tuple entry — presumably an image; stand in a placeholder.
            pieces.append(" [Image] ")
    # Finally append the current user prompt as a fresh instruction.
    pieces.append(f"[INST] {user_prompt} [/INST]")
    return "".join(pieces)
|
434 |
|
|
|
455 |
web_results = search(user_prompt["text"])
|
456 |
web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
|
457 |
# Load the language model
|
458 |
+
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
459 |
generate_kwargs = dict(
|
460 |
max_new_tokens=4000,
|
461 |
do_sample=True,
|