akhaliq HF staff committed on
Commit
2c271aa
1 Parent(s): 3807c9a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -11
app.py CHANGED
@@ -40,21 +40,13 @@ def generate_gradio_app(api_key, image):
40
 
41
  Please generate the entire Gradio code based on the provided image."""
42
 
43
- # Make the API call
44
  response = client.chat.completions.create(
45
  model="meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
46
  messages=[
47
  {
48
  "role": "user",
49
- "content": [
50
- {"type": "text", "text": prompt},
51
- {
52
- "type": "image_url",
53
- "image_url": {
54
- "url": f"data:image/png;base64,{img_str}",
55
- },
56
- },
57
- ],
58
  }
59
  ],
60
  max_tokens=4096,
@@ -63,7 +55,7 @@ Please generate the entire Gradio code based on the provided image."""
63
  top_k=50,
64
  repetition_penalty=1,
65
  stop=["<|eot_id|>", "<|eom_id|>"],
66
- stream=False # Changed to False for easier debugging
67
  )
68
 
69
  # Debug: Print the entire response
 
40
 
41
  Please generate the entire Gradio code based on the provided image."""
42
 
43
+ # Make the API call with the corrected message format
44
  response = client.chat.completions.create(
45
  model="meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
46
  messages=[
47
  {
48
  "role": "user",
49
+ "content": prompt + f"\n\n<image>\ndata:image/png;base64,{img_str}\n</image>"
 
 
 
 
 
 
 
 
50
  }
51
  ],
52
  max_tokens=4096,
 
55
  top_k=50,
56
  repetition_penalty=1,
57
  stop=["<|eot_id|>", "<|eom_id|>"],
58
+ stream=False
59
  )
60
 
61
  # Debug: Print the entire response