Upload 5 files
- dockerfile +24 -0
- main.py +29 -0
- prompts.py +21 -0
- requirements.txt +9 -0
- utils.py +14 -0
dockerfile
ADDED
@@ -0,0 +1,24 @@
# Use the official Python image
FROM python:3.9-slim

# Set the working directory in the container
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    && apt-get clean

# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code
COPY . .

# Expose the application port
EXPOSE 8000

# Run the application
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
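For local testing, the image can be built and started with something like the following; the tag food-classifier is an arbitrary name for this sketch. Note that utils.py targets localhost:11434, so the LLM server must be reachable from inside the container, for example by running with --network host on Linux instead of the port mapping shown here.

docker build -t food-classifier .
docker run -p 8000:8000 food-classifier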
main.py
ADDED
@@ -0,0 +1,29 @@
# main.py

from fastapi import FastAPI, File, UploadFile
from transformers import pipeline
from PIL import Image
import io

from prompts import generate_health_feedback_prompt
from utils import send_prompt_to_llm

app = FastAPI()

pipe = pipeline("image-classification", model="nateraw/food")

@app.post("/classify/")
async def classify_image(file: UploadFile = File(...)):
    # Read the uploaded image
    image_bytes = await file.read()
    image = Image.open(io.BytesIO(image_bytes))

    # Classify the image to get ingredients
    result = pipe(image)
    ingredients = [res['label'] for res in result]

    # Generate prompt and get feedback from the LLM
    prompt = generate_health_feedback_prompt(ingredients)
    health_feedback = send_prompt_to_llm(prompt)

    return {"ingredients": ingredients, "health_feedback": health_feedback}
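With the server running, the endpoint can be exercised with a short client script. This is a minimal sketch, not part of the uploaded files: meal.jpg is a placeholder for any local image, and port 8000 matches the Dockerfile's EXPOSE and CMD.

# client_example.py -- hypothetical usage sketch
import requests

with open("meal.jpg", "rb") as f:
    # POST the image as multipart form data to the /classify/ endpoint
    resp = requests.post(
        "http://localhost:8000/classify/",
        files={"file": ("meal.jpg", f, "image/jpeg")},
    )
resp.raise_for_status()
print(resp.json())  # {"ingredients": [...], "health_feedback": "..."}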
prompts.py
ADDED
@@ -0,0 +1,21 @@
# prompts.py

json_structure = {
    "meal": {
        "ingredients": ["list of ingredients here"]
    },
    "response_format": {
        "carbs": "numeric value in grams",
        "fats": "numeric value in grams",
        "proteins": "numeric value in grams",
        "fibers": "numeric value in grams",
        "feedback": "One-sentence feedback about the healthiness of the meal and suggestions for improvement"
    }
}

def generate_health_feedback_prompt(ingredients):
    return f"""
    Given this list of ingredients: {ingredients}, estimate the macronutrient content and return only the values as a JSON object.
    Specify the amount of carbs, protein, fat, and fiber in grams, and include a one-sentence feedback on the meal healthiness.
    The response should strictly follow this JSON format: {json_structure}. Please respond with only the JSON object.
    """
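To preview the exact prompt text sent to the model, generate_health_feedback_prompt can be called directly; the labels below are illustrative, chosen to look like the classifier's output.

# prompt_preview.py -- hypothetical sketch
from prompts import generate_health_feedback_prompt

# Prints the instruction text with the ingredient list and the
# json_structure dict interpolated into the f-string
print(generate_health_feedback_prompt(["pizza", "caesar_salad"]))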
requirements.txt
ADDED
@@ -0,0 +1,9 @@
fastapi==0.74.*
requests==2.27.*
uvicorn[standard]==0.17.*
sentencepiece==0.1.*
torch==1.11.*
transformers==4.*
# needed by main.py: PIL image handling and FastAPI file uploads
pillow
python-multipart
utils.py
ADDED
@@ -0,0 +1,14 @@
# utils.py

import requests

def send_prompt_to_llm(prompt, model="llama3", endpoint="http://localhost:11434/v1/chat/completions"):
    response = requests.post(
        endpoint,
        json={
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
        },
    )
    response.raise_for_status()  # Raise an error for failed requests
    return response.json()["choices"][0]["message"]["content"]
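The default endpoint in send_prompt_to_llm matches the OpenAI-compatible chat completions API that a local Ollama server exposes. A minimal standalone check, assuming such a server is running and a llama3 model has been pulled:

# llm_check.py -- hypothetical sketch; requires a local OpenAI-compatible server
from utils import send_prompt_to_llm

# raise_for_status() inside the helper will surface any HTTP error
print(send_prompt_to_llm("Reply with the single word: ok"))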