Update app.py
app.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import streamlit as st
-from transformers import
+from transformers import AutoModelForCausalLM
+from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
 import torch
 from PIL import Image
 import io
@@ -8,7 +9,7 @@ import io
 def load_model():
     model_name = "Qwen/Qwen2-VL-7B-Instruct"
     try:
-        tokenizer =
+        tokenizer = Qwen2Tokenizer.from_pretrained(model_name, trust_remote_code=True)
         model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
         return tokenizer, model
     except Exception as e:
@@ -22,7 +23,7 @@ def generate_response(prompt, image, tokenizer, model):
     try:
         if image:
             image = Image.open(image).convert('RGB')
-            inputs = tokenizer
+            inputs = tokenizer(prompt, images=[image], return_tensors='pt').to(model.device)
         else:
             inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
 
@@ -40,6 +41,8 @@ tokenizer, model = load_model()
 
 if tokenizer is None or model is None:
     st.warning("Модель не загружена. Приложение может работать некорректно.")
+else:
+    st.success("Модель успешно загружена!")
 
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -67,7 +70,8 @@ if prompt or uploaded_file:
         st.markdown(prompt)
 
     with st.chat_message("assistant"):
-
+        with st.spinner("Генерация ответа..."):
+            response = generate_response(prompt, uploaded_file, tokenizer, model)
         st.markdown(response)
 
     st.session_state.messages.append({"role": "assistant", "content": response})
```
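Worth noting alongside this change: in current transformers releases Qwen2-VL is a vision-language architecture with its own model class and a combined processor, and `AutoModelForCausalLM` has no mapping for it, so the `from_pretrained` call in this commit will likely raise; a bare `Qwen2Tokenizer` also has no image pathway, so the `images=[image]` kwarg added in the diff would be rejected or ignored. A minimal sketch of the loading path the Qwen2-VL model card documents (the class names come from the model card, not this commit; transformers >= 4.45 and the dtype choice are assumptions):

```python
# Hedged sketch: the loading path from the Qwen2-VL model card, not the
# classes used in this commit.
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_name = "Qwen/Qwen2-VL-7B-Instruct"

# The processor bundles the tokenizer with the image preprocessor, so one
# object can build multimodal inputs.
processor = AutoProcessor.from_pretrained(model_name)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # assumption: half precision to fit a 7B model
    device_map="auto",
)
```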
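Continuing that sketch, `generate_response` could build its multimodal inputs through the processor's chat template instead of calling the tokenizer with images. The message structure follows the Qwen2-VL model card; `prompt` and the image path are hypothetical stand-ins for the app's chat input and uploaded file:

```python
# Continuation of the sketch above (assumes `processor` and `model` from it).
# Builds inputs via the processor's chat template, as the Qwen2-VL model
# card documents, rather than via a bare tokenizer.
from PIL import Image

prompt = "Describe this image."                 # stand-in for the chat input
image = Image.open("photo.jpg").convert("RGB")  # hypothetical uploaded file

messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": prompt},
    ],
}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)

output_ids = model.generate(**inputs, max_new_tokens=256)
# Decode only the newly generated tokens, not the echoed prompt.
new_ids = output_ids[:, inputs["input_ids"].shape[1]:]
response = processor.batch_decode(new_ids, skip_special_tokens=True)[0]
```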
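On the Streamlit side (the Russian UI strings read, in English: "Model not loaded. The app may not work correctly.", "Model loaded successfully!", and "Generating response..."), one caveat remains: Streamlit re-executes app.py on every interaction, so the module-level `load_model()` call reloads the 7B model on each rerun. Caching the loader with `st.cache_resource` is the usual remedy; a suggested pattern, not part of this commit:

```python
# Suggested pattern (not in this commit): cache the heavyweight loader so
# Streamlit reruns reuse one model instance instead of reloading it.
import streamlit as st
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

@st.cache_resource  # one instance per process, shared across reruns
def load_model():
    name = "Qwen/Qwen2-VL-7B-Instruct"
    processor = AutoProcessor.from_pretrained(name)
    model = Qwen2VLForConditionalGeneration.from_pretrained(
        name, torch_dtype=torch.bfloat16, device_map="auto"  # dtype is an assumption
    )
    return processor, model
```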