import chainlit as cl
from openai import OpenAI
from langsmith.run_helpers import traceable
from langsmith_config import setup_langsmith_config
import base64
import os

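# Plain text messages are answered by GPT-4 Turbo; messages with image
# attachments are routed to GPT-4 Vision. All model calls are traced with LangSmith.
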
# the OpenAI client reads OPENAI_API_KEY from the environment
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "")
model = "gpt-4-1106-preview"
model_vision = "gpt-4-vision-preview"
setup_langsmith_config()
    
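# Extract the first image attached to the message and return it base64-encoded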
def process_images(msg: cl.Message):
    # Processing images exclusively
    images = [file for file in msg.elements if "image" in file.mime]

    # Accessing the bytes of a specific image
    image_bytes = images[0].content # take the first image just for demo purposes
    
    # we need base64 encoded image
    image_base64 = base64.b64encode(image_bytes).decode('utf-8')
    return image_base64

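# Forward streamed completion tokens into the Chainlit message as they arrive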
async def process_stream(stream, msg: cl.Message):
    for part in stream:
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

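# Encode the attached image, append it to the vision history, and start a vision completion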
def handle_vision_call(msg, image_history):
    image_base64 = process_images(msg)
    
    if image_base64:
        # add the image to the image history
        image_history.append(
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": msg.content},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{image_base64}"
                        },
                    },
                ],
            }
        )
        stream = gpt_vision_call(image_history)
        return stream

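# Traced call to the GPT-4 Turbo text model; returns a streaming completion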
@traceable(run_type="llm", name="gpt 4 turbo call")
async def gpt_call(message_history: list = []):
    client = OpenAI()

    stream = client.chat.completions.create(
        model=model,
        messages=message_history,
        stream=True,
    )
    
    return stream

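# Traced call to the GPT-4 Vision model; output is capped at 300 tokens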
@traceable(run_type="llm", name="gpt 4 turbo vision call")
def gpt_vision_call(image_history: list = []):
    client = OpenAI()
  
    stream = client.chat.completions.create(
        model=model_vision,
        messages=image_history,
        max_tokens=300,
        stream=True,
    )

    return stream

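# Initialize separate text and vision histories, each seeded with the same system prompt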
@cl.on_chat_start
def start_chat():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    cl.user_session.set(
        "image_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )

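# Route each incoming message: attachments go to the vision model, plain text to the text model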
@cl.on_message
@traceable(run_type="chain", name="gpt 4 turbo")
async def on_message(msg: cl.Message):
    message_history = cl.user_session.get("message_history")
    image_history = cl.user_session.get("image_history")
    
    stream_msg = cl.Message(content="") 
    stream = None

    if msg.elements:
        stream = handle_vision_call(msg, image_history)

    else:
        # add the message in both to keep the coherence between the two histories
        message_history.append({"role": "user", "content": msg.content})
        image_history.append({"role": "user", "content": msg.content})
        
        stream = await gpt_call(message_history)
    
    if stream:
        await process_stream(stream, msg=stream_msg)
        await stream_msg.send()
        message_history.append({"role": "assistant", "content": stream_msg.content})

    return stream_msg.content