# SafeChat / app.py
# Source: jeremierostan's Hugging Face Space, commit 0f25a8a (verified).
# (Original header was raw page chrome from the HF file viewer — "raw /
# history blame / No virus / 3.42 kB" — converted to a comment so the
# file parses as Python.)
import os
import asyncio
import gradio as gr
from groq import AsyncGroq
# Configuration: API key comes from the environment; one async Groq client
# is shared by the chat and moderation calls below.
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
client = AsyncGroq(api_key=GROQ_API_KEY)
# BUG FIX: model ID was misspelled "lama-3.1-70b-versatile", which the Groq
# API rejects as an unknown model.
chat_model = "llama-3.1-70b-versatile"
moderation_model = "llama-guard-3-8b"
# System prompt framing the assistant as a discussion partner for students.
# (Fixed "relfect" -> "reflect".)
system_prompt = """You are a helpful, respectful assistant engaging in a conversation with students about social issues.
Your goal is to help the student explore social issues and reflect on their causes, consequences, and potential solutions."""
async def moderate_message(message):
    """Classify a user message with Llama Guard 3.

    Args:
        message: The raw user message text to moderate.

    Returns:
        True when the moderation model's verdict is "safe", False otherwise.
        (Llama Guard replies "unsafe" followed by a violation category when
        it flags content — TODO confirm exact output format against the
        model card.)
    """
    response = await client.chat.completions.create(
        model=moderation_model,
        messages=[{
            "role": "user",
            "content": message
        }],
        temperature=0.1,  # near-deterministic classification
        max_tokens=10,    # the verdict fits in a few tokens
    )
    # BUG FIX: removed a leftover debug print of the raw verdict.
    verdict = response.choices[0].message.content.strip().lower()
    # Prefix check instead of strict equality: tolerates trailing tokens
    # after "safe"; "unsafe..." does not match the prefix.
    return verdict.startswith("safe")
async def chat_response(message, history):
    """Stream a moderated assistant reply for the latest user message.

    Args:
        message: The new user message.
        history: Prior turns as [user_text, assistant_text] pairs (Gradio
            Chatbot tuple format); assistant_text may be None for a turn
            that has not been answered yet.

    Yields:
        The accumulated assistant response text as chunks stream in.
        If the message fails moderation, yields a single refusal and stops.
    """
    is_safe = await moderate_message(message)
    if not is_safe:
        yield "I apologize, but I can't respond to that type of message. Let's keep our conversation respectful and appropriate."
        return

    # BUG FIX: the original enumerated `history + [message]` as a flat list
    # of strings, but `bot()` passes history as [user, assistant] pairs, so
    # raw 2-element lists were being sent as message content with bogus role
    # alternation. Flatten the pairs into proper role-tagged messages.
    messages = [{"role": "system", "content": system_prompt}]
    for user_text, assistant_text in history:
        messages.append({"role": "user", "content": user_text})
        if assistant_text is not None:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})

    stream = await client.chat.completions.create(
        model=chat_model,
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=1,
        stream=True,
    )
    response_content = ""
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            response_content += delta
            yield response_content
async def user(message, history):
    """Append the new user turn to the chat history and clear the textbox.

    Returns a pair: the emptied textbox value and the history extended with
    a [message, None] entry (None = assistant reply pending). The incoming
    history list is not mutated.
    """
    updated = list(history)
    updated.append([message, None])
    return "", updated
async def bot(history):
    """Stream the assistant's reply for the most recent user message.

    Fills in the None slot of the last history entry with each partial
    response and re-yields the whole history so the Chatbot UI updates live.
    """
    latest_message = history[-1][0]
    earlier_turns = history[:-1]
    async for partial in chat_response(latest_message, earlier_turns):
        history[-1][1] = partial
        yield history
# UI layout: markdown header, chatbot display, input textbox, send/clear
# buttons, and example prompts.
# BUG FIX: the original passed head=js to gr.Blocks, but no `js` variable is
# defined anywhere in this file, so the script crashed with NameError at
# import time. The argument is removed.
with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
    gr.Markdown("# SafeChat: Moderated AI Conversation")
    gr.Markdown(
        "This chat interface uses Llama Guard 3 to moderate messages and ensure safe interactions."
    )
    chatbot = gr.Chatbot(height=450, show_label=False)
    message = gr.Textbox(placeholder="Type your message here...",
                         label="User Input",
                         show_label=False,
                         container=False)
    submit = gr.Button("Send", variant="primary")
    clear = gr.Button("Clear")
    # Both the Send button and pressing Enter follow the same two-step flow:
    # append the user turn synchronously, then stream the bot reply.
    submit.click(user, [message, chatbot], [message, chatbot],
                 queue=False).then(bot, chatbot, chatbot)
    message.submit(user, [message, chatbot], [message, chatbot],
                   queue=False).then(bot, chatbot, chatbot)
    # Clear wipes the chatbot display (returns None as the new value).
    clear.click(lambda: None, None, chatbot, queue=False)
    gr.Examples(examples=[
        "Why are there poor and rich people in society?",
        "Does race really matter in modern societies?",
        "What is feminism?"
    ],
                inputs=message)
if __name__ == "__main__":
    # Enable request queuing (required for streaming generator handlers),
    # then start the Gradio server.
    demo.queue()
    demo.launch()