from transformers import pipeline
import gradio as gr
# Initialize a text-generation pipeline with the Morph Prover model.
# max_length sets a default cap on the total sequence length; the per-call
# max_new_tokens used below takes precedence when both are given.
chatbot = pipeline(
    "text-generation",
    model="morph-labs/morph-prover-v0-7b",
    max_length=512,  # Increase this as needed
)
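# A minimal sketch (not part of the original app) of how the same pipeline could be
# placed on a GPU when one is available. device_map="auto" requires the `accelerate`
# package, and torch.float16 assumes a CUDA device with enough memory (needs `import torch`).
# chatbot = pipeline(
#     "text-generation",
#     model="morph-labs/morph-prover-v0-7b",
#     torch_dtype=torch.float16,
#     device_map="auto",
# )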
def vanilla_chatbot(message):
    try:
        # Wrap the user message in the Llama-2-style [INST]/<<SYS>> chat template
        formatted_message = f"[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\n{message} [/INST]"
        # Generate the model's response; return_full_text=False drops the prompt
        # from the output so only the completion is returned
        output = chatbot(formatted_message, max_new_tokens=500, return_full_text=False)  # Adjust max_new_tokens as needed
        # Extract the generated text from the first returned sequence
        response = output[0]["generated_text"] if output else "No response generated."
        # Return the response to be displayed in the Gradio interface
        return response
    except Exception as e:
        # Log the exception message
        print(f"An unexpected error occurred: {e}")
        # Return a generic error message
        return "An unexpected error occurred."
# Create a Gradio interface
demo_chatbot = gr.Interface(
    fn=vanilla_chatbot,
    inputs="text",
    outputs="text",
    title="Vanilla Chatbot",
    description="Enter text to start chatting."
)
# Launch the Gradio app
demo_chatbot.launch()
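# Optional variant (an assumption, not in the original file): enabling Gradio's request
# queue helps when generation is slow, and share=True exposes a temporary public URL
# when running locally rather than on a hosted Space.
# demo_chatbot.queue().launch(share=True)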