# app.py — Hugging Face Space by hosseinhimself (commit 9aa7704, 972 bytes).
# NOTE(review): the original file began with raw text scraped from the HF
# file-viewer chrome ("raw / history / blame"), which is not valid Python;
# it has been converted into this comment header so the module parses.
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import gradio as gr
# Hugging Face Hub identifier of the fine-tuned Persian QA model.
MODEL_NAME = "hosseinhimself/tara-roberta-base-fa-qa"

# Load the tokenizer and model from the Hub.
# NOTE: the repo-id string was renamed to MODEL_NAME so it no longer shadows
# the model object bound below (the original reused one `model` name for both
# the string and the loaded model, which is confusing and error-prone).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_NAME)

# Create an extractive question-answering pipeline from the loaded pieces.
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
def answer_question(context, question):
    """Extract the answer to *question* from *context*.

    Delegates to the module-level ``qa_pipeline`` and returns only the
    answer span as a string (the pipeline also reports score/offsets,
    which are discarded here).
    """
    prediction = qa_pipeline(question=question, context=context)
    return prediction["answer"]
# Build the Gradio UI: a long text box for the context passage, a one-line
# box for the question, and a markdown panel for the extracted answer.
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        # Labels added: without them both boxes render with the generic
        # "Textbox" caption, so users can't tell context from question.
        gr.Textbox(lines=10, label="Context", placeholder="Enter context here..."),
        gr.Textbox(lines=1, label="Question", placeholder="Enter question here..."),
    ],
    outputs=gr.Markdown(label="Result"),
    title="Tara Question Answering Model",
    description="This model answers questions based on the provided context.",
)

# Launch the web server (blocks until the app is stopped).
interface.launch()