from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import gradio as gr

model = "hosseinhimself/tara-roberta-base-fa-qa"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model)
model = AutoModelForQuestionAnswering.from_pretrained(model)

# Create a QA pipeline
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)


def answer_question(context, question):
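    # The QA pipeline returns a dict with 'answer', 'score', 'start', and 'end';
    # only the answer text is surfaced to the UI.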
    response = qa_pipeline(question=question, context=context)
    return response['answer']

# Define the Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(lines=10, placeholder="Enter context here...", label="Context"),
        gr.Textbox(lines=1, placeholder="Enter question here...", label="Question")
    ],
    outputs="text",
    title="Tara Question Answering Model",
    description="This model answers questions based on the provided context."
)

# Launch the interface
interface.launch()
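
# Note: on Hugging Face Spaces, running this script is enough to serve the app.
# For local testing, interface.launch(share=True) would additionally create a
# temporary public link (an optional Gradio feature, not required here).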