|
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline |
|
import gradio as gr |
|
|
|
# Hugging Face Hub id of the Persian (Farsi) extractive-QA model.
MODEL_NAME = "hosseinhimself/tara-roberta-base-fa-qa"

# Load the tokenizer and the question-answering head from the same checkpoint.
# (Previously the name `model` held the id string and was then reassigned to
# the model object — a distinct constant avoids that shadowing.)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_NAME)

# Pipeline that extracts an answer span from a context given a question.
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
|
|
|
|
|
def answer_question(context, question):
    """Run the QA pipeline and return the extracted answer span.

    Args:
        context: The passage of text to search for an answer.
        question: The question to answer from *context*.

    Returns:
        The answer string extracted by the pipeline.
    """
    return qa_pipeline(question=question, context=context)["answer"]
|
|
|
|
|
# Gradio UI: two text inputs (context + question) feeding answer_question.
# NOTE: `gr.inputs.Textbox` was deprecated in Gradio 3.x and removed in 4.x;
# components now live in the top-level `gr` namespace.
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(lines=10, placeholder="Enter context here..."),
        gr.Textbox(lines=1, placeholder="Enter question here..."),
    ],
    outputs="text",
    title="Tara Question Answering Model",
    description="This model answers questions based on the provided context.",
)
|
|
|
|
|
# Start the Gradio server only when run as a script, so importing this
# module (e.g. for testing) does not launch the web app.
if __name__ == "__main__":
    interface.launch()
|
|