# app.py: Gradio Blocks playground for the nsethi610/ns-gradio-apps Hugging Face Space
import gradio as gr
from pipeline_utils import task_dropdown_choices, handle_task_change, review_training_choices, test_pipeline
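
# The helpers imported above live in pipeline_utils and are not shown in this file.
# For reference, a minimal sketch of how a test_pipeline-style function could be built on
# transformers.pipeline. This is an assumption about what pipeline_utils.test_pipeline does,
# not its actual implementation; the "_example_" name is hypothetical and kept separate so
# it does not shadow the real import.
def _example_test_pipeline(task, model, prompt, context=""):
    from transformers import pipeline  # local import keeps the sketch self-contained

    pipe = pipeline(task, model=model or None)
    if task == "question-answering":
        # Extractive QA needs both a question and a context to pull the answer from.
        return pipe(question=prompt, context=context)["answer"]
    # Most other text pipelines accept the raw prompt and return a list of result dicts.
    return str(pipe(prompt))
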
playground = gr.Blocks()
def create_playground_header():
    gr.Markdown("""
# 🤗 Hugging Face Playground
**Try your ideas here. Select from Text, Image or Audio**
""")
def create_playground_footer():
    gr.Markdown("""
### To learn more about 🤗 Hugging Face, [Click Here](https://huggingface.co/docs)
### [Click Here](https://huggingface.co/spaces/nsethi610/ns-gradio-apps/discussions/1) to provide feedback or to participate in the development of this tool. Let's make AI easy for everyone.
""")
def create_tabs_header():
    with gr.Row():
        with gr.Column(scale=4):
            radio = gr.Radio(
                ["Use Pipeline", "Fine Tune"],
                label="Select 'Use Pipeline' to try out HF models, or 'Fine Tune' to test them on your own datasets",
                value="Use Pipeline",
                interactive=True,
            )
        with gr.Column(scale=1):
            test_pipeline_button = gr.Button(
                value="Test", variant="primary", size="sm")
    return radio, test_pipeline_button
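
# handle_task_change and review_training_choices are also defined in pipeline_utils.
# Below is a minimal sketch of the contracts implied by the event wiring inside the
# Blocks further down, under the assumption that both return gr.update objects; the
# "_example_" names are hypothetical and only illustrate the expected return shapes.
def _example_handle_task_change(task):
    # One update per output: [context_for_question_answer, model_dropdown, task_dropdown].
    show_context = task == "question-answering"
    return (
        gr.update(visible=show_context),    # show the context box only for extractive QA
        gr.update(choices=[], value=None),  # model choices would be refreshed per task
        gr.update(),                        # leave the task dropdown unchanged
    )


def _example_review_training_choices(choice):
    # The single output is the use_pipeline Row: visible for "Use Pipeline", hidden otherwise.
    return gr.update(visible=choice == "Use Pipeline")
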
with playground:
    create_playground_header()
    with gr.Tabs():
        with gr.TabItem("Text"):
            radio, test_pipeline_button = create_tabs_header()
            with gr.Row(visible=True) as use_pipeline:
                with gr.Column():
                    task_dropdown = gr.Dropdown(
                        choices=task_dropdown_choices(),
                        label="Task",
                        interactive=True,
                        info="Select a pipeline for a natural language processing task, or type your own."
                    )
                    model_dropdown = gr.Dropdown(
                        [], label="Model",
                        info="Select an appropriate model for the task you selected")
                    prompt_textarea = gr.TextArea(
                        label="Prompt",
                        value="Enter your prompt here",
                        text_align="left",
                        info="Copy/paste or type your prompt. Make sure the prompt is clear, or try a few different prompts."
                    )
                    context_for_question_answer = gr.TextArea(
                        label="Context",
                        value="Enter the context for your question here",
                        visible=False,
                        interactive=True,
                        info="Question answering tasks return an answer given a question. If you've ever asked a virtual assistant like Alexa, Siri or Google what the weather is, then you've used a question answering model before. Here we do extractive question answering: the answer is extracted from the given context."
                    )
                    # Swap visible inputs and refresh model choices when the task changes.
                    task_dropdown.change(handle_task_change,
                                         inputs=[task_dropdown],
                                         outputs=[context_for_question_answer,
                                                  model_dropdown, task_dropdown])
                with gr.Column():
                    text = gr.TextArea(label="Generated Text")
            # Toggle the pipeline controls when switching between "Use Pipeline" and "Fine Tune".
            radio.change(review_training_choices,
                         inputs=radio, outputs=use_pipeline)
            test_pipeline_button.click(test_pipeline,
                                       inputs=[
                                           task_dropdown, model_dropdown, prompt_textarea,
                                           context_for_question_answer],
                                       outputs=text)
with gr.TabItem("Image"):
radio, test_pipeline_button = create_tabs_header()
gr.Markdown("""
> WIP
""")
with gr.TabItem("Audio"):
radio, test_pipeline_button = create_tabs_header()
gr.Markdown("""
> WIP
""")
create_playground_footer()
# share=True serves the app locally and also creates a temporary public Gradio link
playground.launch(share=True)