import os
import gradio as gr
import numpy as np
from PIL import Image
from inference.seg import process_image_or_video
from config import SAPIENS_LITE_MODELS_PATH
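
# NOTE (assumption): SAPIENS_LITE_MODELS_PATH in config.py is expected to map each
# lowercase task name to the model versions available for it; the checkpoint paths
# below are hypothetical placeholders for illustration only:
#
#     SAPIENS_LITE_MODELS_PATH = {
#         "seg": {"sapiens_0.3b": "checkpoints/seg/sapiens_0.3b.pt2"},
#         "pose": {"sapiens_0.3b": "checkpoints/pose/sapiens_0.3b.pt2"},
#     }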


def update_model_choices(task):
    """Refresh the model dropdown with the versions available for the selected task."""
    model_choices = list(SAPIENS_LITE_MODELS_PATH[task.lower()].keys())
    return gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)


def gradio_wrapper(input_image, task, version):
    """Run the selected task on the input image and return the result for display."""
    # Gradio may hand the image over as a NumPy array; convert it to PIL before inference.
    if isinstance(input_image, np.ndarray):
        input_image = Image.fromarray(input_image)
    result = process_image_or_video(input_image, task=task.lower(), version=version)
    return result


with gr.Blocks() as demo:
    gr.Markdown("# Sapiens Arena 🤸🏽‍♂️ - WIP devmode - Not yet available")
    with gr.Tabs():
        with gr.TabItem('Image'):
            with gr.Row():
                with gr.Column():
                    input_image = gr.Image(label="Input Image", type="pil")
                    select_task = gr.Radio(
                        ["seg", "pose", "depth", "normal"],
                        label="Task",
                        info="Choose the task to perform",
                        value="seg",
                    )
                    model_name = gr.Dropdown(
                        label="Model Version",
                        choices=list(SAPIENS_LITE_MODELS_PATH["seg"].keys()),
                        value="sapiens_0.3b",
                    )
                with gr.Column():
                    result_image = gr.Image(label="Result")
                    run_button = gr.Button("Run")
        with gr.TabItem('Video'):
            gr.Markdown("In construction")

    # Keep the model dropdown in sync with the selected task, and wire the Run button.
    select_task.change(fn=update_model_choices, inputs=select_task, outputs=model_name)
    run_button.click(
        fn=gradio_wrapper,
        inputs=[input_image, select_task, model_name],
        outputs=[result_image],
    )


if __name__ == "__main__":
    # share=True also requests a temporary public Gradio share link in addition to
    # the local server; drop it to serve locally only.
    demo.launch(share=True)
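
# A quick way to smoke-test the wrapper outside the UI (hypothetical sample image path):
#
#     from PIL import Image
#     print(gradio_wrapper(Image.open("sample.jpg"), "seg", "sapiens_0.3b"))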