# NOTE(review): the three lines above/here ("Spaces:" / "Runtime error") were
# scraped Hugging Face Spaces page residue, not code — replaced with this comment.
from __future__ import annotations

import functools
import os
import tempfile
from pathlib import Path

import diffusers
import gradio
import gradio as gr
import imageio as imageio
import numpy as np
import spaces
import torch as torch
from gradio.utils import get_cache_folder
from gradio_imageslider import ImageSlider
from PIL import Image
from tqdm import tqdm

from infer import lotus
# def process_image_check(path_input): | |
# if path_input is None: | |
# raise gr.Error( | |
# "Missing image in the first pane: upload a file or use one from the gallery below." | |
# ) | |
# def infer(path_input, seed=0): | |
# print(f"==> Processing image {path_input}") | |
# return path_input | |
# return [path_input, path_input] | |
# # name_base, name_ext = os.path.splitext(os.path.basename(path_input)) | |
# # print(f"==> Processing image {name_base}{name_ext}") | |
# # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | |
# # print(f"==> Device: {device}") | |
# # output_g, output_d = lotus(path_input, 'depth', seed, device) | |
# # if not os.path.exists("files/output"): | |
# # os.makedirs("files/output") | |
# # g_save_path = os.path.join("files/output", f"{name_base}_g{name_ext}") | |
# # d_save_path = os.path.join("files/output", f"{name_base}_d{name_ext}") | |
# # output_g.save(g_save_path) | |
# # output_d.save(d_save_path) | |
# # yield [path_input, g_save_path], [path_input, d_save_path] | |
# def run_demo_server(): | |
# gradio_theme = gr.themes.Default() | |
# with gr.Blocks( | |
# theme=gradio_theme, | |
# title="LOTUS (Depth)", | |
# css=""" | |
# #download { | |
# height: 118px; | |
# } | |
# .slider .inner { | |
# width: 5px; | |
# background: #FFF; | |
# } | |
# .viewport { | |
# aspect-ratio: 4/3; | |
# } | |
# .tabs button.selected { | |
# font-size: 20px !important; | |
# color: crimson !important; | |
# } | |
# h1 { | |
# text-align: center; | |
# display: block; | |
# } | |
# h2 { | |
# text-align: center; | |
# display: block; | |
# } | |
# h3 { | |
# text-align: center; | |
# display: block; | |
# } | |
# .md_feedback li { | |
# margin-bottom: 0px !important; | |
# } | |
# """, | |
# head=""" | |
# <script async src="https://www.googletagmanager.com/gtag/js?id=G-1FWSVCGZTG"></script> | |
# <script> | |
# window.dataLayer = window.dataLayer || []; | |
# function gtag() {dataLayer.push(arguments);} | |
# gtag('js', new Date()); | |
# gtag('config', 'G-1FWSVCGZTG'); | |
# </script> | |
# """, | |
# ) as demo: | |
# gr.Markdown( | |
# """ | |
# # LOTUS: Diffusion-based Visual Foundation Model for High-quality Dense Prediction | |
# <p align="center"> | |
# <a title="Page" href="https://lotus3d.github.io/" target="_blank" rel="noopener noreferrer" style="display: inline-block;"> | |
# <img src="https://img.shields.io/badge/Project-Website-pink?logo=googlechrome&logoColor=white"> | |
# </a> | |
# <a title="arXiv" href="https://arxiv.org/abs/2409.18124" target="_blank" rel="noopener noreferrer" style="display: inline-block;"> | |
# <img src="https://img.shields.io/badge/arXiv-Paper-b31b1b?logo=arxiv&logoColor=white"> | |
# </a> | |
# <a title="Github" href="https://github.com/EnVision-Research/Lotus" target="_blank" rel="noopener noreferrer" style="display: inline-block;"> | |
# <img src="https://img.shields.io/github/stars/EnVision-Research/Lotus?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars"> | |
# </a> | |
# <a title="Social" href="https://x.com/haodongli00/status/1839524569058582884" target="_blank" rel="noopener noreferrer" style="display: inline-block;"> | |
# <img src="https://www.obukhov.ai/img/badges/badge-social.svg" alt="social"> | |
# </a> | |
# """ | |
# ) | |
# with gr.Tabs(elem_classes=["tabs"]): | |
# with gr.Tab("IMAGE"): | |
# with gr.Row(): | |
# with gr.Column(): | |
# image_input = gr.Image( | |
# label="Input Image", | |
# type="filepath", | |
# ) | |
# seed = gr.Number( | |
# label="Seed", | |
# minimum=0, | |
# maximum=999999, | |
# ) | |
# with gr.Row(): | |
# image_submit_btn = gr.Button( | |
# value="Predict Depth!", variant="primary" | |
# ) | |
# # image_reset_btn = gr.Button(value="Reset") | |
# with gr.Column(): | |
# image_output_g = gr.Image( | |
# label="Output (Generative)", | |
# type="filepath", | |
# ) | |
# # image_output_g = ImageSlider( | |
# # label="Output (Generative)", | |
# # type="filepath", | |
# # show_download_button=True, | |
# # show_share_button=True, | |
# # interactive=False, | |
# # elem_classes="slider", | |
# # position=0.25, | |
# # ) | |
# # with gr.Row(): | |
# # image_output_d = gr.Image( | |
# # label="Output (Generative)", | |
# # type="filepath", | |
# # ) | |
# # image_output_d = ImageSlider( | |
# # label="Output (Discriminative)", | |
# # type="filepath", | |
# # show_download_button=True, | |
# # show_share_button=True, | |
# # interactive=False, | |
# # elem_classes="slider", | |
# # position=0.25, | |
# # ) | |
# # gr.Examples( | |
# # fn=infer, | |
# # examples=sorted([ | |
# # os.path.join("files", "images", name) | |
# # for name in os.listdir(os.path.join("files", "images")) | |
# # ]), | |
# # inputs=[image_input], | |
# # outputs=[image_output_g], | |
# # cache_examples=True, | |
# # ) | |
# with gr.Tab("VIDEO"): | |
# with gr.Column(): | |
# gr.Markdown("Coming soon") | |
# ### Image | |
# image_submit_btn.click( | |
# fn=infer, | |
# inputs=[ | |
# image_input | |
# ], | |
# outputs=image_output_g, | |
# concurrency_limit=1, | |
# ) | |
# # image_reset_btn.click( | |
# # fn=lambda: ( | |
# # None, | |
# # None, | |
# # None, | |
# # ), | |
# # inputs=[], | |
# # outputs=image_output_g, | |
# # queue=False, | |
# # ) | |
# ### Video | |
# ### Server launch | |
# demo.queue( | |
# api_open=False, | |
# ).launch( | |
# server_name="0.0.0.0", | |
# server_port=7860, | |
# ) | |
# def main(): | |
# os.system("pip freeze") | |
# run_demo_server() | |
# if __name__ == "__main__": | |
# main() | |
def flip_text(x): | |
return x[::-1] | |
def flip_image(x): | |
return np.fliplr(x) | |
with gr.Blocks() as demo: | |
gr.Markdown("Flip text or image files using this demo.") | |
with gr.Tab("Flip Text"): | |
text_input = gr.Textbox() | |
text_output = gr.Textbox() | |
text_button = gr.Button("Flip") | |
with gr.Tab("Flip Image"): | |
with gr.Row(): | |
image_input = gr.Image() | |
image_output = gr.Image() | |
image_button = gr.Button("Flip") | |
with gr.Accordion("Open for More!", open=False): | |
gr.Markdown("Look at me...") | |
temp_slider = gr.Slider( | |
0, 1, | |
value=0.1, | |
step=0.1, | |
interactive=True, | |
label="Slide me", | |
) | |
text_button.click(flip_text, inputs=text_input, outputs=text_output) | |
image_button.click(flip_image, inputs=image_input, outputs=image_output) | |
demo.launch(share=True) | |