# install
import gradio as gr
import os
import subprocess

# if os.getenv('SYSTEM') == 'spaces':
#     # subprocess.run('pip install pyembree'.split())
#     try:
#         import pytorch3d
#     except ImportError:
#         subprocess.run(
#             'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
#             .split()
#         )
#     subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libmesh/")
#     subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libvoxelize/")

from apps.infer import generate_model, generate_video
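
# generate_model() reconstructs the clothed mesh, the SMPL-X body fit, and a normal-overlap
# image from a single photo; generate_video() renders a video from the tensors saved by that
# reconstruction (see the outputs wired up in the Blocks UI below).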

# running
title = '''
# Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
### ECON: Explicit Clothed humans Optimized via Normal integration (CVPR 2023, Highlight)
'''

bottom = '''
#### Citation
```
@inproceedings{xiu2023econ,
  title     = {{ECON: Explicit Clothed humans Optimized via Normal integration}},
  author    = {Xiu, Yuliang and Yang, Jinlong and Cao, Xu and Tzionas, Dimitrios and Black, Michael J.},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  month     = {June},
  year      = {2023},
}
```

<details>
<summary>More</summary>

#### Acknowledgments
- [controlnet-openpose](https://huggingface.co/spaces/diffusers/controlnet-openpose)
- [TEXTure](https://huggingface.co/spaces/TEXTurePaper/TEXTure)

#### Image Credits
* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)

#### Related works
* [ICON @ MPI-IS](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)
</details>

<center>
<a href="https://huggingface.co/spaces/Yuliang/ECON?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-lg-dark.svg"/></a>
<h2> Generate pose & prompt-guided images / Upload photos / Use examples → Submit Image (~3min) → Generate Video (~3min) </h2>
<h2><span style="color:red">ECON is only suitable for "humanoid images" and will not work well on cartoons with non-human shapes.</span></h2>
</center>
'''
description = '''
<table>
<th width="20%">
<ul>
<li><strong>Homepage</strong> <a href="https://econ.is.tue.mpg.de/">econ.is.tue.mpg.de</a></li>
<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ECON">YuliangXiu/ECON</a></li>
<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2212.07422">arXiv</a>, <a href="https://readpaper.com/paper/4736821012688027649">ReadPaper</a></li>
<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
</ul>
<br>
<ul>
<li><strong>Colab Notebook</strong> <a href='https://colab.research.google.com/drive/1YRgwoRCZIrSB2e7auEWFyG10Xzjbrbno?usp=sharing'><img style="display: inline-block;" src='https://colab.research.google.com/assets/colab-badge.svg' alt='Google Colab'></a></li>
<li><strong>Blender Plugin</strong> <a href='https://carlosedubarreto.gumroad.com/l/CEB_ECON'><img style="display: inline-block;" src='https://img.shields.io/badge/Blender-F6DDCC.svg?logo=Blender' alt='Blender'></a></li>
<li><strong>Docker Image</strong> <a href='https://github.com/YuliangXiu/ECON/blob/master/docs/installation-docker.md'><img style="display: inline-block;" src='https://img.shields.io/badge/Docker-9cf.svg?logo=Docker' alt='Docker'></a></li>
<li><strong>Windows Setup</strong> <a href="https://github.com/YuliangXiu/ECON/blob/master/docs/installation-windows.md"><img style="display: inline-block;" src='https://img.shields.io/badge/Windows-00a2ed.svg?logo=Windows' alt='Windows'></a></li>
</ul>
<br>
<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a><br>
<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ECON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
</th>
<th width="40%">
<iframe width="560" height="315" src="https://www.youtube.com/embed/5PEd_p90kS0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</th>
<th width="40%">
<iframe width="560" height="315" src="https://www.youtube.com/embed/sbWZbTf6ZYk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</th>
</table>
'''

from controlnet_aux import OpenposeDetector
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import UniPCMultistepScheduler
import torch
import base64
from io import BytesIO
from PIL import Image

# live conditioning
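# The snippets below back the "webcam" option: canvas_html hosts the <pose-canvas> web
# component, load_js fetches and injects its script into the page, and get_js_image reads
# the pose captured on that canvas and forwards it to the backend through the hidden
# `live_conditioning` JSON component.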
canvas_html = "<pose-canvas id='canvas-root' style='display:flex;max-width: 500px;margin: 0 auto;'></pose-canvas>"
load_js = """
async () => {
    const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/pose-gradio.js"
    fetch(url)
        .then(res => res.text())
        .then(text => {
            const script = document.createElement('script');
            script.type = "module"
            script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
            document.head.appendChild(script);
        });
}
"""
get_js_image = """
async (image_in_img, prompt, image_file_live_opt, live_conditioning) => {
    const canvasEl = document.getElementById("canvas-root");
    const data = canvasEl ? canvasEl._data : null;
    return [image_in_img, prompt, image_file_live_opt, data]
}
"""

# Constants
cached = False

# Models
pose_model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
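
# UniPCMultistepScheduler is a fast multistep sampler; building it from the existing
# scheduler's config swaps the sampler without touching the rest of the pipeline.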

# Model CPU offload loads the individual pipeline components onto the GPU only when they
# are needed, so we don't have to call pipe.to("cuda") explicitly.
pipe.enable_model_cpu_offload()

# xformers memory-efficient attention
pipe.enable_xformers_memory_efficient_attention()

# Generator seed: seeded once at start-up, so the sequence of generations is repeatable
# from a fresh launch of the Space.
generator = torch.manual_seed(0)

hint_prompts = '''
<strong>Hints</strong>: <br>
best quality, extremely detailed, solid color background,
super detail, high detail, edge lighting, soft focus,
light and dark contrast, 8k, edge lighting, 3d, c4d,
blender, oc renderer, ultra high definition, 3d rendering
'''

def get_pose(image):
    # Extract an OpenPose skeleton map, used as the ControlNet conditioning image.
    return pose_model(image)


import sys


def read_logs():
    sys.stdout.flush()
    with open("output.log", "r") as f:
        return f.read()
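
# read_logs() backs the live "Logs" textbox that is currently commented out near the
# bottom of the UI; it assumes stdout is redirected to output.log elsewhere in the app.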

def generate_images(image, prompt, image_file_live_opt='file', live_conditioning=None):
    if image is None and (live_conditioning is None or 'image' not in live_conditioning):
        raise gr.Error("Please provide an image")
    try:
        if image_file_live_opt == 'file':
            pose = get_pose(image)
        elif image_file_live_opt == 'webcam':
            base64_img = live_conditioning['image']
            image_data = base64.b64decode(base64_img.split(',')[1])
            pose = Image.open(BytesIO(image_data)).convert('RGB').resize((512, 512))
        output = pipe(
            prompt,
            pose,
            generator=generator,
            num_images_per_prompt=3,
            num_inference_steps=50,
        )
        all_outputs = [pose]
        for img in output.images:
            all_outputs.append(img)
        return all_outputs, all_outputs
    except Exception as e:
        raise gr.Error(str(e))
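
# generate_images returns the same list twice: once for the visible gallery and once for the
# gallery_cache State, which get_select_index later indexes when a thumbnail is clicked.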

def toggle(choice):
    # Swap between the file-upload image and the live pose canvas.
    if choice == "file":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    elif choice == "webcam":
        return gr.update(visible=False, value=None), gr.update(visible=True, value=canvas_html)


examples_pose = 'examples/pose'
examples_cloth = 'examples/cloth'


def show_video():
    # Reveal the "Generate Video" button and the video player after a reconstruction finishes.
    return gr.update(visible=True), gr.update(visible=True)

with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.HTML(description)
    gr.Markdown(bottom)

    out_lst = []

    with gr.Row():
        with gr.Column():
            with gr.Row():
                live_conditioning = gr.JSON(value={}, visible=False)
                with gr.Column():
                    image_file_live_opt = gr.Radio(
                        ["file", "webcam"],
                        value="file",
                        label="How would you like to upload your image?"
                    )
                    with gr.Row():
                        image_in_img = gr.Image(
                            visible=True, type="pil", label="Image for Pose"
                        )
                        canvas = gr.HTML(None, elem_id="canvas_html", visible=False)

                    image_file_live_opt.change(
                        fn=toggle,
                        inputs=[image_file_live_opt],
                        outputs=[image_in_img, canvas],
                        queue=False
                    )

                    prompt = gr.Textbox(
                        label="Enter your prompt to synthesise the image",
                        max_lines=10,
                        placeholder="best quality, extremely detailed",
                    )

                    gr.Markdown(hint_prompts)

        with gr.Column():
            gallery = gr.Gallery(label="Generated Images", columns=[2], rows=[2])
            gallery_cache = gr.State()

            gr.Markdown(
                '''
<center>
<strong>Click the generated image you want to reconstruct.</strong> <br>
↓
</center>
'''
            )

            inp = gr.Image(type="filepath", label="Input Image for Reconstruction")
            fitting_step = gr.Slider(
                10,
                100,
                step=10,
                label='Fitting steps (slower but better-aligned SMPL-X)',
                value=50
            )

            with gr.Row():
                btn_sample = gr.Button("Generate Image")
                btn_submit = gr.Button("Submit Image (~3min)")

            btn_sample.click(
                fn=generate_images,
                inputs=[image_in_img, prompt, image_file_live_opt, live_conditioning],
                outputs=[gallery, gallery_cache],
                js=get_js_image
            )

            def get_select_index(cache, evt: gr.SelectData):
                return cache[evt.index]

            gallery.select(
                fn=get_select_index,
                inputs=[gallery_cache],
                outputs=[inp],
            )
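
            # Clicking a thumbnail copies that image from gallery_cache into the
            # "Input Image for Reconstruction" slot, which btn_submit then feeds to generate_model.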

            with gr.Row():
                gr.Examples(
                    examples=examples_pose,
                    inputs=[inp],
                    cache_examples=cached,
                    fn=generate_model,
                    outputs=out_lst,
                    label="Hard Pose Examples"
                )

                gr.Examples(
                    examples=examples_cloth,
                    inputs=[inp],
                    cache_examples=cached,
                    fn=generate_model,
                    outputs=out_lst,
                    label="Loose Cloth Examples"
                )
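
            # `cached` is False, so these examples are not pre-computed; clicking one only
            # fills the reconstruction input and the user still presses "Submit Image" to run it.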

        with gr.Column():
            overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human", elem_id="avatar"
            )
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body (via PIXIE)", elem_id="avatar"
            )

            vis_tensor_path = gr.State()

            # logs = gr.Textbox(max_lines=10, label="Logs")

            btn_video = gr.Button("Generate Video (~3min)", visible=False)
            out_vid = gr.Video(label="Share on Twitter with #ECON", visible=False)

    out_lst = [out_smpl, out_final, overlap_inp, vis_tensor_path]

    btn_video.click(
        fn=generate_video,
        inputs=[vis_tensor_path],
        outputs=[out_vid],
    )

    btn_submit.click(fn=generate_model, inputs=[inp, fitting_step], outputs=out_lst)
    btn_submit.click(fn=show_video, outputs=[btn_video, out_vid])
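
    # Submitting an image triggers two handlers: generate_model runs the ECON reconstruction
    # and fills the 3D/overlap outputs, while show_video reveals the "Generate Video" controls.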

    # demo.load(read_logs, None, logs, every=1, queue=True, scroll_to_output=True)
    demo.load(None, None, None, js=load_js)

if __name__ == "__main__":
    demo.queue()
    demo.launch(max_threads=4)
    # demo.launch(max_threads=2, debug=True, server_port=8888, server_name="0.0.0.0")