GUI and cache_clean for locals
Files changed:
- app.py +36 -17
- apps/infer.py +4 -10
- examples/959c4c726a69901ce71b93a9242ed900.png +0 -3
- examples/slack_trial2-000150.png +0 -3
app.py
CHANGED
@@ -26,33 +26,53 @@ from apps.infer import generate_model
 # running
 
 description = '''
-# ICON Clothed Human Digitization
+# ICON Clothed Human Digitization
 ### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
 
-<table
-<
-<
-<
-<
-</
+<table>
+<th>
+<ul>
+<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
+<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a>
+<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a>
+<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a>
+</ul>
+<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
+<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
+<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
+</th>
+<th>
+<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+</th>
 </table>
 
+#### The reconstruction + refinement + video take about 80~120 seconds for single image.
+
+<details>
+
+<summary>More</summary>
+
+#### Citation
+```
+@inproceedings{xiu2022icon,
+  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
+  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
+  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+  month     = {June},
+  year      = {2022},
+  pages     = {13296-13306}
+}
+```
+
 #### Acknowledgments:
 
 - [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
 - [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
 - [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)
 
-#### The reconstruction + refinement + video take about 80 seconds for single image.
-
-<details>
-
-<summary>More</summary>
-
 #### Image Credits
 
 * [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)
-* [Qianli Ma](https://qianlim.github.io/)
 
 #### Related works
 
@@ -72,10 +92,9 @@ def generate_image(seed, psi):
     return img
 
 
-random.seed(
+random.seed(2022)
 model_types = ['icon-filter', 'pifu', 'pamir']
-examples = [[item, random.choice(model_types)] for item in
-sorted(glob.glob('examples/*.png')), 8)]
+examples = [[item, random.choice(model_types)] for item in glob.glob('examples/*.png')]
 
 with gr.Blocks() as demo:
     gr.Markdown(description)
apps/infer.py
CHANGED
@@ -450,16 +450,10 @@ def generate_model(in_path, model_type):
     video_path = os.path.join(config_dict['out_dir'], cfg.name, f"vid/{data['name']}_cloth.mp4")
     overlap_path = os.path.join(config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")
 
-
-
-
-
-    del optimizer_cloth
-    del scheduler_smpl
-    del scheduler_cloth
-    del losses
-    del in_tensor
-
+    # clean all the variables
+    for element in dir():
+        if 'path' not in element:
+            del locals()[element]
     torch.cuda.empty_cache()
 
     return [smpl_path, smpl_path, smpl_npy_path, recon_path, recon_path, refine_path, refine_path, video_path, overlap_path]
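The added loop drops every local whose name does not contain 'path' before calling `torch.cuda.empty_cache()`, so only the output paths survive until the return statement. The standalone sketch below illustrates the effect this kind of cleanup aims for: deleting the last Python reference frees the tensor's allocation, and `empty_cache()` then returns the allocator's cached blocks to the driver. The tensor size and variable name are arbitrary, chosen only for the demonstration and not taken from `infer.py`.

```python
import gc

import torch


def report(tag: str) -> None:
    # memory_allocated: bytes held by live tensors; memory_reserved: bytes cached by the allocator
    print(f"{tag}: allocated={torch.cuda.memory_allocated() >> 20} MiB, "
          f"reserved={torch.cuda.memory_reserved() >> 20} MiB")


if torch.cuda.is_available():
    x = torch.randn(4096, 4096, device="cuda")  # ~64 MiB of float32
    report("after allocation")

    del x                        # drop the only reference to the tensor
    gc.collect()                 # make sure Python frees it immediately
    report("after del")          # allocated drops; reserved (the cache) may not

    torch.cuda.empty_cache()     # hand cached, unused blocks back to the CUDA driver
    report("after empty_cache")  # reserved drops as well
```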
examples/959c4c726a69901ce71b93a9242ed900.png
DELETED
(binary image tracked with Git LFS)

examples/slack_trial2-000150.png
DELETED
(binary image tracked with Git LFS)