Ashrafb committed
Commit 42a199f
1 parent: 400396e

Update app.py

Files changed (1)
app.py +24 -281
app.py CHANGED
@@ -1,291 +1,34 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import argparse
-import pathlib
 import torch
-import gradio as gr
-
-from vtoonify_model import Model
-
-def parse_args() -> argparse.Namespace:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--theme', type=str)
-    parser.add_argument('--share', action='store_true')
-    parser.add_argument('--port', type=int)
-    parser.add_argument('--disable-queue',
-                        dest='enable_queue',
-                        action='store_false')
-    return parser.parse_args()
-
-DESCRIPTION = '''
-<div align=center>
-<h1 style="font-weight: 900; margin-bottom: 7px;">
-Portrait Style Transfer with <a href="https://github.com/williamyang1991/VToonify">VToonify</a>
-</h1>
-<p>For faster inference without waiting in queue, you may duplicate the space and use the GPU setting.
-<br/>
-<a href="https://huggingface.co/spaces/PKUWilliamYang/VToonify?duplicate=true">
-<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-<p/>
-<video id="video" width=50% controls="" preload="none" poster="https://repository-images.githubusercontent.com/534480768/53715b0f-a2df-4daa-969c-0e74c102d339">
-<source id="mp4" src="https://user-images.githubusercontent.com/18130694/189483939-0fc4a358-fb34-43cc-811a-b22adb820d57.mp4
-" type="video/mp4">
-</videos>
-</div>
-'''
-FOOTER = '<div align=center><img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.laobi.icu/badge?page_id=williamyang1991/VToonify" /></div>'
-
-ARTICLE = r"""
-If VToonify is helpful, please help to ⭐ the <a href='https://github.com/williamyang1991/VToonify' target='_blank'>Github Repo</a>. Thanks!
-[![GitHub Stars](https://img.shields.io/github/stars/williamyang1991/VToonify?style=social)](https://github.com/williamyang1991/VToonify)
----
-📝 **Citation**
-If our work is useful for your research, please consider citing:
-```bibtex
-@article{yang2022Vtoonify,
-  title={VToonify: Controllable High-Resolution Portrait Video Style Transfer},
-  author={Yang, Shuai and Jiang, Liming and Liu, Ziwei and Loy, Chen Change},
-  journal={ACM Transactions on Graphics (TOG)},
-  volume={41},
-  number={6},
-  articleno={203},
-  pages={1--15},
-  year={2022},
-  publisher={ACM New York, NY, USA},
-  doi={10.1145/3550454.3555437},
-}
-```
-
-📋 **License**
-This project is licensed under <a rel="license" href="https://github.com/williamyang1991/VToonify/blob/main/LICENSE.md">S-Lab License 1.0</a>.
-Redistribution and use for non-commercial purposes should follow this license.
-
-📧 **Contact**
-If you have any questions, please feel free to reach me out at <b>[email protected]</b>.
-"""
-
-def update_slider(choice: str) -> dict:
-    if type(choice) == str and choice.endswith('-d'):
-        return gr.Slider.update(maximum=1, minimum=0, value=0.5)
-    else:
-        return gr.Slider.update(maximum=0.5, minimum=0.5, value=0.5)
-
-def set_example_image(example: list) -> dict:
-    return gr.Image.update(value=example[0])
-
-def set_example_video(example: list) -> dict:
-    return gr.Video.update(value=example[0]),
-
-sample_video = ['./vtoonify/data/529_2.mp4','./vtoonify/data/7154235.mp4','./vtoonify/data/651.mp4','./vtoonify/data/908.mp4']
-sample_vid = gr.Video(label='Video file') #for displaying the example
-example_videos = gr.components.Dataset(components=[sample_vid], samples=[[path] for path in sample_video], type='values', label='Video Examples')
-
-def main():
-    args = parse_args()
-    args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    print('*** Now using %s.'%(args.device))
-    model = Model(device=args.device)
-
-    with gr.Blocks(theme=args.theme, css='style.css') as demo:
-
-        gr.Markdown(DESCRIPTION)
-
-        with gr.Box():
-            gr.Markdown('''## Step 1(Select Style)
-- Select **Style Type**.
-- Type with `-d` means it supports style degree adjustment.
-- Type without `-d` usually has better toonification quality.
-''')
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown('''Select Style Type''')
-                    with gr.Row():
-                        style_type = gr.Radio(label='Style Type',
-                                              choices=['cartoon1','cartoon1-d','cartoon2-d','cartoon3-d',
-                                                       'cartoon4','cartoon4-d','cartoon5-d','comic1-d',
-                                                       'comic2-d','arcane1','arcane1-d','arcane2', 'arcane2-d',
-                                                       'caricature1','caricature2','pixar','pixar-d',
-                                                       'illustration1-d', 'illustration2-d', 'illustration3-d', 'illustration4-d', 'illustration5-d',
-                                                       ]
-                                              )
-                        exstyle = gr.Variable()
-                    with gr.Row():
-                        loadmodel_button = gr.Button('Load Model')
-                    with gr.Row():
-                        load_info = gr.Textbox(label='Process Information', interactive=False, value='No model loaded.')
-                with gr.Column():
-                    gr.Markdown('''Reference Styles
-![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/style.jpg)''')
-
-        with gr.Box():
-            gr.Markdown('''## Step 2 (Preprocess Input Image / Video)
-- Drop an image/video containing a near-frontal face to the **Input Image**/**Input Video**.
-- Hit the **Rescale Image**/**Rescale First Frame** button.
-  - Rescale the input to make it best fit the model.
-  - The final image result will be based on this **Rescaled Face**. Use padding parameters to adjust the background space.
-  - **<font color=red>Solution to [Error: no face detected!]</font>**: VToonify uses dlib.get_frontal_face_detector but sometimes it fails to detect a face. You can try several times or use other images until a face is detected, then switch back to the original image.
-- For video input, further hit the **Rescale Video** button.
-  - The final video result will be based on this **Rescaled Video**. To avoid overload, video is cut to at most **100/300** frames for CPU/GPU, respectively.
-''')
-            with gr.Row():
-                with gr.Box():
-                    with gr.Column():
-                        gr.Markdown('''Choose the padding parameters.
-![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/rescale.jpg)''')
-                        with gr.Row():
-                            top = gr.Slider(128,
-                                            256,
-                                            value=200,
-                                            step=8,
-                                            label='top')
-                        with gr.Row():
-                            bottom = gr.Slider(128,
-                                               256,
-                                               value=200,
-                                               step=8,
-                                               label='bottom')
-                        with gr.Row():
-                            left = gr.Slider(128,
-                                             256,
-                                             value=200,
-                                             step=8,
-                                             label='left')
-                        with gr.Row():
-                            right = gr.Slider(128,
-                                              256,
-                                              value=200,
-                                              step=8,
-                                              label='right')
-                with gr.Box():
-                    with gr.Column():
-                        gr.Markdown('''Input''')
-                        with gr.Row():
-                            input_image = gr.Image(label='Input Image',
-                                                   type='filepath')
-                        with gr.Row():
-                            preprocess_image_button = gr.Button('Rescale Image')
-                        with gr.Row():
-                            input_video = gr.Video(label='Input Video',
-                                                   mirror_webcam=False,
-                                                   type='filepath')
-                        with gr.Row():
-                            preprocess_video0_button = gr.Button('Rescale First Frame')
-                            preprocess_video1_button = gr.Button('Rescale Video')
-
-                with gr.Box():
-                    with gr.Column():
-                        gr.Markdown('''View''')
-                        with gr.Row():
-                            input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
-                        with gr.Row():
-                            aligned_face = gr.Image(label='Rescaled Face',
-                                                    type='numpy',
-                                                    interactive=False)
-                            instyle = gr.Variable()
-                        with gr.Row():
-                            aligned_video = gr.Video(label='Rescaled Video',
-                                                     type='mp4',
-                                                     interactive=False)
-            with gr.Row():
-                with gr.Column():
-                    paths = ['./vtoonify/data/pexels-andrea-piacquadio-733872.jpg','./vtoonify/data/i5R8hbZFDdc.jpg','./vtoonify/data/yRpe13BHdKw.jpg','./vtoonify/data/ILip77SbmOE.jpg','./vtoonify/data/077436.jpg','./vtoonify/data/081680.jpg']
-                    example_images = gr.Dataset(components=[input_image],
-                                                samples=[[path] for path in paths],
-                                                label='Image Examples')
-                with gr.Column():
-                    #example_videos = gr.Dataset(components=[input_video], samples=[['./vtoonify/data/529.mp4']], type='values')
-                    #to render video example on mouse hover/click
-                    example_videos.render()
-                    #to load sample video into input_video upon clicking on it
-                    def load_examples(video):
-                        #print("****** inside load_example() ******")
-                        #print("in_video is : ", video[0])
-                        return video[0]
-
-                    example_videos.click(load_examples, example_videos, input_video)
-
-        with gr.Box():
-            gr.Markdown('''## Step 3 (Generate Style Transferred Image/Video)''')
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown('''
-- Adjust **Style Degree**.
-- Hit **Toonify!** to toonify one frame. Hit **VToonify!** to toonify full video.
-- Estimated time on 1600x1440 video of 300 frames: 1 hour (CPU); 2 mins (GPU)
-''')
-                    style_degree = gr.Slider(0,
-                                             1,
-                                             value=0.5,
-                                             step=0.05,
-                                             label='Style Degree')
-                with gr.Column():
-                    gr.Markdown('''![example](https://raw.githubusercontent.com/williamyang1991/tmpfile/master/vtoonify/degree.jpg)
-''')
-            with gr.Row():
-                output_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        result_face = gr.Image(label='Result Image',
-                                               type='numpy',
-                                               interactive=False)
-                    with gr.Row():
-                        toonify_button = gr.Button('Toonify!')
-                with gr.Column():
-                    with gr.Row():
-                        result_video = gr.Video(label='Result Video',
-                                                type='mp4',
-                                                interactive=False)
-                    with gr.Row():
-                        vtoonify_button = gr.Button('VToonify!')
-
-        gr.Markdown(ARTICLE)
-        gr.Markdown(FOOTER)
-
-        loadmodel_button.click(fn=model.load_model,
-                               inputs=[style_type],
-                               outputs=[exstyle, load_info])
-
-        style_type.change(fn=update_slider,
-                          inputs=style_type,
-                          outputs=style_degree)
-
-        preprocess_image_button.click(fn=model.detect_and_align_image,
-                                      inputs=[input_image, top, bottom, left, right],
-                                      outputs=[aligned_face, instyle, input_info])
-        preprocess_video0_button.click(fn=model.detect_and_align_video,
-                                       inputs=[input_video, top, bottom, left, right],
-                                       outputs=[aligned_face, instyle, input_info])
-        preprocess_video1_button.click(fn=model.detect_and_align_full_video,
-                                       inputs=[input_video, top, bottom, left, right],
-                                       outputs=[aligned_video, instyle, input_info])
-
-        toonify_button.click(fn=model.image_toonify,
-                             inputs=[aligned_face, instyle, exstyle, style_degree, style_type],
-                             outputs=[result_face, output_info])
-        vtoonify_button.click(fn=model.video_tooniy,
-                              inputs=[aligned_video, instyle, exstyle, style_degree, style_type],
-                              outputs=[result_video, output_info])
-
-        example_images.click(fn=set_example_image,
-                             inputs=example_images,
-                             outputs=example_images.components)
-
-    demo.launch(
-        enable_queue=args.enable_queue,
-        server_port=args.port,
-        share=args.share,
-    )
-
-if __name__ == '__main__':
-    main()
+from fastapi import FastAPI, File, UploadFile
+from fastapi.responses import FileResponse, Response
+from fastapi.staticfiles import StaticFiles
+import shutil
+from io import BytesIO
 import torch
+from PIL import Image
+
+from vtoonify_model import cacartoon1
+
+app = FastAPI()
+
+model = cacartoon1(device='cuda' if torch.cuda.is_available() else 'cpu')
+
+def generate_cartoon(image_bytes: bytes) -> bytes:
+    image = Image.open(BytesIO(image_bytes))
+    cartoon_image = model.generate_cartoon(image)
+    with BytesIO() as output:
+        cartoon_image.save(output, format="PNG")
+        return output.getvalue()
+
+@app.post("/upload/")
+async def upload_image(file: UploadFile = File(...)):
+    contents = await file.read()
+    result_bytes = generate_cartoon(contents)
+    # Return the PNG directly; raw bytes are not JSON-serializable.
+    return Response(content=result_bytes, media_type="image/png")
+
+@app.get("/")
+def index() -> FileResponse:
+    return FileResponse(path="/app/AB/index.html", media_type="text/html")
+
+# Mount the static frontend last so the catch-all "/" mount does not shadow the routes above.
+app.mount("/", StaticFiles(directory="AB", html=True), name="static")
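
For reference, a minimal client sketch for exercising the new /upload/ endpoint. This is not part of the commit: the uvicorn launch command, the host and port (7860, the usual Spaces port), and the file names are illustrative assumptions.

# Hypothetical client for the /upload/ endpoint added in this commit.
# Assumes the app is running locally, e.g.:
#   uvicorn app:app --host 0.0.0.0 --port 7860
import requests

with open("sample.jpg", "rb") as f:  # any local portrait photo
    resp = requests.post(
        "http://localhost:7860/upload/",
        files={"file": ("sample.jpg", f, "image/jpeg")},
    )
resp.raise_for_status()

# The endpoint responds with the toonified image as PNG bytes.
with open("result.png", "wb") as out:
    out.write(resp.content)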