hysts (HF staff) committed
Commit
8eb5d1d
1 Parent(s): 424257d
Files changed (8)
  1. .pre-commit-config.yaml +36 -0
  2. .style.yapf +5 -0
  3. .vscode/settings.json +18 -0
  4. LICENSE +21 -0
  5. README.md +2 -1
  6. app.py +132 -0
  7. requirements.txt +9 -0
  8. style.css +9 -0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,36 @@
+ repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+   rev: v4.2.0
+   hooks:
+   - id: check-executables-have-shebangs
+   - id: check-json
+   - id: check-merge-conflict
+   - id: check-shebang-scripts-are-executable
+   - id: check-toml
+   - id: check-yaml
+   - id: double-quote-string-fixer
+   - id: end-of-file-fixer
+   - id: mixed-line-ending
+     args: ['--fix=lf']
+   - id: requirements-txt-fixer
+   - id: trailing-whitespace
+ - repo: https://github.com/myint/docformatter
+   rev: v1.4
+   hooks:
+   - id: docformatter
+     args: ['--in-place']
+ - repo: https://github.com/pycqa/isort
+   rev: 5.12.0
+   hooks:
+   - id: isort
+ - repo: https://github.com/pre-commit/mirrors-mypy
+   rev: v0.991
+   hooks:
+   - id: mypy
+     args: ['--ignore-missing-imports']
+     additional_dependencies: ['types-python-slugify']
+ - repo: https://github.com/google/yapf
+   rev: v0.32.0
+   hooks:
+   - id: yapf
+     args: ['--parallel', '--in-place']
.style.yapf ADDED
@@ -0,0 +1,5 @@
+ [style]
+ based_on_style = pep8
+ blank_line_before_nested_class_or_def = false
+ spaces_before_comment = 2
+ split_before_logical_operator = true
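As a quick illustration (hypothetical code, not part of the commit), this is roughly how the style above formats Python: inline comments get two spaces before the '#', no blank line is forced before a nested def, and boolean expressions that must wrap break before the logical operator rather than after it.

def is_valid_size(width: int, height: int, max_area: int = 576 * 320) -> bool:
    def multiple_of_eight(value: int) -> bool:  # two spaces before this comment
        return value % 8 == 0
    # blank_line_before_nested_class_or_def = false: no blank line above the
    # nested def; split_before_logical_operator = true: the wrapped condition
    # below breaks before 'and'.
    return (multiple_of_eight(width) and multiple_of_eight(height)
            and width * height <= max_area)

The .vscode/settings.json added next passes the same options to yapf inline, so the editor and the pre-commit hook agree on formatting.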
.vscode/settings.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "python.linting.enabled": true,
+     "python.linting.flake8Enabled": true,
+     "python.linting.pylintEnabled": false,
+     "python.linting.lintOnSave": true,
+     "python.formatting.provider": "yapf",
+     "python.formatting.yapfArgs": [
+         "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
+     ],
+     "[python]": {
+         "editor.formatOnType": true,
+         "editor.codeActionsOnSave": {
+             "source.organizeImports": true
+         }
+     },
+     "editor.formatOnSave": true,
+     "files.insertFinalNewline": true
+ }
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 hysts
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: Zeroscope V2 XL
+ title: Zeroscope V2
  emoji: 🌖
  colorFrom: gray
  colorTo: purple
@@ -8,6 +8,7 @@ sdk_version: 3.35.2
  app_file: app.py
  pinned: false
  license: mit
+ suggested_hardware: t4-small
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,132 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import os
+ import random
+ import tempfile
+
+ import gradio as gr
+ import imageio
+ import numpy as np
+ import torch
+ from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+
+ DESCRIPTION = '# zeroscope v2'
+ if not torch.cuda.is_available():
+     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+ if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
+     DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+ MAX_NUM_FRAMES = int(os.getenv('MAX_NUM_FRAMES', '200'))
+ DEFAULT_NUM_FRAMES = min(MAX_NUM_FRAMES,
+                          int(os.getenv('DEFAULT_NUM_FRAMES', '24')))
+ MAX_SEED = np.iinfo(np.int32).max
+
+ pipe = DiffusionPipeline.from_pretrained('cerspense/zeroscope_v2_576w',
+                                          torch_dtype=torch.float16)
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+ pipe.enable_model_cpu_offload()
+ pipe.enable_vae_slicing()
+
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+
+ def to_video(frames: list[np.ndarray], fps: int) -> str:
+     out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
+     writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
+     for frame in frames:
+         writer.append_data(frame)
+     writer.close()
+     return out_file.name
+
+
+ def generate(prompt: str, seed: int, num_frames: int,
+              num_inference_steps: int) -> str:
+     generator = torch.Generator().manual_seed(seed)
+     frames = pipe(prompt,
+                   num_inference_steps=num_inference_steps,
+                   num_frames=num_frames,
+                   width=576,
+                   height=320,
+                   generator=generator).frames
+     return to_video(frames, 8)
+
+
+ examples = [
+     ['An astronaut riding a horse', 0, 24, 25],
+     ['A panda eating bamboo on a rock', 0, 24, 25],
+     ['Spiderman is surfing', 0, 24, 25],
+ ]
+
+ with gr.Blocks(css='style.css') as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Box():
+         with gr.Row():
+             prompt = gr.Text(label='Prompt',
+                              show_label=False,
+                              max_lines=1,
+                              placeholder='Enter your prompt',
+                              container=False)
+             run_button = gr.Button('Generate video', scale=0)
+         result = gr.Video(label='Result', show_label=False)
+         with gr.Accordion('Advanced options', open=False):
+             seed = gr.Slider(label='Seed',
+                              minimum=0,
+                              maximum=MAX_SEED,
+                              step=1,
+                              value=0)
+             randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+             num_frames = gr.Slider(
+                 label='Number of frames',
+                 minimum=24,
+                 maximum=MAX_NUM_FRAMES,
+                 step=1,
+                 value=24,
+                 info=
+                 'Note that the content of the video also changes when you change the number of frames.'
+             )
+             num_inference_steps = gr.Slider(label='Number of inference steps',
+                                             minimum=10,
+                                             maximum=50,
+                                             step=1,
+                                             value=25)
+
+     inputs = [
+         prompt,
+         seed,
+         num_frames,
+         num_inference_steps,
+     ]
+     gr.Examples(examples=examples,
+                 inputs=inputs,
+                 outputs=result,
+                 fn=generate,
+                 cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
+
+     prompt.submit(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+     ).then(
+         fn=generate,
+         inputs=inputs,
+         outputs=result,
+         api_name='run',
+     )
+     run_button.click(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+     ).then(
+         fn=generate,
+         inputs=inputs,
+         outputs=result,
+     )
+ demo.queue(max_size=10).launch()
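Because the prompt.submit chain registers the generation step under api_name='run', the Space can also be driven programmatically once it is running. A minimal client-side sketch using gradio_client (the Space id below is an assumption; substitute the actual one):

from gradio_client import Client

client = Client('hysts/zeroscope-v2')  # hypothetical Space id
video_path = client.predict(
    'An astronaut riding a horse',  # prompt
    0,                              # seed
    24,                             # num_frames
    25,                             # num_inference_steps
    api_name='/run',
)
print(video_path)  # local path to the downloaded .mp4

Note that only the generate step is named, so an API call uses the seed exactly as given; the randomize-seed step only runs for the UI events.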
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ accelerate==0.20.3
+ diffusers==0.17.1
+ gradio==3.35.2
+ huggingface-hub==0.15.1
+ imageio[ffmpeg]==2.31.1
+ torch==2.0.1
+ torchvision==0.15.2
+ transformers==4.30.2
+ xformers==0.0.20
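xformers is pinned alongside torch 2.0.1 but is never switched on in app.py, which relies on model CPU offload and VAE slicing instead. If one wanted to route attention through it, diffusers exposes a toggle on the pipeline; a hedged sketch (assumes a CUDA GPU and the pinned versions above, and is not part of this commit):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained('cerspense/zeroscope_v2_576w',
                                         torch_dtype=torch.float16)
# Optional: use xformers memory-efficient attention (the package pinned above).
pipe.enable_xformers_memory_efficient_attention()
pipe.to('cuda')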
style.css ADDED
@@ -0,0 +1,9 @@
+ h1 {
+   text-align: center;
+ }
+
+ #component-0 {
+   max-width: 730px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }