Final code cleanings
- README.md +460 -10
- app.py +19 -12
- app_canny.py +23 -11
- app_canny_db.py +20 -12
- app_pix2pix_video.py +28 -17
- app_pose.py +21 -12
- app_text_to_video.py +37 -56
- environment.yaml +45 -0
- gradio_utils.py +11 -2
- hf_utils.py +39 -0
- model.py +35 -18
- models/cldm_v15.yaml +0 -81
- models/cldm_v15_no_cf_attn.yaml +0 -81
- models/cldm_v21.yaml +0 -85
- requirements.txt +6 -5
- text_to_video/text_to_video_pipeline.py → text_to_video_pipeline.py +0 -17
README.md
CHANGED
@@ -1,13 +1,463 @@
1 |
---
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
|
|
|
|
11 |
---
|
12 |
|
13 |
-
|
1 |
+
|
2 |
+
|
3 |
+
|
4 |
+
# Text2Video-Zero
|
5 |
+
|
6 |
+
This repository is the official implementation of [Text2Video-Zero](https://arxiv.org/abs/2303.13439).
|
7 |
+
|
8 |
+
|
9 |
+
**[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://arxiv.org/abs/2303.13439)**
|
10 |
+
</br>
|
11 |
+
Levon Khachatryan,
|
12 |
+
Andranik Movsisyan,
|
13 |
+
Vahram Tadevosyan,
|
14 |
+
Roberto Henschel,
|
15 |
+
[Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com)
|
16 |
+
</br>
|
17 |
+
|
18 |
+
[Paper](https://arxiv.org/abs/2303.13439) | [Video](https://www.dropbox.com/s/uv90mi2z598olsq/Text2Video-Zero.MP4?dl=0) | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/PAIR/Text2Video-Zero) | [Project](https://text2video-zero.github.io/)
|
19 |
+
|
20 |
+
|
21 |
+
<p align="center">
|
22 |
+
<img src="__assets__/github/teaser/teaser_final.png" width="800px"/>
|
23 |
+
<br>
|
24 |
+
<em>Our method Text2Video-Zero enables zero-shot video generation using (i) a textual prompt (see rows 1, 2), (ii) a prompt combined with guidance from poses or edges (see lower right), and (iii) Video Instruct-Pix2Pix, i.e., instruction-guided video editing (see lower left).
|
25 |
+
Results are temporally consistent and follow closely the guidance and textual prompts.</em>
|
26 |
+
</p>
|
27 |
+
|
28 |
+
## News
|
29 |
+
|
30 |
+
* [03/23/2023] Paper [Text2Video-Zero](https://arxiv.org/abs/2303.13439) released!
|
31 |
+
* [03/25/2023] The [first version](https://huggingface.co/spaces/PAIR/Text2Video-Zero) of our huggingface demo (containing `zero-shot text-to-video generation` and `Video Instruct Pix2Pix`) released!
|
32 |
+
* [03/27/2023] The [full version](https://huggingface.co/spaces/PAIR/Text2Video-Zero) of our huggingface demo released! Now also included: `text and pose conditional video generation`, `text and canny-edge conditional video generation`, and
|
33 |
+
`text, canny-edge and dreambooth conditional video generation`.
|
34 |
+
* [03/28/2023] Code for all our generation methods released! We added a new low-memory setup. Minimum required GPU VRAM is currently **12 GB**. It will be further reduced in the upcoming releases.
|
35 |
+
* [03/29/2023] Improved [Huggingface demo](https://huggingface.co/spaces/PAIR/Text2Video-Zero)! (i) For text-to-video generation, **any base model for stable diffusion** and **any dreambooth model** hosted on huggingface can now be loaded! (ii) We improved the quality of Video Instruct-Pix2Pix. (iii) We added two longer examples for Video Instruct-Pix2Pix.
|
36 |
+
* [03/30/2023] New code released! It includes all improvements of our latest huggingface iteration. See the news update from `03/29/2023`. In addition, generated videos (text-to-video) can have **arbitrary length**.
|
37 |
+
|
38 |
+
|
39 |
+
## Contribute
|
40 |
+
We are on a journey to democratize AI and empower the creativity of everyone, and we believe Text2Video-Zero is a great research direction to unleash the zero-shot video generation and editing capacity of the amazing text-to-image models!
|
41 |
+
|
42 |
+
To achieve this goal, all contributions are welcome. Please check out these external implementations and extensions of Text2Video-Zero. We thank the authors for their efforts and contributions:
|
43 |
+
* https://github.com/JiauZhang/Text2Video-Zero
|
44 |
+
* https://github.com/camenduru/text2video-zero-colab
|
45 |
+
* https://github.com/SHI-Labs/Text2Video-Zero-sd-webui
|
46 |
+
|
47 |
+
|
48 |
+
|
49 |
+
## Setup
|
50 |
+
|
51 |
+
|
52 |
+
1. Clone this repository and enter it:
|
53 |
+
|
54 |
+
```shell
|
55 |
+
git clone https://github.com/Picsart-AI-Research/Text2Video-Zero.git
|
56 |
+
cd Text2Video-Zero/
|
57 |
+
```
|
58 |
+
2. Install requirements using Python 3.9 and CUDA >= 11.6
|
59 |
+
```shell
|
60 |
+
virtualenv --system-site-packages -p python3.9 venv
|
61 |
+
source venv/bin/activate
|
62 |
+
pip install -r requirements.txt
|
63 |
+
```
|
64 |
+
|
65 |
+
|
66 |
+
<!--- Installing [xformers](https://github.com/facebookresearch/xformers) is highly recommended for more efficiency and speed on GPUs.
|
67 |
+
|
68 |
+
### Weights
|
69 |
+
|
70 |
+
#### Text-To-Video with Pose Guidance
|
71 |
+
|
72 |
+
Download the pose model weights used in [ControlNet](https://arxiv.org/abs/2302.05543):
|
73 |
+
```shell
|
74 |
+
wget -P annotator/ckpts https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth
|
75 |
+
wget -P annotator/ckpts https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth
|
76 |
+
```
|
77 |
+
|
78 |
+
|
79 |
+
<!---
|
80 |
+
#### Text-To-Video
|
81 |
+
Any [Stable Diffusion](https://arxiv.org/abs/2112.10752) v1.4 model weights in huggingface format can be used and must be placed in `models/text-to-video`.
|
82 |
+
For instance:
|
83 |
+
|
84 |
+
```shell
|
85 |
+
git lfs install
|
86 |
+
git clone https://huggingface.co/CompVis/stable-diffusion-v1-4 model_weights
|
87 |
+
mv model_weights models/text-to-video
|
88 |
+
```
|
89 |
+
|
90 |
+
#### Video Instruct-Pix2Pix
|
91 |
+
From [Instruct-Pix2Pix](https://arxiv.org/pdf/2211.09800.pdf) download pretrained model files:
|
92 |
+
```shell
|
93 |
+
git lfs install
|
94 |
+
git clone https://huggingface.co/timbrooks/instruct-pix2pix models/instruct-pix2pix
|
95 |
+
```
|
96 |
+
|
97 |
+
#### Text-To-Video with Pose Guidance
|
98 |
+
From [ControlNet](https://arxiv.org/abs/2302.05543), download the open pose model file:
|
99 |
+
```shell
|
100 |
+
mkdir -p models/control
|
101 |
+
wget -P models/control https://huggingface.co/lllyasviel/ControlNet/resolve/main/models/control_sd15_openpose.pth
|
102 |
+
```
|
103 |
+
#### Text-To-Video with Edge Guidance
|
104 |
+
From [ControlNet](https://arxiv.org/abs/2302.05543), download the Canny edge model file:
|
105 |
+
```shell
|
106 |
+
mkdir -p models/control
|
107 |
+
wget -P models/control https://huggingface.co/lllyasviel/ControlNet/resolve/main/models/control_sd15_canny.pth
|
108 |
+
```
|
109 |
+
|
110 |
+
|
111 |
+
### Weights
|
112 |
+
|
113 |
+
|
114 |
+
#### Text-To-Video with Edge Guidance and Dreambooth
|
115 |
+
|
116 |
+
|
117 |
+
|
118 |
+
We provide already prepared model files derived from CIVITAI for `anime` (keyword `1girl`), `arcane style` (keyword `arcane style`) `avatar` (keyword `avatar style`) and `gta-5 style` (keyword `gtav style`).
|
119 |
+
--->
|
120 |
+
|
121 |
+
<!---
|
122 |
+
To this end, download the model files from [google drive](https://drive.google.com/drive/folders/1uwXNjJ-4Ws6pqyjrIWyVPWu_u4aJrqt8?usp=share_link) and extract them into `models/control_db/`.
|
123 |
+
--->
|
124 |
+
|
125 |
+
|
126 |
+
|
127 |
+
## Inference API
|
128 |
+
|
129 |
+
To run inference, create an instance of the `Model` class:
|
130 |
+
```python
|
131 |
+
import torch
|
132 |
+
from model import Model
|
133 |
+
|
134 |
+
model = Model(device = "cuda", dtype = torch.float16)
|
135 |
+
```
|
136 |
+
|
137 |
+
---
|
138 |
+
|
139 |
+
|
140 |
+
### Text-To-Video
|
141 |
+
To directly call our text-to-video generator, run this python command, which stores the result in `./text2video_A_horse_galloping_on_a_street.mp4`:
|
142 |
+
```python
|
143 |
+
prompt = "A horse galloping on a street"
|
144 |
+
params = {"t0": 44, "t1": 47 , "motion_field_strength_x" : 12, "motion_field_strength_y" : 12, "video_length": 8}
|
145 |
+
|
146 |
+
out_path, fps = f"./text2video_{prompt.replace(' ','_')}.mp4", 4
|
147 |
+
model.process_text2video(prompt, fps = fps, path = out_path, **params)
|
148 |
+
```
|
149 |
+
|
150 |
+
To use a different stable diffusion base model, run this python command:
|
151 |
+
```python
|
152 |
+
from hf_utils import get_model_list
|
153 |
+
model_list = get_model_list()
|
154 |
+
for idx, name in enumerate(model_list):
|
155 |
+
print(idx, name)
|
156 |
+
idx = int(input("Select the model by the listed number: ")) # select the model of your choice
|
157 |
+
model.process_text2video(prompt, model_name = model_list[idx], fps = fps, path = out_path, **params)
|
158 |
+
```
|
159 |
+
|
160 |
+
|
161 |
+
#### Hyperparameters (Optional)
|
162 |
+
|
163 |
+
You can define the following hyperparameters:
|
164 |
+
* **Motion field strength**: `motion_field_strength_x` = $\delta_x$ and `motion_field_strength_y` = $\delta_y$ (see our paper, Sect. 3.3.1). Default: `motion_field_strength_x=motion_field_strength_y=12`.
|
165 |
+
* **Timesteps** $T$ and $T'$ (see our paper, Sect. 3.3.1): define values `t0` and `t1` in the range `{0,...,50}`. Default: `t0=44`, `t1=47` (DDIM steps), corresponding to timesteps `881` and `941`, respectively.
|
166 |
+
* **Video length**: Define the number of frames `video_length` to be generated. Default: `video_length=8`.
|
167 |
+
|
168 |
+
|
169 |
+
---
|
170 |
+
|
171 |
+
### Text-To-Video with Pose Control
|
172 |
+
To directly call our text-to-video generator with pose control, run this python command:
|
173 |
+
```python
|
174 |
+
prompt = 'an astronaut dancing in outer space'
|
175 |
+
motion_path = '__assets__/poses_skeleton_gifs/dance1_corr.mp4'
|
176 |
+
out_path = f"./text2video_pose_guidance_{prompt.replace(' ','_')}.gif"
|
177 |
+
model.process_controlnet_pose(motion_path, prompt=prompt, save_path=out_path)
|
178 |
+
```
|
179 |
+
|
180 |
+
|
181 |
+
---
|
182 |
+
|
183 |
+
### Text-To-Video with Edge Control
|
184 |
+
To directly call our text-to-video generator with edge control, run this python command:
|
185 |
+
```python
|
186 |
+
prompt = 'oil painting of a deer, a high-quality, detailed, and professional photo'
|
187 |
+
video_path = '__assets__/canny_videos_mp4/deer.mp4'
|
188 |
+
out_path = f'./text2video_edge_guidance_{prompt}.mp4'
|
189 |
+
model.process_controlnet_canny(video_path, prompt=prompt, save_path=out_path)
|
190 |
+
```
|
191 |
+
|
192 |
+
#### Hyperparameters
|
193 |
+
|
194 |
+
You can define the following hyperparameters for Canny edge detection:
|
195 |
+
* **low threshold**. Define value `low_threshold` in the range $(0, 255)$. Default: `low_threshold=100`.
|
196 |
+
* **high threshold**. Define value `high_threshold` in the range $(0, 255)$. Default: `high_threshold=200`. Make sure that `high_threshold` > `low_threshold`.
|
197 |
+
|
198 |
+
You can give hyperparameters as arguments to `model.process_controlnet_canny`
|
199 |
+
|
200 |
+
---
|
201 |
+
|
202 |
+
|
203 |
+
### Text-To-Video with Edge Guidance and Dreambooth specialization
|
204 |
+
Load a dreambooth model, then proceed as described in `Text-To-Video with Edge Guidance`:
|
205 |
+
```python
|
206 |
+
|
207 |
+
prompt = 'your prompt'
|
208 |
+
video_path = 'path/to/your/video'
|
209 |
+
dreambooth_model_path = 'path/to/your/dreambooth/model'
|
210 |
+
out_path = f'./text2video_edge_db_{prompt}.gif'
|
211 |
+
model.process_controlnet_canny_db(dreambooth_model_path, video_path, prompt=prompt, save_path=out_path)
|
212 |
+
```
|
213 |
+
|
214 |
+
The value `video_path` can be the path to an `mp4` file. To use one of the example videos provided, set `video_path="woman1"`, `video_path="woman2"`, `video_path="woman3"`, or `video_path="man1"`.
|
215 |
+
|
216 |
+
|
217 |
+
The value `dreambooth_model_path` can either be a path to a dreambooth model in diffusers format, or the name of one of the dreambooth models provided. To this end, set `dreambooth_model_path = "Anime DB"`, `dreambooth_model_path = "Avatar DB"`, `dreambooth_model_path = "GTA-5 DB"`, or `dreambooth_model_path = "Arcane DB"`. The corresponding keywords are: `1girl` (for `Anime DB`), `arcane style` (for `Arcane DB`), `avatar style` (for `Avatar DB`), and `gtav style` (for `GTA-5 DB`).
|
218 |
+
|
219 |
+
|
220 |
+
To load custom Dreambooth models, [transfer](https://github.com/lllyasviel/ControlNet/discussions/12) control to the custom model and [convert](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py) it to the diffusers format. Then, the value of `dreambooth_model_path` must point to the folder containing the converted diffusers model. Dreambooth models can be obtained, for instance, from [CIVITAI](https://civitai.com).
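A minimal sketch of this workflow, assuming a hypothetical local folder `./models/my_dreambooth_diffusers` produced by the conversion script (and reusing `prompt` and `out_path` from the example above):

```python
# Hypothetical folder: replace with the location of your converted dreambooth model.
dreambooth_model_path = './models/my_dreambooth_diffusers'

# 'woman1' is one of the example motions shipped with the repository.
model.process_controlnet_canny_db(dreambooth_model_path, 'woman1',
                                  prompt=prompt, save_path=out_path)
```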
|
221 |
+
|
222 |
+
|
223 |
+
|
224 |
+
---
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
### Video Instruct-Pix2Pix
|
229 |
+
|
230 |
+
To perform pix2pix video editing, run this python command:
|
231 |
+
```python
|
232 |
+
prompt = 'make it Van Gogh Starry Night'
|
233 |
+
video_path = '__assets__/pix2pix video/camel.mp4'
|
234 |
+
out_path = f'./video_instruct_pix2pix_{prompt}.mp4'
|
235 |
+
model.process_pix2pix(video_path, prompt=prompt, save_path=out_path)
|
236 |
+
```
|
237 |
+
|
238 |
+
---
|
239 |
+
|
240 |
+
### Low Memory Inference
|
241 |
+
Each of the interfaces introduced above can be run in a low-memory setup. In the minimal setup, a GPU with **12 GB VRAM** is sufficient.
|
242 |
+
|
243 |
+
To reduce memory usage, add `chunk_size=k` as an additional parameter when calling any of the inference APIs defined above. The integer value `k` must be in the range `{2,...,video_length}`. It defines the number of frames processed at once (without any loss in quality); the lower the value, the less memory is needed.
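For instance, a minimal sketch reusing the text-to-video call from above and processing two frames at a time:

```python
# Low-memory sketch: reuses prompt, fps, out_path and params from the text-to-video example;
# chunk_size=2 processes two frames at a time, trading speed for lower VRAM usage.
model.process_text2video(prompt, fps=fps, path=out_path, chunk_size=2, **params)
```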
|
244 |
+
|
245 |
+
When using the gradio app, set `chunk_size` in the `Advanced options`.
|
246 |
+
|
247 |
+
|
248 |
+
We plan to soon release a new version that further reduces memory usage.
|
249 |
+
|
250 |
+
|
251 |
---
|
252 |
+
|
253 |
+
|
254 |
+
### Ablation Study
|
255 |
+
To replicate the ablation study, pass additional parameters when calling the inference APIs defined above (see the sketch after this list).
|
256 |
+
* To deactivate `cross-frame attention`: Add `use_cf_attn=False` to the parameter list.
|
257 |
+
* To deactivate enriching latent codes with `motion dynamics`: Add `use_motion_field=False` to the parameter list.
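A minimal sketch combining both ablation flags with the text-to-video call from above (all other arguments unchanged):

```python
# Ablation sketch: reuses prompt, fps, out_path and params from the text-to-video example.
model.process_text2video(prompt, fps=fps, path=out_path,
                         use_cf_attn=False,       # no cross-frame attention
                         use_motion_field=False,  # no motion-dynamics enrichment
                         **params)
```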
|
258 |
+
|
259 |
+
|
260 |
+
Note: Adding `smooth_bg=True` activates background smoothing. However, our released code does not include the salient object detector required to run it.
|
261 |
+
|
262 |
+
|
263 |
---
|
264 |
|
265 |
+
## Inference using Gradio
|
266 |
+
From the project root folder, run this shell command:
|
267 |
+
```shell
|
268 |
+
python app.py
|
269 |
+
```
|
270 |
+
|
271 |
+
Then access the app [locally](http://127.0.0.1:7860) with a browser.
|
272 |
+
|
273 |
+
To access the app remotely, run this shell command:
|
274 |
+
```shell
|
275 |
+
python app.py --public_access
|
276 |
+
```
|
277 |
+
For security information about public access, we refer to the documentation of [gradio](https://gradio.app/sharing-your-app/#security-and-file-access).
|
278 |
+
|
279 |
+
|
280 |
+
|
281 |
+
## Results
|
282 |
+
|
283 |
+
### Text-To-Video
|
284 |
+
<table class="center">
|
285 |
+
<tr>
|
286 |
+
<td><img src="__assets__/github/results/t2v/cat_running.gif" raw=true></td>
|
287 |
+
<td><img src="__assets__/github/results/t2v/playing.gif"></td>
|
288 |
+
<td><img src="__assets__/github/results/t2v/running.gif"></td>
|
289 |
+
<td><img src="__assets__/github/results/t2v/skii.gif"></td>
|
290 |
+
</tr>
|
291 |
+
<tr>
|
292 |
+
<td width=25% align="center">"A cat is running on the grass"</td>
|
293 |
+
<td width=25% align="center">"A panda is playing guitar on times square"</td>
|
294 |
+
<td width=25% align="center">"A man is running in the snow"</td>
|
295 |
+
<td width=25% align="center">"An astronaut is skiing down the hill"</td>
|
296 |
+
</tr>
|
297 |
+
|
298 |
+
<tr>
|
299 |
+
<td><img src="__assets__/github/results/t2v/panda_surfing.gif" raw=true></td>
|
300 |
+
<td><img src="__assets__/github/results/t2v/bear_dancing.gif"></td>
|
301 |
+
<td><img src="__assets__/github/results/t2v/bicycle.gif"></td>
|
302 |
+
<td><img src="__assets__/github/results/t2v/horse_galloping.gif"></td>
|
303 |
+
</tr>
|
304 |
+
<tr>
|
305 |
+
<td width=25% align="center">"A panda surfing on a wakeboard"</td>
|
306 |
+
<td width=25% align="center">"A bear dancing on times square"</td>
|
307 |
+
<td width=25% align="center">"A man is riding a bicycle in the sunshine"</td>
|
308 |
+
<td width=25% align="center">"A horse galloping on a street"</td>
|
309 |
+
</tr>
|
310 |
+
|
311 |
+
<tr>
|
312 |
+
<td><img src="__assets__/github/results/t2v/tiger_walking.gif" raw=true></td>
|
313 |
+
<td><img src="__assets__/github/results/t2v/panda_surfing_2.gif"></td>
|
314 |
+
<td><img src="__assets__/github/results/t2v/horse_galloping_2.gif"></td>
|
315 |
+
<td><img src="__assets__/github/results/t2v/cat_walking.gif"></td>
|
316 |
+
</tr>
|
317 |
+
<tr>
|
318 |
+
<td width=25% align="center">"A tiger walking alone down the street"</td>
|
319 |
+
<td width=25% align="center">"A panda surfing on a wakeboard"</td>
|
320 |
+
<td width=25% align="center">"A horse galloping on a street"</td>
|
321 |
+
<td width=25% align="center">"A cute cat running in a beautiful meadow"</td>
|
322 |
+
</tr>
|
323 |
+
|
324 |
+
|
325 |
+
<tr>
|
326 |
+
<td><img src="__assets__/github/results/t2v/horse_galloping_3.gif" raw=true></td>
|
327 |
+
<td><img src="__assets__/github/results/t2v/panda_walking.gif"></td>
|
328 |
+
<td><img src="__assets__/github/results/t2v/dog_walking.gif"></td>
|
329 |
+
<td><img src="__assets__/github/results/t2v/astronaut.gif"></td>
|
330 |
+
</tr>
|
331 |
+
<tr>
|
332 |
+
<td width=25% align="center">"A horse galloping on a street"</td>
|
333 |
+
<td width=25% align="center">"A panda walking alone down the street"</td>
|
334 |
+
<td width=25% align="center">"A dog is walking down the street"</td>
|
335 |
+
<td width=25% align="center">"An astronaut is waving his hands on the moon"</td>
|
336 |
+
</tr>
|
337 |
+
|
338 |
+
|
339 |
+
</table>
|
340 |
+
|
341 |
+
### Text-To-Video with Pose Guidance
|
342 |
+
|
343 |
+
|
344 |
+
<table class="center">
|
345 |
+
<tr>
|
346 |
+
<td><img src="__assets__/github/results/pose2v/img_bot_left.gif" raw=true><img src="__assets__/github/results/pose2v/pose_bot_left.gif"></td>
|
347 |
+
<td><img src="__assets__/github/results/pose2v/img_bot_right.gif" raw=true><img src="__assets__/github/results/pose2v/pose_bot_right.gif"></td>
|
348 |
+
<td><img src="__assets__/github/results/pose2v/img_top_left.gif" raw=true><img src="__assets__/github/results/pose2v/pose_top_left.gif"></td>
|
349 |
+
<td><img src="__assets__/github/results/pose2v/img_top_right.gif" raw=true><img src="__assets__/github/results/pose2v/pose_top_right.gif"></td>
|
350 |
+
</tr>
|
351 |
+
<tr>
|
352 |
+
<td width=25% align="center">"A bear dancing on the concrete"</td>
|
353 |
+
<td width=25% align="center">"An alien dancing under a flying saucer"</td>
|
354 |
+
<td width=25% align="center">"A panda dancing in Antarctica"</td>
|
355 |
+
<td width=25% align="center">"An astronaut dancing in the outer space"</td>
|
356 |
+
|
357 |
+
</tr>
|
358 |
+
</table>
|
359 |
+
|
360 |
+
### Text-To-Video with Edge Guidance
|
361 |
+
|
362 |
+
|
363 |
+
|
364 |
+
<table class="center">
|
365 |
+
<tr>
|
366 |
+
<td><img src="__assets__/github/results/edge2v/butterfly.gif" raw=true><img src="__assets__/github/results/edge2v/butterfly_edge.gif"></td>
|
367 |
+
<td><img src="__assets__/github/results/edge2v/head.gif" raw=true><img src="__assets__/github/results/edge2v/head_edge.gif"></td>
|
368 |
+
<td><img src="__assets__/github/results/edge2v/jelly.gif" raw=true><img src="__assets__/github/results/edge2v/jelly_edge.gif"></td>
|
369 |
+
<td><img src="__assets__/github/results/edge2v/mask.gif" raw=true><img src="__assets__/github/results/edge2v/mask_edge.gif"></td>
|
370 |
+
</tr>
|
371 |
+
<tr>
|
372 |
+
<td width=25% align="center">"White butterfly"</td>
|
373 |
+
<td width=25% align="center">"Beautiful girl"</td>
|
374 |
+
<td width=25% align="center">"A jellyfish"</td>
|
375 |
+
<td width=25% align="center">"beautiful girl halloween style"</td>
|
376 |
+
</tr>
|
377 |
+
|
378 |
+
<tr>
|
379 |
+
<td><img src="__assets__/github/results/edge2v/fox.gif" raw=true><img src="__assets__/github/results/edge2v/fix_edge.gif"></td>
|
380 |
+
<td><img src="__assets__/github/results/edge2v/head_2.gif" raw=true><img src="__assets__/github/results/edge2v/head_2_edge.gif"></td>
|
381 |
+
<td><img src="__assets__/github/results/edge2v/santa.gif" raw=true><img src="__assets__/github/results/edge2v/santa_edge.gif"></td>
|
382 |
+
<td><img src="__assets__/github/results/edge2v/dear.gif" raw=true><img src="__assets__/github/results/edge2v/dear_edge.gif"></td>
|
383 |
+
</tr>
|
384 |
+
<tr>
|
385 |
+
<td width=25% align="center">"Wild fox is walking"</td>
|
386 |
+
<td width=25% align="center">"Oil painting of a beautiful girl close-up"</td>
|
387 |
+
<td width=25% align="center">"A santa claus"</td>
|
388 |
+
<td width=25% align="center">"A deer"</td>
|
389 |
+
</tr>
|
390 |
+
|
391 |
+
</table>
|
392 |
+
|
393 |
+
|
394 |
+
### Text-To-Video with Edge Guidance and Dreambooth specialization
|
395 |
+
|
396 |
+
|
397 |
+
|
398 |
+
|
399 |
+
<table class="center">
|
400 |
+
<tr>
|
401 |
+
<td><img src="__assets__/github/results/canny_db/anime_style.gif" raw=true><img src="__assets__/github/results/canny_db/anime_edge.gif"></td>
|
402 |
+
<td><img src="__assets__/github/results/canny_db/arcane_style.gif" raw=true><img src="__assets__/github/results/canny_db/arcane_edge.gif"></td>
|
403 |
+
<td><img src="__assets__/github/results/canny_db/gta-5_man_style.gif" raw=true><img src="__assets__/github/results/canny_db/gta-5_man_edge.gif"></td>
|
404 |
+
<td><img src="__assets__/github/results/canny_db/img_bot_right.gif" raw=true><img src="__assets__/github/results/canny_db/edge_bot_right.gif"></td>
|
405 |
+
</tr>
|
406 |
+
<tr>
|
407 |
+
<td width=25% align="center">"anime style"</td>
|
408 |
+
<td width=25% align="center">"arcane style"</td>
|
409 |
+
<td width=25% align="center">"gta-5 man"</td>
|
410 |
+
<td width=25% align="center">"avatar style"</td>
|
411 |
+
</tr>
|
412 |
+
|
413 |
+
</table>
|
414 |
+
|
415 |
+
|
416 |
+
## Video Instruct Pix2Pix
|
417 |
+
|
418 |
+
<table class="center">
|
419 |
+
<tr>
|
420 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/up_left.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/bot_left.gif"></td>
|
421 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/up_mid.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/bot_mid.gif"></td>
|
422 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/up_right.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_1/bot_right.gif"></td>
|
423 |
+
</tr>
|
424 |
+
<tr>
|
425 |
+
<td width=25% align="center">"Replace man with chimpanzee"</td>
|
426 |
+
<td width=25% align="center">"Make it Van Gogh Starry Night style"</td>
|
427 |
+
<td width=25% align="center">"Make it Picasso style"</td>
|
428 |
+
</tr>
|
429 |
+
|
430 |
+
<tr>
|
431 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/up_left.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/bot_left.gif"></td>
|
432 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/up_mid.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/bot_mid.gif"></td>
|
433 |
+
<td><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/up_right.gif" raw=true><img src="__assets__/github/results/Video_InstructPix2Pix/frame_2/bot_right.gif"></td>
|
434 |
+
</tr>
|
435 |
+
<tr>
|
436 |
+
<td width=25% align="center">"Make it Expressionism style"</td>
|
437 |
+
<td width=25% align="center">"Make it night"</td>
|
438 |
+
<td width=25% align="center">"Make it autumn"</td>
|
439 |
+
</tr>
|
440 |
+
</table>
|
441 |
+
|
442 |
+
|
443 |
+
## Related Links
|
444 |
+
|
445 |
+
* [High-Resolution Image Synthesis with Latent Diffusion Models (a.k.a. LDM & Stable Diffusion)](https://ommer-lab.com/research/latent-diffusion-models/)
|
446 |
+
* [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://www.timothybrooks.com/instruct-pix2pix/)
|
447 |
+
* [Adding Conditional Control to Text-to-Image Diffusion Models (a.k.a ControlNet)](https://github.com/lllyasviel/ControlNet)
|
448 |
+
* [Diffusers](https://github.com/huggingface/diffusers)
|
449 |
+
|
450 |
+
## License
|
451 |
+
Our code is published under the CreativeML Open RAIL-M license. The license provided in this repository applies to all additions and contributions we make upon the original stable diffusion code. The original stable diffusion code is under the CreativeML Open RAIL-M license, which can be found [here](https://github.com/CompVis/stable-diffusion/blob/main/LICENSE).
|
452 |
+
|
453 |
+
|
454 |
+
## BibTeX
|
455 |
+
If you use our work in your research, please cite our publication:
|
456 |
+
```
|
457 |
+
@article{text2video-zero,
|
458 |
+
title={Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators},
|
459 |
+
author={Khachatryan, Levon and Movsisyan, Andranik and Tadevosyan, Vahram and Henschel, Roberto and Wang, Zhangyang and Navasardyan, Shant and Shi, Humphrey},
|
460 |
+
journal={arXiv preprint arXiv:2303.13439},
|
461 |
+
year={2023}
|
462 |
+
}
|
463 |
+
```
|
app.py
CHANGED
@@ -2,15 +2,21 @@ import gradio as gr
|
|
2 |
import torch
|
3 |
|
4 |
from model import Model, ModelType
|
5 |
-
|
6 |
from app_canny import create_demo as create_demo_canny
|
7 |
from app_pose import create_demo as create_demo_pose
|
8 |
from app_text_to_video import create_demo as create_demo_text_to_video
|
9 |
from app_pix2pix_video import create_demo as create_demo_pix2pix_video
|
10 |
from app_canny_db import create_demo as create_demo_canny_db
|
|
|
|
|
11 |
|
12 |
-
|
13 |
model = Model(device='cuda', dtype=torch.float16)
|
14 |
|
15 |
with gr.Blocks(css='style.css') as demo:
|
16 |
gr.HTML(
|
@@ -38,13 +44,13 @@ with gr.Blocks(css='style.css') as demo:
|
|
38 |
</div>
|
39 |
""")
|
40 |
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
|
49 |
with gr.Tab('Zero-Shot Text2Video'):
|
50 |
create_demo_text_to_video(model)
|
@@ -78,6 +84,7 @@ with gr.Blocks(css='style.css') as demo:
|
|
78 |
</div>
|
79 |
""")
|
80 |
|
81 |
-
|
82 |
-
demo.
|
83 |
-
|
|
|
|
2 |
import torch
|
3 |
|
4 |
from model import Model, ModelType
|
|
|
5 |
from app_canny import create_demo as create_demo_canny
|
6 |
from app_pose import create_demo as create_demo_pose
|
7 |
from app_text_to_video import create_demo as create_demo_text_to_video
|
8 |
from app_pix2pix_video import create_demo as create_demo_pix2pix_video
|
9 |
from app_canny_db import create_demo as create_demo_canny_db
|
10 |
+
import argparse
|
11 |
+
import os
|
12 |
|
13 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
14 |
model = Model(device='cuda', dtype=torch.float16)
|
15 |
+
parser = argparse.ArgumentParser()
|
16 |
+
parser.add_argument('--public_access', action='store_true',
|
17 |
+
help="if enabled, the app can be access from a public url", default=False)
|
18 |
+
args = parser.parse_args()
|
19 |
+
|
20 |
|
21 |
with gr.Blocks(css='style.css') as demo:
|
22 |
gr.HTML(
|
|
|
44 |
</div>
|
45 |
""")
|
46 |
|
47 |
+
if on_huggingspace:
|
48 |
+
gr.HTML("""
|
49 |
+
<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
|
50 |
+
<br/>
|
51 |
+
<a href="https://huggingface.co/spaces/PAIR/Text2Video-Zero?duplicate=true">
|
52 |
+
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
|
53 |
+
</p>""")
|
54 |
|
55 |
with gr.Tab('Zero-Shot Text2Video'):
|
56 |
create_demo_text_to_video(model)
|
|
|
84 |
</div>
|
85 |
""")
|
86 |
|
87 |
+
|
88 |
+
_, _, link = demo.queue(api_open=False).launch(
|
89 |
+
file_directories=['temporal'], share=args.public_access or on_huggingspace)
|
90 |
+
print(link)
|
app_canny.py
CHANGED
@@ -1,16 +1,26 @@
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
|
|
|
|
|
|
3 |
|
4 |
def create_demo(model: Model):
|
5 |
|
6 |
examples = [
|
7 |
-
["__assets__/canny_videos_edge_2fps/butterfly.mp4",
|
8 |
-
|
9 |
-
["__assets__/canny_videos_edge_2fps/
|
10 |
-
|
11 |
-
["__assets__/canny_videos_edge_2fps/
|
12 |
-
|
13 |
-
["__assets__/canny_videos_edge_2fps/
|
14 |
]
|
15 |
|
16 |
with gr.Blocks() as demo:
|
@@ -21,7 +31,7 @@ def create_demo(model: Model):
|
|
21 |
"""
|
22 |
<div style="text-align: left; auto;">
|
23 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
24 |
-
Description: For performance purposes, our current preview release supports any input videos but caps output videos
|
25 |
</h3>
|
26 |
</div>
|
27 |
""")
|
@@ -34,8 +44,10 @@ def create_demo(model: Model):
|
|
34 |
prompt = gr.Textbox(label='Prompt')
|
35 |
run_button = gr.Button(label='Run')
|
36 |
with gr.Accordion('Advanced options', open=False):
|
37 |
-
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
38 |
-
|
|
|
|
|
39 |
with gr.Column():
|
40 |
result = gr.Video(label="Generated Video").style(height="auto")
|
41 |
|
@@ -50,7 +62,7 @@ def create_demo(model: Model):
|
|
50 |
inputs=inputs,
|
51 |
outputs=result,
|
52 |
fn=model.process_controlnet_canny,
|
53 |
-
cache_examples
|
54 |
run_on_click=False,
|
55 |
)
|
56 |
|
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
+
import os
|
4 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
5 |
+
|
6 |
|
7 |
def create_demo(model: Model):
|
8 |
|
9 |
examples = [
|
10 |
+
["__assets__/canny_videos_edge_2fps/butterfly.mp4",
|
11 |
+
"white butterfly, a high-quality, detailed, and professional photo"],
|
12 |
+
["__assets__/canny_videos_edge_2fps/deer.mp4",
|
13 |
+
"oil painting of a deer, a high-quality, detailed, and professional photo"],
|
14 |
+
["__assets__/canny_videos_edge_2fps/fox.mp4",
|
15 |
+
"wild red fox is walking on the grass, a high-quality, detailed, and professional photo"],
|
16 |
+
["__assets__/canny_videos_edge_2fps/girl_dancing.mp4",
|
17 |
+
"oil painting of a girl dancing close-up, masterpiece, a high-quality, detailed, and professional photo"],
|
18 |
+
["__assets__/canny_videos_edge_2fps/girl_turning.mp4",
|
19 |
+
"oil painting of a beautiful girl, a high-quality, detailed, and professional photo"],
|
20 |
+
["__assets__/canny_videos_edge_2fps/halloween.mp4",
|
21 |
+
"beautiful girl halloween style, a high-quality, detailed, and professional photo"],
|
22 |
+
["__assets__/canny_videos_edge_2fps/santa.mp4",
|
23 |
+
"a santa claus, a high-quality, detailed, and professional photo"],
|
24 |
]
|
25 |
|
26 |
with gr.Blocks() as demo:
|
|
|
31 |
"""
|
32 |
<div style="text-align: left; auto;">
|
33 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
34 |
+
Description: For performance purposes, our current preview release supports any input videos but caps output videos after 80 frames and the input videos are scaled down before processing.
|
35 |
</h3>
|
36 |
</div>
|
37 |
""")
|
|
|
44 |
prompt = gr.Textbox(label='Prompt')
|
45 |
run_button = gr.Button(label='Run')
|
46 |
with gr.Accordion('Advanced options', open=False):
|
47 |
+
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
48 |
+
"None"], label="Watermark", value='Picsart AI Research')
|
49 |
+
chunk_size = gr.Slider(
|
50 |
+
label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
|
51 |
with gr.Column():
|
52 |
result = gr.Video(label="Generated Video").style(height="auto")
|
53 |
|
|
|
62 |
inputs=inputs,
|
63 |
outputs=result,
|
64 |
fn=model.process_controlnet_canny,
|
65 |
+
cache_examples=on_huggingspace,
|
66 |
run_on_click=False,
|
67 |
)
|
68 |
|
app_canny_db.py
CHANGED
@@ -1,6 +1,9 @@
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
import gradio_utils
|
|
|
|
|
|
|
4 |
|
5 |
examples = [
|
6 |
['Anime DB', "woman1", "Portrait of detailed 1girl, feminine, soldier cinematic shot on canon 5d ultra realistic skin intricate clothes accurate hands Rory Lewis Artgerm WLOP Jeremy Lipking Jane Ansell studio lighting"],
|
@@ -25,7 +28,8 @@ def create_demo(model: Model):
|
|
25 |
|
26 |
with gr.Blocks() as demo:
|
27 |
with gr.Row():
|
28 |
-
gr.Markdown(
|
|
|
29 |
with gr.Row():
|
30 |
gr.HTML(
|
31 |
"""
|
@@ -35,7 +39,6 @@ def create_demo(model: Model):
|
|
35 |
</h3>
|
36 |
</div>
|
37 |
""")
|
38 |
-
|
39 |
with gr.Row():
|
40 |
with gr.Column():
|
41 |
# input_video_path = gr.Video(source='upload', format="mp4", visible=False)
|
@@ -45,25 +48,30 @@ def create_demo(model: Model):
|
|
45 |
prompt = gr.Textbox(label='Prompt')
|
46 |
run_button = gr.Button(label='Run')
|
47 |
with gr.Accordion('Advanced options', open=False):
|
48 |
-
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
49 |
-
|
|
|
|
|
50 |
with gr.Column():
|
51 |
result = gr.Image(label="Generated Video").style(height=400)
|
52 |
|
53 |
with gr.Row():
|
54 |
-
gallery_db = gr.Gallery(label="Db models", value=[('__assets__/db_files/anime.jpg', "anime"), ('__assets__/db_files/arcane.jpg', "Arcane"), (
|
|
|
55 |
with gr.Row():
|
56 |
-
gallery_canny = gr.Gallery(label="Motions", value=[('__assets__/db_files/woman1.gif', "woman1"), ('__assets__/db_files/woman2.gif', "woman2"), (
|
57 |
-
|
58 |
|
59 |
db_selection = gr.Textbox(label="DB Model", visible=False)
|
60 |
-
canny_selection = gr.Textbox(
|
|
|
61 |
|
62 |
gallery_db.select(load_db_model, None, db_selection)
|
63 |
gallery_canny.select(canny_select, None, canny_selection)
|
64 |
|
65 |
-
db_selection.change(on_db_selection_update,None,db_text_field)
|
66 |
-
canny_selection.change(on_canny_selection_update,
|
|
|
67 |
|
68 |
inputs = [
|
69 |
db_selection,
|
@@ -77,7 +85,7 @@ def create_demo(model: Model):
|
|
77 |
inputs=inputs,
|
78 |
outputs=result,
|
79 |
fn=model.process_controlnet_canny_db,
|
80 |
-
cache_examples
|
81 |
)
|
82 |
|
83 |
run_button.click(fn=model.process_controlnet_canny_db,
|
@@ -86,7 +94,7 @@ def create_demo(model: Model):
|
|
86 |
return demo
|
87 |
|
88 |
|
89 |
-
def on_db_selection_update(evt
|
90 |
|
91 |
return f"DB model: **{evt._data}**"
|
92 |
|
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
import gradio_utils
|
4 |
+
import os
|
5 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
6 |
+
|
7 |
|
8 |
examples = [
|
9 |
['Anime DB', "woman1", "Portrait of detailed 1girl, feminine, soldier cinematic shot on canon 5d ultra realistic skin intricate clothes accurate hands Rory Lewis Artgerm WLOP Jeremy Lipking Jane Ansell studio lighting"],
|
|
|
28 |
|
29 |
with gr.Blocks() as demo:
|
30 |
with gr.Row():
|
31 |
+
gr.Markdown(
|
32 |
+
'## Text, Canny-Edge and DreamBooth Conditional Video Generation')
|
33 |
with gr.Row():
|
34 |
gr.HTML(
|
35 |
"""
|
|
|
39 |
</h3>
|
40 |
</div>
|
41 |
""")
|
|
|
42 |
with gr.Row():
|
43 |
with gr.Column():
|
44 |
# input_video_path = gr.Video(source='upload', format="mp4", visible=False)
|
|
|
48 |
prompt = gr.Textbox(label='Prompt')
|
49 |
run_button = gr.Button(label='Run')
|
50 |
with gr.Accordion('Advanced options', open=False):
|
51 |
+
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
52 |
+
"None"], label="Watermark", value='Picsart AI Research')
|
53 |
+
chunk_size = gr.Slider(
|
54 |
+
label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
|
55 |
with gr.Column():
|
56 |
result = gr.Image(label="Generated Video").style(height=400)
|
57 |
|
58 |
with gr.Row():
|
59 |
+
gallery_db = gr.Gallery(label="Db models", value=[('__assets__/db_files/anime.jpg', "anime"), ('__assets__/db_files/arcane.jpg', "Arcane"), (
|
60 |
+
'__assets__/db_files/gta.jpg', "GTA-5 (Man)"), ('__assets__/db_files/avatar.jpg', "Avatar DB")]).style(grid=[4], height=50)
|
61 |
with gr.Row():
|
62 |
+
gallery_canny = gr.Gallery(label="Motions", value=[('__assets__/db_files/woman1.gif', "woman1"), ('__assets__/db_files/woman2.gif', "woman2"), (
|
63 |
+
'__assets__/db_files/man1.gif', "man1"), ('__assets__/db_files/woman3.gif', "woman3")]).style(grid=[4], height=50)
|
64 |
|
65 |
db_selection = gr.Textbox(label="DB Model", visible=False)
|
66 |
+
canny_selection = gr.Textbox(
|
67 |
+
label="One of the above defined motions", visible=False)
|
68 |
|
69 |
gallery_db.select(load_db_model, None, db_selection)
|
70 |
gallery_canny.select(canny_select, None, canny_selection)
|
71 |
|
72 |
+
db_selection.change(on_db_selection_update, None, db_text_field)
|
73 |
+
canny_selection.change(on_canny_selection_update,
|
74 |
+
None, canny_text_field)
|
75 |
|
76 |
inputs = [
|
77 |
db_selection,
|
|
|
85 |
inputs=inputs,
|
86 |
outputs=result,
|
87 |
fn=model.process_controlnet_canny_db,
|
88 |
+
cache_examples=on_huggingspace,
|
89 |
)
|
90 |
|
91 |
run_button.click(fn=model.process_controlnet_canny_db,
|
|
|
94 |
return demo
|
95 |
|
96 |
|
97 |
+
def on_db_selection_update(evt: gr.EventData):
|
98 |
|
99 |
return f"DB model: **{evt._data}**"
|
100 |
|
app_pix2pix_video.py
CHANGED
@@ -1,15 +1,23 @@
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
|
|
|
|
3 |
|
4 |
|
5 |
def create_demo(model: Model):
|
6 |
examples = [
|
7 |
-
['__assets__/pix2pix_video_2fps/camel.mp4',
|
8 |
-
|
9 |
-
['__assets__/pix2pix_video_2fps/
|
10 |
-
|
11 |
-
['__assets__/pix2pix_video_2fps/
|
12 |
-
|
|
]
|
14 |
with gr.Blocks() as demo:
|
15 |
with gr.Row():
|
@@ -19,19 +27,21 @@ def create_demo(model: Model):
|
|
19 |
"""
|
20 |
<div style="text-align: left; auto;">
|
21 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
22 |
-
Description: For performance purposes, our current preview release supports any input videos but caps output videos
|
23 |
</h3>
|
24 |
</div>
|
25 |
""")
|
26 |
|
27 |
with gr.Row():
|
28 |
with gr.Column():
|
29 |
-
input_image = gr.Video(label="Input Video",source='upload',
|
|
|
30 |
with gr.Column():
|
31 |
prompt = gr.Textbox(label='Prompt')
|
32 |
run_button = gr.Button(label='Run')
|
33 |
with gr.Accordion('Advanced options', open=False):
|
34 |
-
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
|
|
35 |
image_resolution = gr.Slider(label='Image Resolution',
|
36 |
minimum=256,
|
37 |
maximum=1024,
|
@@ -43,18 +53,18 @@ def create_demo(model: Model):
|
|
43 |
value=0,
|
44 |
step=1)
|
45 |
image_guidance = gr.Slider(label='Image guidance scale',
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
start_t = gr.Slider(label='Starting time in seconds',
|
51 |
minimum=0,
|
52 |
-
maximum=
|
53 |
value=0,
|
54 |
step=1)
|
55 |
end_t = gr.Slider(label='End time in seconds (-1 corresponds to uploaded video duration)',
|
56 |
minimum=0,
|
57 |
-
maximum=
|
58 |
value=-1,
|
59 |
step=1)
|
60 |
out_fps = gr.Slider(label='Output video fps (-1 corresponds to uploaded video fps)',
|
@@ -62,7 +72,8 @@ def create_demo(model: Model):
|
|
62 |
maximum=30,
|
63 |
value=-1,
|
64 |
step=1)
|
65 |
-
chunk_size = gr.Slider(
|
|
|
66 |
with gr.Column():
|
67 |
result = gr.Video(label='Output', show_label=True)
|
68 |
inputs = [
|
@@ -82,7 +93,7 @@ def create_demo(model: Model):
|
|
82 |
inputs=inputs,
|
83 |
outputs=result,
|
84 |
fn=model.process_pix2pix,
|
85 |
-
cache_examples=
|
86 |
run_on_click=False,
|
87 |
)
|
88 |
|
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
+
import os
|
4 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
5 |
|
6 |
|
7 |
def create_demo(model: Model):
|
8 |
examples = [
|
9 |
+
['__assets__/pix2pix_video_2fps/camel.mp4',
|
10 |
+
'make it Van Gogh Starry Night style', 512, 0, 1.0],
|
11 |
+
['__assets__/pix2pix_video_2fps/mini-cooper.mp4',
|
12 |
+
'make it Picasso style', 512, 0, 1.5],
|
13 |
+
['__assets__/pix2pix_video_2fps/snowboard.mp4',
|
14 |
+
'replace man with robot', 512, 0, 1.0],
|
15 |
+
['__assets__/pix2pix_video_2fps/white-swan.mp4',
|
16 |
+
'replace swan with mallard', 512, 0, 1.5],
|
17 |
+
['__assets__/pix2pix_video_2fps/boat.mp4',
|
18 |
+
'add city skyline in the background', 512, 0, 1.5],
|
19 |
+
['__assets__/pix2pix_video_2fps/ballet.mp4',
|
20 |
+
'make her a golden sculpture', 512, 0, 1.0],
|
21 |
]
|
22 |
with gr.Blocks() as demo:
|
23 |
with gr.Row():
|
|
|
27 |
"""
|
28 |
<div style="text-align: left; auto;">
|
29 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
30 |
+
Description: For performance purposes, our current preview release supports any input videos but caps output videos after 80 frames and the input videos are scaled down before processing. For faster inference you can choose lower output frames per seconds from Advanced Options.
|
31 |
</h3>
|
32 |
</div>
|
33 |
""")
|
34 |
|
35 |
with gr.Row():
|
36 |
with gr.Column():
|
37 |
+
input_image = gr.Video(label="Input Video", source='upload',
|
38 |
+
type='numpy', format="mp4", visible=True).style(height="auto")
|
39 |
with gr.Column():
|
40 |
prompt = gr.Textbox(label='Prompt')
|
41 |
run_button = gr.Button(label='Run')
|
42 |
with gr.Accordion('Advanced options', open=False):
|
43 |
+
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
44 |
+
"None"], label="Watermark", value='Picsart AI Research')
|
45 |
image_resolution = gr.Slider(label='Image Resolution',
|
46 |
minimum=256,
|
47 |
maximum=1024,
|
|
|
53 |
value=0,
|
54 |
step=1)
|
55 |
image_guidance = gr.Slider(label='Image guidance scale',
|
56 |
+
minimum=0.5,
|
57 |
+
maximum=2,
|
58 |
+
value=1.0,
|
59 |
+
step=0.1)
|
60 |
start_t = gr.Slider(label='Starting time in seconds',
|
61 |
minimum=0,
|
62 |
+
maximum=10,
|
63 |
value=0,
|
64 |
step=1)
|
65 |
end_t = gr.Slider(label='End time in seconds (-1 corresponds to uploaded video duration)',
|
66 |
minimum=0,
|
67 |
+
maximum=10,
|
68 |
value=-1,
|
69 |
step=1)
|
70 |
out_fps = gr.Slider(label='Output video fps (-1 corresponds to uploaded video fps)',
|
|
|
72 |
maximum=30,
|
73 |
value=-1,
|
74 |
step=1)
|
75 |
+
chunk_size = gr.Slider(
|
76 |
+
label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
|
77 |
with gr.Column():
|
78 |
result = gr.Video(label='Output', show_label=True)
|
79 |
inputs = [
|
|
|
93 |
inputs=inputs,
|
94 |
outputs=result,
|
95 |
fn=model.process_pix2pix,
|
96 |
+
cache_examples=on_huggingspace,
|
97 |
run_on_click=False,
|
98 |
)
|
99 |
|
app_pose.py
CHANGED
@@ -1,7 +1,7 @@
|
|
|
|
1 |
import gradio as gr
|
2 |
import os
|
3 |
-
|
4 |
-
from model import Model
|
5 |
|
6 |
examples = [
|
7 |
['Motion 1', "An astronaut dancing in the outer space"],
|
@@ -11,29 +11,38 @@ examples = [
|
|
11 |
['Motion 5', "An astronaut dancing in the outer space"],
|
12 |
]
|
13 |
|
|
|
14 |
def create_demo(model: Model):
|
15 |
with gr.Blocks() as demo:
|
16 |
with gr.Row():
|
17 |
gr.Markdown('## Text and Pose Conditional Video Generation')
|
18 |
|
19 |
with gr.Row():
|
20 |
-
gr.Markdown(
|
|
|
21 |
with gr.Column():
|
22 |
-
gallery_pose_sequence = gr.Gallery(label="Pose Sequence", value=[('__assets__/poses_skeleton_gifs/dance1.gif', "Motion 1"), ('__assets__/poses_skeleton_gifs/dance2.gif', "Motion 2"), (
|
23 |
-
|
|
|
|
|
24 |
gr.Markdown("## Selection")
|
25 |
-
pose_sequence_selector = gr.Markdown(
|
|
|
26 |
with gr.Column():
|
27 |
prompt = gr.Textbox(label='Prompt')
|
28 |
run_button = gr.Button(label='Run')
|
29 |
with gr.Accordion('Advanced options', open=False):
|
30 |
-
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
31 |
-
|
|
|
|
|
32 |
with gr.Column():
|
33 |
result = gr.Image(label="Generated Video")
|
34 |
|
35 |
-
input_video_path.change(on_video_path_update,
|
36 |
-
|
|
|
|
|
37 |
inputs = [
|
38 |
input_video_path,
|
39 |
prompt,
|
@@ -45,7 +54,7 @@ def create_demo(model: Model):
|
|
45 |
inputs=inputs,
|
46 |
outputs=result,
|
47 |
fn=model.process_controlnet_pose,
|
48 |
-
cache_examples
|
49 |
run_on_click=False,
|
50 |
)
|
51 |
|
@@ -61,4 +70,4 @@ def on_video_path_update(evt: gr.EventData):
|
|
61 |
|
62 |
|
63 |
def pose_gallery_callback(evt: gr.SelectData):
|
64 |
-
return f"Motion {evt.index+1}"
|
|
|
1 |
+
from model import Model
|
2 |
import gradio as gr
|
3 |
import os
|
4 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
|
|
5 |
|
6 |
examples = [
|
7 |
['Motion 1', "An astronaut dancing in the outer space"],
|
|
|
11 |
['Motion 5', "An astronaut dancing in the outer space"],
|
12 |
]
|
13 |
|
14 |
+
|
15 |
def create_demo(model: Model):
|
16 |
with gr.Blocks() as demo:
|
17 |
with gr.Row():
|
18 |
gr.Markdown('## Text and Pose Conditional Video Generation')
|
19 |
|
20 |
with gr.Row():
|
21 |
+
gr.Markdown(
|
22 |
+
'Selection: **one motion** and a **prompt**, or use the examples below.')
|
23 |
with gr.Column():
|
24 |
+
gallery_pose_sequence = gr.Gallery(label="Pose Sequence", value=[('__assets__/poses_skeleton_gifs/dance1.gif', "Motion 1"), ('__assets__/poses_skeleton_gifs/dance2.gif', "Motion 2"), (
|
25 |
+
'__assets__/poses_skeleton_gifs/dance3.gif', "Motion 3"), ('__assets__/poses_skeleton_gifs/dance4.gif', "Motion 4"), ('__assets__/poses_skeleton_gifs/dance5.gif', "Motion 5")]).style(grid=[2], height="auto")
|
26 |
+
input_video_path = gr.Textbox(
|
27 |
+
label="Pose Sequence", visible=False, value="Motion 1")
|
28 |
gr.Markdown("## Selection")
|
29 |
+
pose_sequence_selector = gr.Markdown(
|
30 |
+
'Pose Sequence: **Motion 1**')
|
31 |
with gr.Column():
|
32 |
prompt = gr.Textbox(label='Prompt')
|
33 |
run_button = gr.Button(label='Run')
|
34 |
with gr.Accordion('Advanced options', open=False):
|
35 |
+
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
36 |
+
"None"], label="Watermark", value='Picsart AI Research')
|
37 |
+
chunk_size = gr.Slider(
|
38 |
+
label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
|
39 |
with gr.Column():
|
40 |
result = gr.Image(label="Generated Video")
|
41 |
|
42 |
+
input_video_path.change(on_video_path_update,
|
43 |
+
None, pose_sequence_selector)
|
44 |
+
gallery_pose_sequence.select(
|
45 |
+
pose_gallery_callback, None, input_video_path)
|
46 |
inputs = [
|
47 |
input_video_path,
|
48 |
prompt,
|
|
|
54 |
inputs=inputs,
|
55 |
outputs=result,
|
56 |
fn=model.process_controlnet_pose,
|
57 |
+
cache_examples=on_huggingspace,
|
58 |
run_on_click=False,
|
59 |
)
|
60 |
|
|
|
70 |
|
71 |
|
72 |
def pose_gallery_callback(evt: gr.SelectData):
|
73 |
+
return f"Motion {evt.index+1}"
|
app_text_to_video.py
CHANGED
@@ -1,8 +1,9 @@
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
-
|
4 |
-
from
|
5 |
-
|
|
|
6 |
|
7 |
examples = [
|
8 |
["an astronaut waving the arm on the moon"],
|
@@ -14,40 +15,8 @@ examples = [
|
|
14 |
["a gorilla walking alone down the street"],
|
15 |
["a gorilla dancing on times square"],
|
16 |
["A panda dancing dancing like crazy on Times Square"],
|
17 |
-
]
|
18 |
-
|
19 |
-
|
20 |
-
def model_url_list():
|
21 |
-
url_list = []
|
22 |
-
for i in range(0, 5):
|
23 |
-
url_list.append(f"https://huggingface.co/models?p={i}&sort=downloads&search=dreambooth")
|
24 |
-
return url_list
|
25 |
-
|
26 |
-
def data_scraping(url_list):
|
27 |
-
model_list = []
|
28 |
-
for url in url_list:
|
29 |
-
response = requests.get(url)
|
30 |
-
soup = BeautifulSoup(response.text, "html.parser")
|
31 |
-
div_class = 'grid grid-cols-1 gap-5 2xl:grid-cols-2'
|
32 |
-
div = soup.find('div', {'class': div_class})
|
33 |
-
for a in div.find_all('a', href=True):
|
34 |
-
model_list.append(a['href'])
|
35 |
-
return model_list
|
36 |
-
|
37 |
-
model_list = data_scraping(model_url_list())
|
38 |
-
for i in range(len(model_list)):
|
39 |
-
model_list[i] = model_list[i][1:]
|
40 |
-
|
41 |
-
best_model_list = [
|
42 |
-
"dreamlike-art/dreamlike-photoreal-2.0",
|
43 |
-
"dreamlike-art/dreamlike-diffusion-1.0",
|
44 |
-
"runwayml/stable-diffusion-v1-5",
|
45 |
-
"CompVis/stable-diffusion-v1-4",
|
46 |
-
"prompthero/openjourney",
|
47 |
]
|
48 |
|
49 |
-
model_list = best_model_list + model_list
|
50 |
-
|
51 |
|
52 |
def create_demo(model: Model):
|
53 |
|
@@ -59,7 +28,7 @@ def create_demo(model: Model):
|
|
59 |
"""
|
60 |
<div style="text-align: left; auto;">
|
61 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
62 |
-
Description: Simply input <b>any textual prompt</b> to generate videos right away and unleash your creativity and imagination! You can also select from the examples below. For performance purposes, our current preview release
|
63 |
</h3>
|
64 |
</div>
|
65 |
""")
|
@@ -68,27 +37,39 @@ def create_demo(model: Model):
|
|
68 |
with gr.Column():
|
69 |
model_name = gr.Dropdown(
|
70 |
label="Model",
|
71 |
-
choices=
|
72 |
value="dreamlike-art/dreamlike-photoreal-2.0",
|
73 |
)
|
74 |
prompt = gr.Textbox(label='Prompt')
|
75 |
run_button = gr.Button(label='Run')
|
76 |
with gr.Accordion('Advanced options', open=False):
|
77 |
-
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
with gr.Column():
|
90 |
result = gr.Video(label="Generated Video")
|
91 |
-
|
92 |
inputs = [
|
93 |
prompt,
|
94 |
model_name,
|
@@ -103,12 +84,12 @@ def create_demo(model: Model):
|
|
103 |
]
|
104 |
|
105 |
gr.Examples(examples=examples,
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
|
113 |
run_button.click(fn=model.process_text2video,
|
114 |
inputs=inputs,
|
|
|
1 |
import gradio as gr
|
2 |
from model import Model
|
3 |
+
import os
|
4 |
+
from hf_utils import get_model_list
|
5 |
+
|
6 |
+
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
|
7 |
|
8 |
examples = [
|
9 |
["an astronaut waving the arm on the moon"],
|
|
|
15 |
["a gorilla walking alone down the street"],
|
16 |
["a gorilla dancing on times square"],
|
17 |
["A panda dancing dancing like crazy on Times Square"],
|
18 |
]
|
19 |
|
|
|
|
|
20 |
|
21 |
def create_demo(model: Model):
|
22 |
|
|
|
28 |
"""
|
29 |
<div style="text-align: left; auto;">
|
30 |
<h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
|
31 |
+
Description: Simply input <b>any textual prompt</b> to generate videos right away and unleash your creativity and imagination! You can also select from the examples below. For performance purposes, our current preview release allows to generate up to 16 frames, which can be configured in the Advanced Options.
|
32 |
</h3>
|
33 |
</div>
|
34 |
""")
|
|
|
37 |
with gr.Column():
|
38 |
model_name = gr.Dropdown(
|
39 |
label="Model",
|
40 |
+
choices=get_model_list(),
|
41 |
value="dreamlike-art/dreamlike-photoreal-2.0",
|
42 |
)
|
43 |
prompt = gr.Textbox(label='Prompt')
|
44 |
run_button = gr.Button(label='Run')
|
45 |
with gr.Accordion('Advanced options', open=False):
|
46 |
+
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
47 |
+
"None"], label="Watermark", value='Picsart AI Research')
|
48 |
+
|
49 |
+
if on_huggingspace:
|
50 |
+
video_length = gr.Slider(
|
51 |
+
label="Video length", minimum=8, maximum=16, step=1)
|
52 |
+
else:
|
53 |
+
video_length = gr.Number(
|
54 |
+
label="Video length", value=8, precision=0)
|
55 |
+
chunk_size = gr.Slider(
|
56 |
+
label="Chunk size", minimum=2, maximum=16, value=12 if on_huggingspace else 8, step=1, visible=not on_huggingspace)
|
57 |
+
|
58 |
+
motion_field_strength_x = gr.Slider(
|
59 |
+
label='Global Translation $\delta_{x}$', minimum=-20, maximum=20, value=12, step=1)
|
60 |
+
motion_field_strength_y = gr.Slider(
|
61 |
+
label='Global Translation $\delta_{y}$', minimum=-20, maximum=20, value=12, step=1)
|
62 |
+
|
63 |
+
t0 = gr.Slider(label="Timestep t0", minimum=0,
|
64 |
+
maximum=49, value=44, step=1)
|
65 |
+
t1 = gr.Slider(label="Timestep t1", minimum=0,
|
66 |
+
maximum=49, value=47, step=1)
|
67 |
+
|
68 |
+
n_prompt = gr.Textbox(
|
69 |
+
label="Optional Negative Prompt", value='')
|
70 |
with gr.Column():
|
71 |
result = gr.Video(label="Generated Video")
|
72 |
+
|
73 |
inputs = [
|
74 |
prompt,
|
75 |
model_name,
|
|
|
84 |
]
|
85 |
|
86 |
gr.Examples(examples=examples,
|
87 |
+
inputs=inputs,
|
88 |
+
outputs=result,
|
89 |
+
fn=model.process_text2video,
|
90 |
+
run_on_click=False,
|
91 |
+
cache_examples=on_huggingspace,
|
92 |
+
)
|
93 |
|
94 |
run_button.click(fn=model.process_text2video,
|
95 |
inputs=inputs,
|
environment.yaml
ADDED
@@ -0,0 +1,45 @@
|
1 |
+
name: T2VZeroNew
|
2 |
+
channels:
|
3 |
+
- pytorch
|
4 |
+
- defaults
|
5 |
+
dependencies:
|
6 |
+
- python=3.8.5
|
7 |
+
- pip=22.3.1
|
8 |
+
- cudatoolkit=11.3
|
9 |
+
- pytorch=1.12.1
|
10 |
+
- torchvision=0.13.1
|
11 |
+
- pip:
|
12 |
+
- gradio==3.22.1
|
13 |
+
- albumentations==1.3.0
|
14 |
+
- opencv-contrib-python==4.3.0.36
|
15 |
+
- imageio==2.9.0
|
16 |
+
- imageio-ffmpeg==0.4.2
|
17 |
+
- pytorch-lightning==1.5.0
|
18 |
+
- omegaconf==2.3.0
|
19 |
+
- test-tube>=0.7.5
|
20 |
+
- streamlit==1.12.1
|
21 |
+
- einops==0.6.0
|
22 |
+
- transformers==4.26.0
|
23 |
+
- webdataset==0.2.5
|
24 |
+
- kornia==0.6
|
25 |
+
- open_clip_torch==2.16.0
|
26 |
+
- invisible-watermark>=0.1.5
|
27 |
+
- streamlit-drawable-canvas==0.8.0
|
28 |
+
- torchmetrics==0.6.0
|
29 |
+
- timm==0.6.12
|
30 |
+
- addict==2.4.0
|
31 |
+
- yapf==0.32.0
|
32 |
+
- prettytable==3.6.0
|
33 |
+
- safetensors==0.2.7
|
34 |
+
- basicsr==1.4.2
|
35 |
+
- accelerate==0.16.0
|
36 |
+
- decord==0.6.0
|
37 |
+
- diffusers==0.14.0
|
38 |
+
- moviepy==1.0.3
|
39 |
+
- opencv_python==4.7.0.68
|
40 |
+
- Pillow==9.4.0
|
41 |
+
- scikit_image==0.19.3
|
42 |
+
- scipy==1.10.1
|
43 |
+
- tensorboardX==2.6
|
44 |
+
- tqdm==4.64.1
|
45 |
+
- numpy==1.24.1
|
gradio_utils.py
CHANGED
@@ -1,4 +1,8 @@
+import os
+
 # App Canny utils
+
+
 def edge_path_to_video_path(edge_path):
     video_path = edge_path
 
@@ -17,9 +21,12 @@ def edge_path_to_video_path(edge_path):
         video_path = "__assets__/canny_videos_mp4_2fps/halloween.mp4"
     elif vid_name == "santa.mp4":
         video_path = "__assets__/canny_videos_mp4_2fps/santa.mp4"
+
+    assert os.path.isfile(video_path)
     return video_path
 
 
+# App Pose utils
 def motion_to_video_path(motion):
     videos = [
         "__assets__/poses_skeleton_gifs/dance1_corr.mp4",
@@ -49,8 +56,9 @@ def get_video_from_canny_selection(canny_selection):
     elif canny_selection == "woman3":
         input_video_path = "__assets__/db_files_2fps/woman3.mp4"
     else:
+        input_video_path = canny_selection
 
+    assert os.path.isfile(input_video_path)
     return input_video_path
 
 
@@ -64,7 +72,8 @@ def get_model_from_db_selection(db_selection):
     elif db_selection == "Arcane DB":
         input_video_path = 'PAIR/controlnet-canny-arcane'
     else:
+        input_video_path = db_selection
+
     return input_video_path
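The new `assert os.path.isfile(...)` guards make a bad selection fail immediately inside the utility function rather than deep in the generation pipeline, and unknown selections now fall through to being treated as a path (or Hub model id) directly. A small sketch of the same fall-through-plus-assert pattern (the asset table below is illustrative, not the repo's full list):

```python
import os

# Illustrative asset table; the real paths live under __assets__/ in the repo.
CANNY_ASSETS = {
    "santa": "__assets__/canny_videos_mp4_2fps/santa.mp4",
    "halloween": "__assets__/canny_videos_mp4_2fps/halloween.mp4",
}

def selection_to_video_path(selection: str) -> str:
    # Known names map to bundled assets; anything else is assumed to already
    # be a file path, and the assert surfaces typos or missing files right away.
    video_path = CANNY_ASSETS.get(selection, selection)
    assert os.path.isfile(video_path), f"Video not found: {video_path}"
    return video_path
```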
hf_utils.py
ADDED
@@ -0,0 +1,39 @@
+from bs4 import BeautifulSoup
+import requests
+
+
+def model_url_list():
+    url_list = []
+    for i in range(0, 5):
+        url_list.append(
+            f"https://huggingface.co/models?p={i}&sort=downloads&search=dreambooth")
+    return url_list
+
+
+def data_scraping(url_list):
+    model_list = []
+    for url in url_list:
+        response = requests.get(url)
+        soup = BeautifulSoup(response.text, "html.parser")
+        div_class = 'grid grid-cols-1 gap-5 2xl:grid-cols-2'
+        div = soup.find('div', {'class': div_class})
+        for a in div.find_all('a', href=True):
+            model_list.append(a['href'])
+    return model_list
+
+
+def get_model_list():
+    model_list = data_scraping(model_url_list())
+    for i in range(len(model_list)):
+        model_list[i] = model_list[i][1:]
+
+    best_model_list = [
+        "dreamlike-art/dreamlike-photoreal-2.0",
+        "dreamlike-art/dreamlike-diffusion-1.0",
+        "runwayml/stable-diffusion-v1-5",
+        "CompVis/stable-diffusion-v1-4",
+        "prompthero/openjourney",
+    ]
+
+    model_list = best_model_list + model_list
+    return model_list
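`get_model_list()` scrapes the first five Hub search pages for "dreambooth" checkpoints and prepends a hand-picked set, so the curated models always appear first. Note that the scraping depends on the current HTML layout of huggingface.co and can break silently if that layout changes. A hedged usage sketch (the dropdown is illustrative; the app file that consumes this is not part of this hunk):

```python
import gradio as gr
from hf_utils import get_model_list

model_list = get_model_list()  # curated models first, then scraped results

with gr.Blocks() as demo:
    # Populate a model picker from the scraped list.
    model_name = gr.Dropdown(label="Model", choices=model_list,
                             value="dreamlike-art/dreamlike-photoreal-2.0")
```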
model.py
CHANGED
@@ -3,15 +3,15 @@ import gc
 import numpy as np
 
 import torch
+
 from diffusers import StableDiffusionInstructPix2PixPipeline, StableDiffusionControlNetPipeline, ControlNetModel, UNet2DConditionModel
 from diffusers.schedulers import EulerAncestralDiscreteScheduler, DDIMScheduler
+from text_to_video_pipeline import TextToVideoPipeline
 
 import utils
 import gradio_utils
+import os
+on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
 
 
 class ModelType(Enum):
@@ -34,9 +34,12 @@
             ModelType.ControlNetCannyDB: StableDiffusionControlNetPipeline,
             ModelType.ControlNetPose: StableDiffusionControlNetPipeline,
         }
+        self.controlnet_attn_proc = utils.CrossFrameAttnProcessor(
+            unet_chunk_size=2)
+        self.pix2pix_attn_proc = utils.CrossFrameAttnProcessor(
+            unet_chunk_size=3)
+        self.text2video_attn_proc = utils.CrossFrameAttnProcessor(
+            unet_chunk_size=2)
 
         self.pipe = None
         self.model_type = None
@@ -49,7 +52,8 @@
         torch.cuda.empty_cache()
         gc.collect()
         safety_checker = kwargs.pop('safety_checker', None)
+        self.pipe = self.pipe_dict[model_type].from_pretrained(
+            model_id, safety_checker=safety_checker, **kwargs).to(self.device).to(self.dtype)
         self.model_type = model_type
 
     def inference_chunk(self, frame_ids, **kwargs):
@@ -90,6 +94,8 @@
         prompt = [kwargs.pop('prompt')] * f
         negative_prompt = [kwargs.pop('negative_prompt', '')] * f
 
+        frames_counter = 0
+
         # Processing chunk-by-chunk
         if split_to_chunks:
             chunk_ids = np.arange(0, f, chunk_size - 1)
@@ -104,6 +110,9 @@
                     prompt=prompt,
                     negative_prompt=negative_prompt,
                     **kwargs).images[1:])
+                frames_counter += len(chunk_ids)-1
+                if on_huggingspace and frames_counter >= 80:
+                    break
             result = np.concatenate(result)
             return result
         else:
@@ -126,8 +135,10 @@
                            save_path=None):
         video_path = gradio_utils.edge_path_to_video_path(video_path)
         if self.model_type != ModelType.ControlNetCanny:
+            controlnet = ControlNetModel.from_pretrained(
+                "lllyasviel/sd-controlnet-canny")
+            self.set_model(ModelType.ControlNetCanny,
+                           model_id="runwayml/stable-diffusion-v1-5", controlnet=controlnet)
             self.pipe.scheduler = DDIMScheduler.from_config(
                 self.pipe.scheduler.config)
         if use_cf_attn:
@@ -140,7 +151,7 @@
         negative_prompts = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
 
         video, fps = utils.prepare_video(
+            video_path, resolution, self.device, self.dtype, False)
         control = utils.pre_process_canny(
             video, low_threshold, high_threshold).to(self.device).to(self.dtype)
         f, _, h, w = video.shape
@@ -180,8 +191,10 @@
                      save_path=None):
         video_path = gradio_utils.motion_to_video_path(video_path)
         if self.model_type != ModelType.ControlNetPose:
+            controlnet = ControlNetModel.from_pretrained(
+                "fusing/stable-diffusion-v1-5-controlnet-openpose")
+            self.set_model(ModelType.ControlNetPose,
+                           model_id="runwayml/stable-diffusion-v1-5", controlnet=controlnet)
             self.pipe.scheduler = DDIMScheduler.from_config(
                 self.pipe.scheduler.config)
         if use_cf_attn:
@@ -242,8 +255,10 @@
         video_path = gradio_utils.get_video_from_canny_selection(video_path)
         # Load db and controlnet weights
         if 'db_path' not in self.states or db_path != self.states['db_path']:
+            controlnet = ControlNetModel.from_pretrained(
+                "lllyasviel/sd-controlnet-canny")
+            self.set_model(ModelType.ControlNetCannyDB,
+                           model_id=db_path, controlnet=controlnet)
             self.pipe.scheduler = DDIMScheduler.from_config(
                 self.pipe.scheduler.config)
             self.states['db_path'] = db_path
@@ -320,7 +335,7 @@
 
     def process_text2video(self,
                            prompt,
-                           model_name,
+                           model_name="dreamlike-art/dreamlike-photoreal-2.0",
                            motion_field_strength_x=12,
                            motion_field_strength_y=12,
                            t0=44,
@@ -340,8 +355,10 @@
                            path=None):
 
         if self.model_type != ModelType.Text2Video:
+            unet = UNet2DConditionModel.from_pretrained(
+                model_name, subfolder="unet")
+            self.set_model(ModelType.Text2Video,
+                           model_id=model_name, unet=unet)
             self.pipe.scheduler = DDIMScheduler.from_config(
                 self.pipe.scheduler.config)
         if use_cf_attn:
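The chunked branch is the scheduling detail worth reading twice: `np.arange(0, f, chunk_size - 1)` spaces chunk boundaries `chunk_size - 1` frames apart so that each denoising pass has room for one extra anchor frame used by the cross-frame attention processors set up in `__init__`, and `.images[1:]` drops the anchor's output so no frame is emitted twice; on the Space, `frames_counter` additionally stops generation once roughly 80 frames have been produced. A self-contained sketch of just the boundary arithmetic (no diffusion; the exact frame-id and anchor handling live in `inference_chunk`, which is not part of this hunk):

```python
import numpy as np

f = 24          # total frames requested
chunk_size = 8  # frames handled per denoising pass

boundaries = np.arange(0, f, chunk_size - 1)  # e.g. [ 0  7 14 21]
for i, start in enumerate(boundaries):
    end = f if i == len(boundaries) - 1 else boundaries[i + 1]
    # Each pass would also carry an anchor frame whose output is discarded
    # via `.images[1:]`, so the emitted frames are exactly start..end-1.
    print(f"chunk {i}: frames {start}..{end - 1} ({end - start} new frames)")
```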
models/cldm_v15.yaml
DELETED
@@ -1,81 +0,0 @@
-model:
-  target: cldm.cldm.ControlLDM
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    control_key: "hint"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False
-    only_mid_control: False
-
-    control_stage_config:
-      target: cldm.cldm.ControlNet
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        hint_channels: 3
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        use_cf_attn: True
-        legacy: False
-
-    unet_config:
-      target: cldm.cldm.ControlledUnetModel
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        legacy: False
-        use_cf_attn: True
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
models/cldm_v15_no_cf_attn.yaml
DELETED
@@ -1,81 +0,0 @@
-model:
-  target: cldm.cldm.ControlLDM
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    control_key: "hint"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False
-    only_mid_control: False
-
-    control_stage_config:
-      target: cldm.cldm.ControlNet
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        hint_channels: 3
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        use_cf_attn: False
-        legacy: False
-
-    unet_config:
-      target: cldm.cldm.ControlledUnetModel
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        legacy: False
-        use_cf_attn: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
models/cldm_v21.yaml
DELETED
@@ -1,85 +0,0 @@
-model:
-  target: cldm.cldm.ControlLDM
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    control_key: "hint"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False
-    only_mid_control: False
-
-    control_stage_config:
-      target: cldm.cldm.ControlNet
-      params:
-        use_checkpoint: True
-        image_size: 32 # unused
-        in_channels: 4
-        hint_channels: 3
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    unet_config:
-      target: cldm.cldm.ControlledUnetModel
-      params:
-        use_checkpoint: True
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          #attn_type: "vanilla-xformers"
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
requirements.txt
CHANGED
@@ -14,14 +14,17 @@ moviepy==1.0.3
 numpy==1.24.1
 omegaconf==2.3.0
 open_clip_torch==2.16.0
+opencv_python==4.7.0.72
+opencv-contrib-python==4.7.0.72
 Pillow==9.4.0
 pytorch_lightning==1.5.0
 prettytable==3.6.0
 scikit_image==0.19.3
 scipy==1.10.1
 tensorboardX==2.6
+torch==1.13.1
+torchvision==0.14.1
+torchmetrics==0.6.0
 tqdm==4.64.1
 timm==0.6.12
 transformers==4.26.0
@@ -29,7 +32,5 @@ test-tube>=0.7.5
 webdataset==0.2.5
 yapf==0.32.0
 safetensors==0.2.7
-huggingface-hub==0.13.0
-torch==1.13.1
-torchvision==0.14.1
 beautifulsoup4
+bs4
text_to_video/text_to_video_pipeline.py → text_to_video_pipeline.py
RENAMED
@@ -89,23 +89,6 @@ class TextToVideoPipeline(StableDiffusionPipeline):
         latents = latents * self.scheduler.init_noise_sigma
         return latents
 
-    def warp_latents_from_f0(self, latents, reference_flow):
-        _, _, H, W = reference_flow.size()
-        b, c, f, h, w = latents.size()
-        coords0 = coords_grid(f, H, W, device=latents.device).to(latents.dtype)
-        coords_t0 = coords0 + reference_flow
-        coords_t0[:, 0] /= W
-        coords_t0[:, 1] /= H
-        coords_t0 = coords_t0 * 2.0 - 1.0
-        coords_t0 = T.Resize((h, w))(coords_t0)
-        coords_t0 = rearrange(coords_t0, 'f c h w -> f h w c')
-        latents_0 = latents[:, :, 0]
-        latents_0 = latents_0.repeat(f, 1, 1, 1)
-        warped = grid_sample(latents_0, coords_t0,
-                             mode='nearest', padding_mode='reflection')
-        warped = rearrange(warped, '(b f) c h w -> b c f h w', f=f)
-        return warped
-
     def warp_latents_independently(self, latents, reference_flow, inject_noise=False):
         _, _, H, W = reference_flow.size()
         b, _, f, h, w = latents.size()
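The removed `warp_latents_from_f0` and the retained `warp_latents_independently` (judging by its signature) rely on the same core trick: turn the global-translation strengths (the δx, δy sliders) into a per-frame flow field and resample the latent tensor with `grid_sample`. A compact, hedged re-implementation of that idea (not the repo's exact code; the ×8 latent-to-pixel factor and the linear per-frame scaling are assumptions):

```python
import torch
import torch.nn.functional as F
from einops import rearrange
import torchvision.transforms as T

def warp_by_translation(latents, dx, dy):
    """Shift frame t of `latents` (b, c, f, h, w) by roughly t*(dx, dy) pixels,
    mirroring the flow-plus-grid_sample pattern used in the pipeline."""
    b, c, f, h, w = latents.shape
    H, W = h * 8, w * 8  # assumed VAE downscale factor of 8
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    coords = torch.stack([xs, ys]).float()                      # (2, H, W) pixel coords
    shift = torch.tensor([float(dx), float(dy)]).view(2, 1, 1)  # per-frame pixel shift
    flow = torch.stack([shift.expand(2, H, W) * t for t in range(f)])  # (f, 2, H, W)
    coords_t = coords.unsqueeze(0) + flow                       # translated coordinates
    coords_t[:, 0] = coords_t[:, 0] / W * 2.0 - 1.0             # normalize x to [-1, 1]
    coords_t[:, 1] = coords_t[:, 1] / H * 2.0 - 1.0             # normalize y to [-1, 1]
    coords_t = T.Resize((h, w))(coords_t)                       # down to latent resolution
    grid = rearrange(coords_t, "f c h w -> f h w c")
    lat = rearrange(latents, "b c f h w -> (b f) c h w")
    warped = F.grid_sample(lat, grid.repeat(b, 1, 1, 1),
                           mode="nearest", padding_mode="reflection",
                           align_corners=True)
    return rearrange(warped, "(b f) c h w -> b c f h w", f=f)

# e.g. warp_by_translation(torch.randn(1, 4, 8, 64, 64), dx=12, dy=12)
```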