First model version
This view is limited to 50 files because it contains too many changes.
- CITATION.cff +9 -0
- LICENSE +21 -0
- README.md +176 -13
- app.py +246 -0
- danbooru2017_guided_log/ema_0.9999_360000.pt +3 -0
- danbooru2017_guided_sr_log/ema_0.9999_360000.pt +3 -0
- data/.gitkeep +0 -0
- datasets/README.md +37 -0
- datasets/cifar10.py +43 -0
- datasets/lsun_bedroom.py +54 -0
- docs/imgs/anime/1000000.png +0 -0
- docs/imgs/anime/1002000.png +0 -0
- docs/imgs/anime/1003000.png +0 -0
- docs/imgs/anime/1004000.png +0 -0
- docs/imgs/anime/1006000.png +0 -0
- docs/imgs/anime/1012000.png +0 -0
- docs/imgs/anime_sketch/1000000.png +0 -0
- docs/imgs/anime_sketch/1002000.png +0 -0
- docs/imgs/anime_sketch/1003000.png +0 -0
- docs/imgs/anime_sketch/1004000.png +0 -0
- docs/imgs/anime_sketch/1006000.png +0 -0
- docs/imgs/anime_sketch/1012000.png +0 -0
- docs/imgs/sample.png +0 -0
- openai.LICENSE +21 -0
- packages.txt +1 -0
- pixel_guide_diffusion/__init__.py +3 -0
- pixel_guide_diffusion/dist_util.py +82 -0
- pixel_guide_diffusion/fp16_util.py +76 -0
- pixel_guide_diffusion/gaussian_diffusion.py +841 -0
- pixel_guide_diffusion/image_datasets.py +173 -0
- pixel_guide_diffusion/logger.py +495 -0
- pixel_guide_diffusion/losses.py +77 -0
- pixel_guide_diffusion/nn.py +191 -0
- pixel_guide_diffusion/resample.py +154 -0
- pixel_guide_diffusion/respace.py +122 -0
- pixel_guide_diffusion/script_util.py +537 -0
- pixel_guide_diffusion/train_util.py +356 -0
- pixel_guide_diffusion/unet.py +594 -0
- requirements.txt +6 -0
- scripts/cascaded_pixel_guide_sample.py +148 -0
- scripts/image_nll.py +96 -0
- scripts/image_sample.py +106 -0
- scripts/image_train.py +83 -0
- scripts/pixel_guide_sample.py +111 -0
- scripts/pixel_guide_super_res_sample.py +133 -0
- scripts/pixel_guide_super_res_train.py +108 -0
- scripts/pixel_guide_train.py +89 -0
- scripts/super_res_sample.py +117 -0
- scripts/super_res_train.py +98 -0
- setup.py +7 -0
CITATION.cff
ADDED
@@ -0,0 +1,9 @@
+cff-version: 1.2.0
+message: "If you use this code, please cite it as below."
+authors:
+- family-names: "Wu"
+  given-names: "Hecong"
+title: "Pixel Guide Diffusion For Anime Colorization"
+version: 1.0.0
+date-released: 2021-10-26
+url: "https://github.com/HighCWu/pixel-guide-diffusion-for-anime-colorization"
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Wu Hecong
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,13 +1,176 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Pixel Guide Diffusion For Anime Colorization
+
+![avatar](docs/imgs/sample.png)
+
+Use a denoising diffusion probabilistic model to do the anime colorization task.
+
+The v1 test results are in the [v1_result](https://github.com/HighCWu/pixel-guide-diffusion-for-anime-colorization/tree/v1_result) branch.
+
+The dataset is not clean enough, and the guide sketches were generated with sketchKeras, so generalization is limited.
+
+In the future, I may try to use only anime portraits as the target images, and look for more diverse sketch models.
+
+# Introduction and Usage
+
+Pixel Guide Denoising Diffusion Probabilistic Models (One-Channel Guide Version)
+
+This repo is modified from [improved-diffusion](https://github.com/openai/improved-diffusion).
+
+Use [danbooru-sketch-pair-128x](https://www.kaggle.com/wuhecong/danbooru-sketch-pair-128x) as the dataset. You may need to rearrange the folders in the dataset first to build the guide-target pair dataset.
+
+Modify `train_danbooru*.sh` and `test_danbooru*.sh` to meet your needs.
+
+The model is divided into a 32px base part and a super-resolution part, which can be cascaded during testing to get the final result. There is no cascading during training.
+
+QQ Group: 1044867291
+
+Discord: https://discord.gg/YwWcAS47qb
+
+# Original README
+
+# improved-diffusion
+
+This is the codebase for [Improved Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2102.09672).
+
+# Usage
+
+This section of the README walks through how to train and sample from a model.
+
+## Installation
+
+Clone this repository and navigate to it in your terminal. Then run:
+
+```
+pip install -e .
+```
+
+This should install the ~~`improved_diffusion`~~ `pixel_guide_diffusion` python package that the scripts depend on.
+
+## Preparing Data
+
+The training code reads images from a directory of image files. In the [datasets](datasets) folder, we have provided instructions/scripts for preparing these directories for ImageNet, LSUN bedrooms, and CIFAR-10.
+
+For creating your own dataset, simply dump all of your images into a directory with ".jpg", ".jpeg", or ".png" extensions. If you wish to train a class-conditional model, name the files like "mylabel1_XXX.jpg", "mylabel2_YYY.jpg", etc., so that the data loader knows that "mylabel1" and "mylabel2" are the labels. Subdirectories will automatically be enumerated as well, so the images can be organized into a recursive structure (although the directory names will be ignored, and the underscore prefixes are used as names).
+
+The images will automatically be scaled and center-cropped by the data-loading pipeline. Simply pass `--data_dir path/to/images` to the training script, and it will take care of the rest.
+
+## Training
+
+To train your model, you should first decide some hyperparameters. We will split up our hyperparameters into three groups: model architecture, diffusion process, and training flags. Here are some reasonable defaults for a baseline:
+
+```
+MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
+```
+
+Here are some changes we experiment with, and how to set them in the flags:
+
+* **Learned sigmas:** add `--learn_sigma True` to `MODEL_FLAGS`
+* **Cosine schedule:** change `--noise_schedule linear` to `--noise_schedule cosine`
+* **Reweighted VLB:** add `--use_kl True` to `DIFFUSION_FLAGS` and add `--schedule_sampler loss-second-moment` to `TRAIN_FLAGS`.
+* **Class-conditional:** add `--class_cond True` to `MODEL_FLAGS`.
+
+Once you have set up your hyperparameters, you can run an experiment like so:
+
+```
+python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
+```
+
+You may also want to train in a distributed manner. In this case, run the same command with `mpiexec`:
+
+```
+mpiexec -n $NUM_GPUS python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
+```
+
+When training in a distributed manner, you must manually divide the `--batch_size` argument by the number of ranks. In lieu of distributed training, you may use `--microbatch 16` (or `--microbatch 1` in extreme memory-limited cases) to reduce memory usage.
+
+The logs and saved models will be written to a logging directory determined by the `OPENAI_LOGDIR` environment variable. If it is not set, then a temporary directory will be created in `/tmp`.
+
+## Sampling
+
+The above training script saves checkpoints to `.pt` files in the logging directory. These checkpoints will have names like `ema_0.9999_200000.pt` and `model200000.pt`. You will likely want to sample from the EMA models, since those produce much better samples.
+
+Once you have a path to your model, you can generate a large batch of samples like so:
+
+```
+python scripts/image_sample.py --model_path /path/to/model.pt $MODEL_FLAGS $DIFFUSION_FLAGS
+```
+
+Again, this will save results to a logging directory. Samples are saved as a large `npz` file, where `arr_0` in the file is a large batch of samples.
+
+Just like for training, you can run `image_sample.py` through MPI to use multiple GPUs and machines.
+
+You can change the number of sampling steps using the `--timestep_respacing` argument. For example, `--timestep_respacing 250` uses 250 steps to sample. Passing `--timestep_respacing ddim250` is similar, but uses the uniform stride from the [DDIM paper](https://arxiv.org/abs/2010.02502) rather than our stride.
+
+To sample using [DDIM](https://arxiv.org/abs/2010.02502), pass `--use_ddim True`.
+
+## Models and Hyperparameters
+
+This section includes model checkpoints and run flags for the main models in the paper.
+
+Note that the batch sizes are specified for single-GPU training, even though most of these runs will not naturally fit on a single GPU. To address this, either set `--microbatch` to a small value (e.g. 4) to train on one GPU, or run with MPI and divide `--batch_size` by the number of GPUs.
+
+Unconditional ImageNet-64 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_100M_1500K.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
+```
+
+Unconditional CIFAR-10 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_50M_500K.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
+```
+
+Class-conditional ImageNet-64 model (270M parameters, trained for 250K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_cond_270M_250K.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 64 --num_channels 192 --num_res_blocks 3 --learn_sigma True --class_cond True"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine --rescale_learned_sigmas False --rescale_timesteps False"
+TRAIN_FLAGS="--lr 3e-4 --batch_size 2048"
+```
+
+Upsampling 256x256 model (280M parameters, trained for 500K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/upsample_cond_500K.pt)]:
+
+```bash
+MODEL_FLAGS="--num_channels 192 --num_res_blocks 2 --learn_sigma True --class_cond True"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False"
+TRAIN_FLAGS="--lr 3e-4 --batch_size 256"
+```
+
+LSUN bedroom model (lr=1e-4) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_1200K_bs128.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16"
+DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
+```
+
+LSUN bedroom model (lr=2e-5) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_2400K_bs64.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16"
+DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False --use_scale_shift_norm False"
+TRAIN_FLAGS="--lr 2e-5 --batch_size 128"
+```
+
+Unconditional ImageNet-64 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_vlb_100M_1500K.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment"
+```
+
+Unconditional CIFAR-10 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_vlb_50M_500K.pt)]:
+
+```bash
+MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3"
+DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
+TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment"
+```
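A note on the "Preparing Data" convention above: class labels are encoded in filenames, not in a metadata file. As a minimal illustration of that convention (not the repo's actual loader, which lives in `pixel_guide_diffusion/image_datasets.py`), label extraction amounts to:

```python
import os

def label_from_filename(path):
    # "mylabel1_XXX.jpg" -> "mylabel1": the text before the first underscore
    # of the basename is treated as the class label.
    return os.path.basename(path).split("_")[0]

print(label_from_filename("data/mylabel1_000.jpg"))       # mylabel1
print(label_from_filename("nested/dir/truck_49997.png"))  # truck
```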
app.py
ADDED
@@ -0,0 +1,246 @@
+"""
+A Gradio Blocks Demo App.
+Generate a large batch of samples from a super resolution model, given a batch
+of samples from a regular model from image_sample.py.
+"""
+
+import gradio as gr
+import argparse
+import os
+import glob
+
+import blobfile as bf
+import numpy as np
+import torch as th
+import torch.distributed as dist
+
+from PIL import Image, ImageDraw
+from torchvision import utils
+from pixel_guide_diffusion import dist_util, logger
+from pixel_guide_diffusion.image_datasets import load_data
+from pixel_guide_diffusion.script_util import (
+    pg_model_and_diffusion_defaults,
+    pg_create_model_and_diffusion,
+    pgsr_model_and_diffusion_defaults,
+    pgsr_create_model_and_diffusion,
+    args_to_dict,
+    add_dict_to_argparser,
+)
+
+MODEL_FLAGS="--image_size=32 --small_size=32 --large_size=128 --guide_size=128 --num_channels=128 --num_channels2=64 --num_res_blocks=3 --learn_sigma=True --dropout=0.0 --use_attention2=False"
+DIFFUSION_FLAGS="--diffusion_steps=4000 --noise_schedule=cosine"
+TEST_FLAGS="--batch_size=1 --seed=233 --num_samples=4"
+OTHER_FLAGS = '''\
+--timestep_respacing=16 \
+--use_ddim=False \
+--model_path=./danbooru2017_guided_log/ema_0.9999_360000.pt \
+--model_path2=./danbooru2017_guided_sr_log/ema_0.9999_360000.pt'''
+OTHER_FLAGS = OTHER_FLAGS.replace('\r\n', ' ').replace('\n', ' ')
+flags = OTHER_FLAGS.split(' ') + MODEL_FLAGS.split(' ') + DIFFUSION_FLAGS.split(' ') + TEST_FLAGS.split(' ')
+
+
+def norm_size(img, size=128, add_edges=True):
+    img = img.convert('L')
+    w, h = img.size
+    if w != h:
+        scale = 1024 / max(img.size)
+        img = img.resize([int(round(s*scale)) for s in img.size])
+        w, h = img.size
+        max_size = max(w, h)
+        x0 = (max_size - w) // 2
+        y0 = (max_size - h) // 2
+        x1 = x0 + w
+        y1 = y0 + h
+        canvas = Image.new('L', (max_size,max_size), 255)
+        canvas.paste(img, (x0,y0,x1,y1))
+
+        if add_edges:
+            draw = ImageDraw.Draw(canvas)
+            draw.line((x0-5,0,x0-1,max_size), fill=0)
+            draw.line((0,y0-5,max_size,y0-1), fill=0)
+            draw.line((x1+1,0,x1+5,max_size), fill=0)
+            draw.line((0,y1+1,max_size,y1+5), fill=0)
+
+        img = canvas
+    img = img.resize((size,size), resample=Image.LANCZOS)
+
+    return img
+
+
+def create_argparser():
+    defaults = dict(
+        data_dir="",
+        guide_dir="",
+        clip_denoised=True,
+        num_samples=100,
+        batch_size=4,
+        use_ddim=False,
+        base_samples="",
+        model_path="",
+        seed=-1,
+    )
+    defaults.update(pg_model_and_diffusion_defaults())
+    defaults.update(pgsr_model_and_diffusion_defaults())
+    defaults.update(dict(
+        num_channels2=128,
+        use_attention2=True,
+        model_path2="",
+    ))
+    parser = argparse.ArgumentParser()
+    add_dict_to_argparser(parser, defaults)
+    return parser
+
+
+@th.inference_mode()
+def main():
+    args = create_argparser().parse_args(flags)
+
+    dist_util.setup_dist()
+    logger.configure()
+
+    logger.log("creating model...")
+    model, diffusion = pg_create_model_and_diffusion(
+        **args_to_dict(args, pg_model_and_diffusion_defaults().keys())
+    )
+    model.load_state_dict(
+        dist_util.load_state_dict(args.model_path, map_location="cpu")
+    )
+    model.to(dist_util.dev())
+    model.eval()
+
+    logger.log("creating model2...")
+    args.num_channels = args.num_channels2
+    args.use_attention = args.use_attention2
+    model2, diffusion2 = pgsr_create_model_and_diffusion(
+        **args_to_dict(args, pgsr_model_and_diffusion_defaults().keys())
+    )
+    model2.load_state_dict(
+        dist_util.load_state_dict(args.model_path2, map_location="cpu")
+    )
+    model2.to(dist_util.dev())
+    model2.eval()
+
+    def inference(img, seed, add_edges):
+        th.manual_seed(int(seed))
+        sketch = sketch_out = norm_size(img, size=128, add_edges=add_edges)
+        sketch = np.asarray(sketch).astype(np.float32) / 127.5 - 1
+        sketch = th.from_numpy(sketch).float()[None,None].to(dist_util.dev())
+        model_kwargs = { "guide": sketch }
+        sample_fn = (
+            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
+        )
+        sample = sample_fn(
+            model,
+            (args.batch_size, 3, args.image_size, args.image_size),
+            clip_denoised=args.clip_denoised,
+            model_kwargs=model_kwargs,
+        )
+
+        model_kwargs["low_res"] = sample
+        sample_fn2 = (
+            diffusion2.p_sample_loop if not args.use_ddim else diffusion2.ddim_sample_loop
+        )
+        sample2 = sample_fn2(
+            model2,
+            (args.batch_size, 3, args.large_size, args.large_size),
+            clip_denoised=args.clip_denoised,
+            model_kwargs=model_kwargs,
+        )
+        out = (sample2[0].clamp(-1,1).cpu().numpy() + 1) / 2 * 255
+        out = np.uint8(out)
+        out = out.transpose([1,2,0])
+        out = Image.fromarray(out)
+
+        return sketch_out, out
+
+    with gr.Blocks() as demo:
+        gr.Markdown('''<center><h1>Anime-Colorization</h1></center>
+<h2>Colorize your anime sketches with this app.</h2>
+This is a Gradio Blocks app of
+<a href="https://github.com/HighCWu/pixel-guide-diffusion-for-anime-colorization">
+HighCWu/pixel-guide-diffusion-for-anime-colorization
+</a>.<br />
+(PS: Training Datasets are made from <a href="https://www.kaggle.com/datasets/wuhecong/danbooru-sketch-pair-128x">
+HighCWu/danbooru-sketch-pair-128x
+</a> which processed real anime images to sketches by
+<a href="https://github.com/lllyasviel/sketchKeras">SketchKeras</a>.
+So the model is not very sensitive to some different styles of sketches,
+and the colorized results of such sketches are not very good.)
+''')
+        with gr.Row():
+            with gr.Box():
+                with gr.Column():
+                    with gr.Row():
+                        seed_in = gr.Number(
+                            value=233,
+                            label='Seed'
+                        )
+                    with gr.Row():
+                        edges_in = gr.Checkbox(
+                            label="Add Edges"
+                        )
+                    with gr.Row():
+                        sketch_in = gr.Image(
+                            type="pil",
+                            label="Sketch"
+                        )
+                    with gr.Row():
+                        generate_button = gr.Button('Generate')
+                    with gr.Row():
+                        gr.Markdown('Click to add example as input.👇')
+                    with gr.Row():
+                        example_sketch_paths = [[p] for p in sorted(glob.glob('docs/imgs/anime_sketch/*.png'))]
+                        example_sketch = gr.Dataset(
+                            components=[sketch_in],
+                            samples=example_sketch_paths
+                        )
+                    with gr.Row():
+                        gr.Markdown('These are the expected real outputs.👇')
+                    with gr.Row():
+                        example_real_paths = [[p] for p in sorted(glob.glob('docs/imgs/anime/*.png'))]
+                        example_real = gr.Dataset(
+                            components=[sketch_in],
+                            samples=example_real_paths
+                        )
+
+            with gr.Box():
+                with gr.Column():
+                    with gr.Row():
+                        with gr.Column():
+                            sketch_out = gr.Image(
+                                type="pil",
+                                label="Input"
+                            )
+                        with gr.Column():
+                            colorized_out = gr.Image(
+                                type="pil",
+                                label="Colorization Result"
+                            )
+                    with gr.Row():
+                        gr.Markdown(
+                            'Here are some samples 👇 [top: sketch, center: generated, bottom: real]'
+                        )
+                    with gr.Row():
+                        gr.Image(
+                            value="docs/imgs/sample.png",
+                            type="filepath",
+                            interactive=False,
+                            label="Samples"
+                        )
+        gr.Markdown(
+            '<center><img src="https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.anime-colorization" alt="visitor badge"/></center>'
+        )
+
+        generate_button.click(
+            inference, inputs=[sketch_in, seed_in, edges_in], outputs=[sketch_out, colorized_out]
+        )
+        example_sketch.click(
+            fn=lambda examples: gr.Image.update(value=examples[0]),
+            inputs=example_sketch,
+            outputs=example_sketch.components
+        )
+
+    demo.launch()
+
+if __name__ == '__main__':
+    main()
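A note on app.py's tensor conventions: the sketch is mapped from uint8 [0, 255] into [-1, 1] before it is passed as the `guide`, the base 32px sample is fed to the super-resolution model as `low_res`, and the final sample is mapped back and transposed from CHW to HWC for display. A self-contained sketch of that normalization round trip, with random noise standing in for a real diffusion sample:

```python
import numpy as np
from PIL import Image

# Sketch -> model input, as in app.py: uint8 [0, 255] -> float32 [-1, 1].
gray = np.asarray(Image.new("L", (128, 128), 255), dtype=np.float32)
guide = gray / 127.5 - 1.0  # shape (128, 128), values in [-1, 1]

# Model output -> displayable image: clamp to [-1, 1], rescale to [0, 255],
# then CHW -> HWC. Random noise stands in for a real diffusion sample here.
sample = np.clip(np.random.randn(3, 128, 128), -1, 1)
out = np.uint8((sample + 1) / 2 * 255).transpose([1, 2, 0])
img = Image.fromarray(out)
```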
danbooru2017_guided_log/ema_0.9999_360000.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b614305acf2d30b7c63bcbc56f646a3ee06579a8430f9c55c8de5014c977f397
+size 210354744
danbooru2017_guided_sr_log/ema_0.9999_360000.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc1c29c293ad2625cf616f0d1a33e2d708c061ac7f439d3df53e9b22cafe36d7
+size 48757368
data/.gitkeep
ADDED
File without changes
datasets/README.md
ADDED
@@ -0,0 +1,37 @@
+# Downloading datasets
+
+This directory includes instructions and scripts for downloading ImageNet, LSUN bedrooms, and CIFAR-10 for use in this codebase.
+
+## ImageNet-64
+
+To download unconditional ImageNet-64, go to [this page on image-net.org](http://www.image-net.org/small/download.php) and click on "Train (64x64)". Simply download the file and unzip it, and use the resulting directory as the data directory (the `--data_dir` argument for the training script).
+
+## Class-conditional ImageNet
+
+For our class-conditional models, we use the official ILSVRC2012 dataset with manual center cropping and downsampling. To obtain this dataset, navigate to [this page on image-net.org](http://www.image-net.org/challenges/LSVRC/2012/downloads) and sign in (or create an account if you do not already have one). Then click on the link reading "Training images (Task 1 & 2)". This is a 138GB tar file containing 1000 sub-tar files, one per class.
+
+Once the file is downloaded, extract it and look inside. You should see 1000 `.tar` files. You need to extract each of these, which may be impractical to do by hand on your operating system. To automate the process on a Unix-based system, you can `cd` into the directory and run this short shell script:
+
+```
+for file in *.tar; do tar xf "$file"; rm "$file"; done
+```
+
+This will extract and remove each tar file in turn.
+
+Once all of the images have been extracted, the resulting directory should be usable as a data directory (the `--data_dir` argument for the training script). The filenames should all start with WNID (class ids) followed by underscores, like `n01440764_2708.JPEG`. Conveniently (but not by accident) this is how the automated data-loader expects to discover class labels.
+
+## CIFAR-10
+
+For CIFAR-10, we created a script [cifar10.py](cifar10.py) that creates `cifar_train` and `cifar_test` directories. These directories contain files named like `truck_49997.png`, so that the class name is discernable to the data loader.
+
+The `cifar_train` and `cifar_test` directories can be passed directly to the training scripts via the `--data_dir` argument.
+
+## LSUN bedroom
+
+To download and pre-process LSUN bedroom, clone [fyu/lsun](https://github.com/fyu/lsun) on GitHub and run their download script `python3 download.py bedroom`. The result will be an "lmdb" database named like `bedroom_train_lmdb`. You can pass this to our [lsun_bedroom.py](lsun_bedroom.py) script like so:
+
+```
+python lsun_bedroom.py bedroom_train_lmdb lsun_train_output_dir
+```
+
+This creates a directory called `lsun_train_output_dir`. This directory can be passed to the training scripts via the `--data_dir` argument.
datasets/cifar10.py
ADDED
@@ -0,0 +1,43 @@
+import os
+import tempfile
+
+import torchvision
+from tqdm.auto import tqdm
+
+CLASSES = (
+    "plane",
+    "car",
+    "bird",
+    "cat",
+    "deer",
+    "dog",
+    "frog",
+    "horse",
+    "ship",
+    "truck",
+)
+
+
+def main():
+    for split in ["train", "test"]:
+        out_dir = f"cifar_{split}"
+        if os.path.exists(out_dir):
+            print(f"skipping split {split} since {out_dir} already exists.")
+            continue
+
+        print("downloading...")
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            dataset = torchvision.datasets.CIFAR10(
+                root=tmp_dir, train=split == "train", download=True
+            )
+
+        print("dumping images...")
+        os.mkdir(out_dir)
+        for i in tqdm(range(len(dataset))):
+            image, label = dataset[i]
+            filename = os.path.join(out_dir, f"{CLASSES[label]}_{i:05d}.png")
+            image.save(filename)
+
+
+if __name__ == "__main__":
+    main()
datasets/lsun_bedroom.py
ADDED
@@ -0,0 +1,54 @@
+"""
+Convert an LSUN lmdb database into a directory of images.
+"""
+
+import argparse
+import io
+import os
+
+from PIL import Image
+import lmdb
+import numpy as np
+
+
+def read_images(lmdb_path, image_size):
+    env = lmdb.open(lmdb_path, map_size=1099511627776, max_readers=100, readonly=True)
+    with env.begin(write=False) as transaction:
+        cursor = transaction.cursor()
+        for _, webp_data in cursor:
+            img = Image.open(io.BytesIO(webp_data))
+            width, height = img.size
+            scale = image_size / min(width, height)
+            img = img.resize(
+                (int(round(scale * width)), int(round(scale * height))),
+                resample=Image.BOX,
+            )
+            arr = np.array(img)
+            h, w, _ = arr.shape
+            h_off = (h - image_size) // 2
+            w_off = (w - image_size) // 2
+            arr = arr[h_off : h_off + image_size, w_off : w_off + image_size]
+            yield arr
+
+
+def dump_images(out_dir, images, prefix):
+    if not os.path.exists(out_dir):
+        os.mkdir(out_dir)
+    for i, img in enumerate(images):
+        Image.fromarray(img).save(os.path.join(out_dir, f"{prefix}_{i:07d}.png"))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--image-size", help="new image size", type=int, default=256)
+    parser.add_argument("--prefix", help="class name", type=str, default="bedroom")
+    parser.add_argument("lmdb_path", help="path to an LSUN lmdb database")
+    parser.add_argument("out_dir", help="path to output directory")
+    args = parser.parse_args()
+
+    images = read_images(args.lmdb_path, args.image_size)
+    dump_images(args.out_dir, images, args.prefix)
+
+
+if __name__ == "__main__":
+    main()
docs/imgs/anime/1000000.png
ADDED
docs/imgs/anime/1002000.png
ADDED
docs/imgs/anime/1003000.png
ADDED
docs/imgs/anime/1004000.png
ADDED
docs/imgs/anime/1006000.png
ADDED
docs/imgs/anime/1012000.png
ADDED
docs/imgs/anime_sketch/1000000.png
ADDED
docs/imgs/anime_sketch/1002000.png
ADDED
docs/imgs/anime_sketch/1003000.png
ADDED
docs/imgs/anime_sketch/1004000.png
ADDED
docs/imgs/anime_sketch/1006000.png
ADDED
docs/imgs/anime_sketch/1012000.png
ADDED
docs/imgs/sample.png
ADDED
openai.LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 OpenAI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
packages.txt
ADDED
@@ -0,0 +1 @@
+libopenmpi-dev
pixel_guide_diffusion/__init__.py
ADDED
@@ -0,0 +1,3 @@
+"""
+Codebase for "Improved Denoising Diffusion Probabilistic Models".
+"""
pixel_guide_diffusion/dist_util.py
ADDED
@@ -0,0 +1,82 @@
+"""
+Helpers for distributed training.
+"""
+
+import io
+import os
+import socket
+
+import blobfile as bf
+from mpi4py import MPI
+import torch as th
+import torch.distributed as dist
+
+# Change this to reflect your cluster layout.
+# The GPU for a given rank is (rank % GPUS_PER_NODE).
+GPUS_PER_NODE = 8
+
+SETUP_RETRY_COUNT = 3
+
+
+def setup_dist():
+    """
+    Setup a distributed process group.
+    """
+    if dist.is_initialized():
+        return
+
+    comm = MPI.COMM_WORLD
+    backend = "gloo" if not th.cuda.is_available() else "nccl"
+
+    if backend == "gloo":
+        hostname = "localhost"
+    else:
+        hostname = socket.gethostbyname(socket.getfqdn())
+    os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
+    os.environ["RANK"] = str(comm.rank)
+    os.environ["WORLD_SIZE"] = str(comm.size)
+
+    port = comm.bcast(_find_free_port(), root=0)
+    os.environ["MASTER_PORT"] = str(port)
+    dist.init_process_group(backend=backend, init_method="env://")
+
+
+def dev():
+    """
+    Get the device to use for torch.distributed.
+    """
+    if th.cuda.is_available():
+        return th.device(f"cuda:{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}")
+    return th.device("cpu")
+
+
+def load_state_dict(path, **kwargs):
+    """
+    Load a PyTorch file without redundant fetches across MPI ranks.
+    """
+    if MPI.COMM_WORLD.Get_rank() == 0:
+        with bf.BlobFile(path, "rb") as f:
+            data = f.read()
+    else:
+        data = None
+    data = MPI.COMM_WORLD.bcast(data)
+    return th.load(io.BytesIO(data), **kwargs)
+
+
+def sync_params(params):
+    """
+    Synchronize a sequence of Tensors across ranks from rank 0.
+    """
+    for p in params:
+        with th.no_grad():
+            dist.broadcast(p, 0)
+
+
+def _find_free_port():
+    try:
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.bind(("", 0))
+        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        return s.getsockname()[1]
+    finally:
+        s.close()
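`load_state_dict` above avoids N ranks fetching the same checkpoint by reading it once on rank 0 and broadcasting the raw bytes. A minimal sketch of the same pattern with plain mpi4py, assuming a hypothetical `checkpoint.bin` and an MPI launch such as `mpiexec -n 4 python script.py`:

```python
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:
    with open("checkpoint.bin", "rb") as f:  # hypothetical path
        data = f.read()
else:
    data = None
data = comm.bcast(data)  # every rank now holds the same bytes
```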
pixel_guide_diffusion/fp16_util.py
ADDED
@@ -0,0 +1,76 @@
+"""
+Helpers to train with 16-bit precision.
+"""
+
+import torch.nn as nn
+from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
+
+def convert_module_to_f16(l):
+    """
+    Convert primitive modules to float16.
+    """
+    if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
+        l.weight.data = l.weight.data.half()
+        l.bias.data = l.bias.data.half()
+
+
+def convert_module_to_f32(l):
+    """
+    Convert primitive modules to float32, undoing convert_module_to_f16().
+    """
+    if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
+        l.weight.data = l.weight.data.float()
+        l.bias.data = l.bias.data.float()
+
+
+def make_master_params(model_params):
+    """
+    Copy model parameters into a (differently-shaped) list of full-precision
+    parameters.
+    """
+    master_params = _flatten_dense_tensors(
+        [param.detach().float() for param in model_params]
+    )
+    master_params = nn.Parameter(master_params)
+    master_params.requires_grad = True
+    return [master_params]
+
+
+def model_grads_to_master_grads(model_params, master_params):
+    """
+    Copy the gradients from the model parameters into the master parameters
+    from make_master_params().
+    """
+    master_params[0].grad = _flatten_dense_tensors(
+        [param.grad.data.detach().float() for param in model_params]
+    )
+
+
+def master_params_to_model_params(model_params, master_params):
+    """
+    Copy the master parameter data back into the model parameters.
+    """
+    # Without copying to a list, if a generator is passed, this will
+    # silently not copy any parameters.
+    model_params = list(model_params)
+
+    for param, master_param in zip(
+        model_params, unflatten_master_params(model_params, master_params)
+    ):
+        param.detach().copy_(master_param)
+
+
+def unflatten_master_params(model_params, master_params):
+    """
+    Unflatten the master parameters to look like model_params.
+    """
+    return _unflatten_dense_tensors(master_params[0].detach(), model_params)
+
+
+def zero_grad(model_params):
+    for param in model_params:
+        # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
+        if param.grad is not None:
+            param.grad.detach_()
+            param.grad.zero_()
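These helpers implement classic fp16 training with a single flat fp32 master copy of the weights. A minimal sketch of one optimizer step composed from them (the toy parameters and hand-made gradients are stand-ins for a real model and backward pass; the real wiring lives in `train_util.py`):

```python
import torch as th
import torch.nn as nn
from pixel_guide_diffusion.fp16_util import (
    make_master_params,
    model_grads_to_master_grads,
    master_params_to_model_params,
    zero_grad,
)

# Toy fp16 "model": two half-precision parameters.
model_params = [nn.Parameter(th.randn(4).half()), nn.Parameter(th.randn(2).half())]
master_params = make_master_params(model_params)  # one flat fp32 Parameter
opt = th.optim.SGD(master_params, lr=0.1)

# Pretend an fp16 backward pass produced these gradients.
for p in model_params:
    p.grad = th.ones_like(p)

model_grads_to_master_grads(model_params, master_params)  # fp16 grads -> flat fp32 grad
opt.step()                                                # update happens in fp32
master_params_to_model_params(model_params, master_params)  # copy back into fp16 params
zero_grad(model_params)
```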
pixel_guide_diffusion/gaussian_diffusion.py
ADDED
@@ -0,0 +1,841 @@
+"""
+This code started out as a PyTorch port of Ho et al's diffusion models:
+https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
+
+Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
+"""
+
+import enum
+import math
+
+import numpy as np
+import torch as th
+
+from .nn import mean_flat
+from .losses import normal_kl, discretized_gaussian_log_likelihood
+
+
+def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
+    """
+    Get a pre-defined beta schedule for the given name.
+
+    The beta schedule library consists of beta schedules which remain similar
+    in the limit of num_diffusion_timesteps.
+    Beta schedules may be added, but should not be removed or changed once
+    they are committed to maintain backwards compatibility.
+    """
+    if schedule_name == "linear":
+        # Linear schedule from Ho et al, extended to work for any number of
+        # diffusion steps.
+        scale = 1000 / num_diffusion_timesteps
+        beta_start = scale * 0.0001
+        beta_end = scale * 0.02
+        return np.linspace(
+            beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
+        )
+    elif schedule_name == "cosine":
+        return betas_for_alpha_bar(
+            num_diffusion_timesteps,
+            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
+        )
+    else:
+        raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+    """
+    Create a beta schedule that discretizes the given alpha_t_bar function,
+    which defines the cumulative product of (1-beta) over time from t = [0,1].
+
+    :param num_diffusion_timesteps: the number of betas to produce.
+    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+                      produces the cumulative product of (1-beta) up to that
+                      part of the diffusion process.
+    :param max_beta: the maximum beta to use; use values lower than 1 to
+                     prevent singularities.
+    """
+    betas = []
+    for i in range(num_diffusion_timesteps):
+        t1 = i / num_diffusion_timesteps
+        t2 = (i + 1) / num_diffusion_timesteps
+        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+    return np.array(betas)
+
+
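For reference, `betas_for_alpha_bar` derives each beta from consecutive ratios of the target cumulative-product curve:

$$\beta_i = \min\!\Big(1 - \frac{\bar\alpha\big((i+1)/T\big)}{\bar\alpha\big(i/T\big)},\ \beta_{\max}\Big), \qquad \bar\alpha(t) = \cos^2\!\Big(\frac{t + 0.008}{1.008}\cdot\frac{\pi}{2}\Big)$$

with $T$ = `num_diffusion_timesteps` and $\beta_{\max}$ = `max_beta`; the second equation is the "cosine" choice passed in by `get_named_beta_schedule`.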
+class ModelMeanType(enum.Enum):
+    """
+    Which type of output the model predicts.
+    """
+
+    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
+    START_X = enum.auto()  # the model predicts x_0
+    EPSILON = enum.auto()  # the model predicts epsilon
+
+
+class ModelVarType(enum.Enum):
+    """
+    What is used as the model's output variance.
+
+    The LEARNED_RANGE option has been added to allow the model to predict
+    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
+    """
+
+    LEARNED = enum.auto()
+    FIXED_SMALL = enum.auto()
+    FIXED_LARGE = enum.auto()
+    LEARNED_RANGE = enum.auto()
+
+
+class LossType(enum.Enum):
+    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
+    RESCALED_MSE = (
+        enum.auto()
+    )  # use raw MSE loss (with RESCALED_KL when learning variances)
+    KL = enum.auto()  # use the variational lower-bound
+    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB
+
+    def is_vb(self):
+        return self == LossType.KL or self == LossType.RESCALED_KL
+
+
+class GaussianDiffusion:
+    """
+    Utilities for training and sampling diffusion models.
+
+    Ported directly from here, and then adapted over time to further experimentation.
+    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
+
+    :param betas: a 1-D numpy array of betas for each diffusion timestep,
+                  starting at T and going to 1.
+    :param model_mean_type: a ModelMeanType determining what the model outputs.
+    :param model_var_type: a ModelVarType determining how variance is output.
+    :param loss_type: a LossType determining the loss function to use.
+    :param rescale_timesteps: if True, pass floating point timesteps into the
+                              model so that they are always scaled like in the
+                              original paper (0 to 1000).
+    """
+
+    def __init__(
+        self,
+        *,
+        betas,
+        model_mean_type,
+        model_var_type,
+        loss_type,
+        rescale_timesteps=False,
+    ):
+        self.model_mean_type = model_mean_type
+        self.model_var_type = model_var_type
+        self.loss_type = loss_type
+        self.rescale_timesteps = rescale_timesteps
+
+        # Use float64 for accuracy.
+        betas = np.array(betas, dtype=np.float64)
+        self.betas = betas
+        assert len(betas.shape) == 1, "betas must be 1-D"
+        assert (betas > 0).all() and (betas <= 1).all()
+
+        self.num_timesteps = int(betas.shape[0])
+
+        alphas = 1.0 - betas
+        self.alphas_cumprod = np.cumprod(alphas, axis=0)
+        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
+        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
+        assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
+
+        # calculations for diffusion q(x_t | x_{t-1}) and others
+        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
+        self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
+        self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
+        self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
+        self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
+
+        # calculations for posterior q(x_{t-1} | x_t, x_0)
+        self.posterior_variance = (
+            betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+        )
+        # log calculation clipped because the posterior variance is 0 at the
+        # beginning of the diffusion chain.
+        self.posterior_log_variance_clipped = np.log(
+            np.append(self.posterior_variance[1], self.posterior_variance[1:])
+        )
+        self.posterior_mean_coef1 = (
+            betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
+        )
+        self.posterior_mean_coef2 = (
+            (1.0 - self.alphas_cumprod_prev)
+            * np.sqrt(alphas)
+            / (1.0 - self.alphas_cumprod)
+        )
+
+    def q_mean_variance(self, x_start, t):
+        """
+        Get the distribution q(x_t | x_0).
+
+        :param x_start: the [N x C x ...] tensor of noiseless inputs.
+        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+        """
+        mean = (
+            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+        )
+        variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+        log_variance = _extract_into_tensor(
+            self.log_one_minus_alphas_cumprod, t, x_start.shape
+        )
+        return mean, variance, log_variance
+
+    def q_sample(self, x_start, t, noise=None):
+        """
+        Diffuse the data for a given number of diffusion steps.
+
+        In other words, sample from q(x_t | x_0).
+
+        :param x_start: the initial data batch.
+        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+        :param noise: if specified, the split-out normal noise.
+        :return: A noisy version of x_start.
+        """
+        if noise is None:
+            noise = th.randn_like(x_start)
+        assert noise.shape == x_start.shape
+        return (
+            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
+            * noise
+        )
+
+
def q_posterior_mean_variance(self, x_start, x_t, t):
|
209 |
+
"""
|
210 |
+
Compute the mean and variance of the diffusion posterior:
|
211 |
+
|
212 |
+
q(x_{t-1} | x_t, x_0)
|
213 |
+
|
214 |
+
"""
|
215 |
+
assert x_start.shape == x_t.shape
|
216 |
+
posterior_mean = (
|
217 |
+
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
|
218 |
+
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
|
219 |
+
)
|
220 |
+
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
|
221 |
+
posterior_log_variance_clipped = _extract_into_tensor(
|
222 |
+
self.posterior_log_variance_clipped, t, x_t.shape
|
223 |
+
)
|
224 |
+
assert (
|
225 |
+
posterior_mean.shape[0]
|
226 |
+
== posterior_variance.shape[0]
|
227 |
+
== posterior_log_variance_clipped.shape[0]
|
228 |
+
== x_start.shape[0]
|
229 |
+
)
|
230 |
+
return posterior_mean, posterior_variance, posterior_log_variance_clipped
|
231 |
+
|
232 |
+
def p_mean_variance(
|
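For reference, `posterior_mean_coef1`/`posterior_mean_coef2` and `posterior_variance` in `__init__` encode the standard Gaussian posterior $q(x_{t-1} \mid x_t, x_0) = \mathcal{N}(\tilde\mu_t, \tilde\beta_t \mathbf{I})$ with

$$\tilde\mu_t(x_t, x_0) = \frac{\sqrt{\bar\alpha_{t-1}}\,\beta_t}{1 - \bar\alpha_t}\,x_0 + \frac{\sqrt{\alpha_t}\,(1 - \bar\alpha_{t-1})}{1 - \bar\alpha_t}\,x_t, \qquad \tilde\beta_t = \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t}\,\beta_t.$$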
233 |
+
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
|
234 |
+
):
|
235 |
+
"""
|
236 |
+
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
|
237 |
+
the initial x, x_0.
|
238 |
+
|
239 |
+
:param model: the model, which takes a signal and a batch of timesteps
|
240 |
+
as input.
|
241 |
+
:param x: the [N x C x ...] tensor at time t.
|
242 |
+
:param t: a 1-D Tensor of timesteps.
|
243 |
+
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
|
244 |
+
:param denoised_fn: if not None, a function which applies to the
|
245 |
+
x_start prediction before it is used to sample. Applies before
|
246 |
+
clip_denoised.
|
247 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
248 |
+
pass to the model. This can be used for conditioning.
|
249 |
+
:return: a dict with the following keys:
|
250 |
+
- 'mean': the model mean output.
|
251 |
+
- 'variance': the model variance output.
|
252 |
+
- 'log_variance': the log of 'variance'.
|
253 |
+
- 'pred_xstart': the prediction for x_0.
|
254 |
+
"""
|
255 |
+
if model_kwargs is None:
|
256 |
+
model_kwargs = {}
|
257 |
+
|
258 |
+
B, C = x.shape[:2]
|
259 |
+
assert t.shape == (B,)
|
260 |
+
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
|
261 |
+
|
262 |
+
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
|
263 |
+
assert model_output.shape == (B, C * 2, *x.shape[2:])
|
264 |
+
model_output, model_var_values = th.split(model_output, C, dim=1)
|
265 |
+
if self.model_var_type == ModelVarType.LEARNED:
|
266 |
+
model_log_variance = model_var_values
|
267 |
+
model_variance = th.exp(model_log_variance)
|
268 |
+
else:
|
269 |
+
min_log = _extract_into_tensor(
|
270 |
+
self.posterior_log_variance_clipped, t, x.shape
|
271 |
+
)
|
272 |
+
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
|
273 |
+
# The model_var_values is [-1, 1] for [min_var, max_var].
|
274 |
+
frac = (model_var_values + 1) / 2
|
275 |
+
model_log_variance = frac * max_log + (1 - frac) * min_log
|
276 |
+
model_variance = th.exp(model_log_variance)
|
277 |
+
else:
|
278 |
+
model_variance, model_log_variance = {
|
279 |
+
# for fixedlarge, we set the initial (log-)variance like so
|
280 |
+
# to get a better decoder log likelihood.
|
281 |
+
ModelVarType.FIXED_LARGE: (
|
282 |
+
np.append(self.posterior_variance[1], self.betas[1:]),
|
283 |
+
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
|
284 |
+
),
|
285 |
+
ModelVarType.FIXED_SMALL: (
|
286 |
+
self.posterior_variance,
|
287 |
+
self.posterior_log_variance_clipped,
|
288 |
+
),
|
289 |
+
}[self.model_var_type]
|
290 |
+
model_variance = _extract_into_tensor(model_variance, t, x.shape)
|
291 |
+
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
|
292 |
+
|
293 |
+
def process_xstart(x):
|
294 |
+
if denoised_fn is not None:
|
295 |
+
x = denoised_fn(x)
|
296 |
+
if clip_denoised:
|
297 |
+
return x.clamp(-1, 1)
|
298 |
+
return x
|
299 |
+
|
300 |
+
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
|
301 |
+
pred_xstart = process_xstart(
|
302 |
+
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
|
303 |
+
)
|
304 |
+
model_mean = model_output
|
305 |
+
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
|
306 |
+
if self.model_mean_type == ModelMeanType.START_X:
|
307 |
+
pred_xstart = process_xstart(model_output)
|
308 |
+
else:
|
309 |
+
pred_xstart = process_xstart(
|
310 |
+
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
|
311 |
+
)
|
312 |
+
model_mean, _, _ = self.q_posterior_mean_variance(
|
313 |
+
x_start=pred_xstart, x_t=x, t=t
|
314 |
+
)
|
315 |
+
else:
|
316 |
+
raise NotImplementedError(self.model_mean_type)
|
317 |
+
|
318 |
+
assert (
|
319 |
+
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
|
320 |
+
)
|
321 |
+
return {
|
322 |
+
"mean": model_mean,
|
323 |
+
"variance": model_variance,
|
324 |
+
"log_variance": model_log_variance,
|
325 |
+
"pred_xstart": pred_xstart,
|
326 |
+
}
|
327 |
+
|
328 |
+
def _predict_xstart_from_eps(self, x_t, t, eps):
|
329 |
+
assert x_t.shape == eps.shape
|
330 |
+
return (
|
331 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
|
332 |
+
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
|
333 |
+
)
|
334 |
+
|
335 |
+
def _predict_xstart_from_xprev(self, x_t, t, xprev):
|
336 |
+
assert x_t.shape == xprev.shape
|
337 |
+
return ( # (xprev - coef2*x_t) / coef1
|
338 |
+
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
|
339 |
+
- _extract_into_tensor(
|
340 |
+
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
|
341 |
+
)
|
342 |
+
* x_t
|
343 |
+
)
|
344 |
+
|
345 |
+
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
|
346 |
+
return (
|
347 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
|
348 |
+
- pred_xstart
|
349 |
+
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
|
350 |
+
|
351 |
+
def _scale_timesteps(self, t):
|
352 |
+
if self.rescale_timesteps:
|
353 |
+
return t.float() * (1000.0 / self.num_timesteps)
|
354 |
+
return t
|
355 |
+
|
356 |
+
def p_sample(
|
357 |
+
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
|
358 |
+
):
|
359 |
+
"""
|
360 |
+
Sample x_{t-1} from the model at the given timestep.
|
361 |
+
|
362 |
+
:param model: the model to sample from.
|
363 |
+
:param x: the current tensor at x_{t-1}.
|
364 |
+
:param t: the value of t, starting at 0 for the first diffusion step.
|
365 |
+
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
|
366 |
+
:param denoised_fn: if not None, a function which applies to the
|
367 |
+
x_start prediction before it is used to sample.
|
368 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
369 |
+
pass to the model. This can be used for conditioning.
|
370 |
+
:return: a dict containing the following keys:
|
371 |
+
- 'sample': a random sample from the model.
|
372 |
+
- 'pred_xstart': a prediction of x_0.
|
373 |
+
"""
|
374 |
+
out = self.p_mean_variance(
|
375 |
+
model,
|
376 |
+
x,
|
377 |
+
t,
|
378 |
+
clip_denoised=clip_denoised,
|
379 |
+
denoised_fn=denoised_fn,
|
380 |
+
model_kwargs=model_kwargs,
|
381 |
+
)
|
382 |
+
noise = th.randn_like(x)
|
383 |
+
nonzero_mask = (
|
384 |
+
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
|
385 |
+
) # no noise when t == 0
|
386 |
+
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
|
387 |
+
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
|
388 |
+
|
389 |
+
def p_sample_loop(
|
390 |
+
self,
|
391 |
+
model,
|
392 |
+
shape,
|
393 |
+
noise=None,
|
394 |
+
clip_denoised=True,
|
395 |
+
denoised_fn=None,
|
396 |
+
model_kwargs=None,
|
397 |
+
device=None,
|
398 |
+
progress=False,
|
399 |
+
):
|
400 |
+
"""
|
401 |
+
Generate samples from the model.
|
402 |
+
|
403 |
+
:param model: the model module.
|
404 |
+
:param shape: the shape of the samples, (N, C, H, W).
|
405 |
+
:param noise: if specified, the noise from the encoder to sample.
|
406 |
+
Should be of the same shape as `shape`.
|
407 |
+
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
|
408 |
+
:param denoised_fn: if not None, a function which applies to the
|
409 |
+
x_start prediction before it is used to sample.
|
410 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
411 |
+
pass to the model. This can be used for conditioning.
|
412 |
+
:param device: if specified, the device to create the samples on.
|
413 |
+
If not specified, use a model parameter's device.
|
414 |
+
:param progress: if True, show a tqdm progress bar.
|
415 |
+
:return: a non-differentiable batch of samples.
|
416 |
+
"""
|
417 |
+
final = None
|
418 |
+
for sample in self.p_sample_loop_progressive(
|
419 |
+
model,
|
420 |
+
shape,
|
421 |
+
noise=noise,
|
422 |
+
clip_denoised=clip_denoised,
|
423 |
+
denoised_fn=denoised_fn,
|
424 |
+
model_kwargs=model_kwargs,
|
425 |
+
device=device,
|
426 |
+
progress=progress,
|
427 |
+
):
|
428 |
+
final = sample
|
429 |
+
return final["sample"]
|
430 |
+
|
431 |
+
def p_sample_loop_progressive(
|
432 |
+
self,
|
433 |
+
model,
|
434 |
+
shape,
|
435 |
+
noise=None,
|
436 |
+
clip_denoised=True,
|
437 |
+
denoised_fn=None,
|
438 |
+
model_kwargs=None,
|
439 |
+
device=None,
|
440 |
+
progress=False,
|
441 |
+
):
|
442 |
+
"""
|
443 |
+
Generate samples from the model and yield intermediate samples from
|
444 |
+
each timestep of diffusion.
|
445 |
+
|
446 |
+
Arguments are the same as p_sample_loop().
|
447 |
+
Returns a generator over dicts, where each dict is the return value of
|
448 |
+
p_sample().
|
449 |
+
"""
|
450 |
+
if device is None:
|
451 |
+
device = next(model.parameters()).device
|
452 |
+
assert isinstance(shape, (tuple, list))
|
453 |
+
if noise is not None:
|
454 |
+
img = noise
|
455 |
+
else:
|
456 |
+
img = th.randn(*shape, device=device)
|
457 |
+
indices = list(range(self.num_timesteps))[::-1]
|
458 |
+
|
459 |
+
if progress:
|
460 |
+
# Lazy import so that we don't depend on tqdm.
|
461 |
+
from tqdm.auto import tqdm
|
462 |
+
|
463 |
+
indices = tqdm(indices)
|
464 |
+
|
465 |
+
for i in indices:
|
466 |
+
t = th.tensor([i] * shape[0], device=device)
|
467 |
+
with th.no_grad():
|
468 |
+
out = self.p_sample(
|
469 |
+
model,
|
470 |
+
img,
|
471 |
+
t,
|
472 |
+
clip_denoised=clip_denoised,
|
473 |
+
denoised_fn=denoised_fn,
|
474 |
+
model_kwargs=model_kwargs,
|
475 |
+
)
|
476 |
+
yield out
|
477 |
+
img = out["sample"]
|
478 |
+
|
479 |
+
def ddim_sample(
|
480 |
+
self,
|
481 |
+
model,
|
482 |
+
x,
|
483 |
+
t,
|
484 |
+
clip_denoised=True,
|
485 |
+
denoised_fn=None,
|
486 |
+
model_kwargs=None,
|
487 |
+
eta=0.0,
|
488 |
+
):
|
489 |
+
"""
|
490 |
+
Sample x_{t-1} from the model using DDIM.
|
491 |
+
|
492 |
+
Same usage as p_sample().
|
493 |
+
"""
|
494 |
+
out = self.p_mean_variance(
|
495 |
+
model,
|
496 |
+
x,
|
497 |
+
t,
|
498 |
+
clip_denoised=clip_denoised,
|
499 |
+
denoised_fn=denoised_fn,
|
500 |
+
model_kwargs=model_kwargs,
|
501 |
+
)
|
502 |
+
# Usually our model outputs epsilon, but we re-derive it
|
503 |
+
# in case we used x_start or x_prev prediction.
|
504 |
+
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
|
505 |
+
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
|
506 |
+
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
|
507 |
+
sigma = (
|
508 |
+
eta
|
509 |
+
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
|
510 |
+
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
|
511 |
+
)
|
512 |
+
# Equation 12.
|
513 |
+
noise = th.randn_like(x)
|
514 |
+
mean_pred = (
|
515 |
+
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
|
516 |
+
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
|
517 |
+
)
|
518 |
+
nonzero_mask = (
|
519 |
+
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
|
520 |
+
) # no noise when t == 0
|
521 |
+
sample = mean_pred + nonzero_mask * sigma * noise
|
522 |
+
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
|
523 |
+
|
524 |
+
def ddim_reverse_sample(
|
525 |
+
self,
|
526 |
+
model,
|
527 |
+
x,
|
528 |
+
t,
|
529 |
+
clip_denoised=True,
|
530 |
+
denoised_fn=None,
|
531 |
+
model_kwargs=None,
|
532 |
+
eta=0.0,
|
533 |
+
):
|
534 |
+
"""
|
535 |
+
Sample x_{t+1} from the model using DDIM reverse ODE.
|
536 |
+
"""
|
537 |
+
assert eta == 0.0, "Reverse ODE only for deterministic path"
|
538 |
+
out = self.p_mean_variance(
|
539 |
+
model,
|
540 |
+
x,
|
541 |
+
t,
|
542 |
+
clip_denoised=clip_denoised,
|
543 |
+
denoised_fn=denoised_fn,
|
544 |
+
model_kwargs=model_kwargs,
|
545 |
+
)
|
546 |
+
# Usually our model outputs epsilon, but we re-derive it
|
547 |
+
# in case we used x_start or x_prev prediction.
|
548 |
+
eps = (
|
549 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
|
550 |
+
- out["pred_xstart"]
|
551 |
+
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
|
552 |
+
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
|
553 |
+
|
554 |
+
# Equation 12. reversed
|
555 |
+
mean_pred = (
|
556 |
+
out["pred_xstart"] * th.sqrt(alpha_bar_next)
|
557 |
+
+ th.sqrt(1 - alpha_bar_next) * eps
|
558 |
+
)
|
559 |
+
|
560 |
+
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
|
561 |
+
|
562 |
+
def ddim_sample_loop(
|
563 |
+
self,
|
564 |
+
model,
|
565 |
+
shape,
|
566 |
+
noise=None,
|
567 |
+
clip_denoised=True,
|
568 |
+
denoised_fn=None,
|
569 |
+
model_kwargs=None,
|
570 |
+
device=None,
|
571 |
+
progress=False,
|
572 |
+
eta=0.0,
|
573 |
+
):
|
574 |
+
"""
|
575 |
+
Generate samples from the model using DDIM.
|
576 |
+
|
577 |
+
Same usage as p_sample_loop().
|
578 |
+
"""
|
579 |
+
final = None
|
580 |
+
for sample in self.ddim_sample_loop_progressive(
|
581 |
+
model,
|
582 |
+
shape,
|
583 |
+
noise=noise,
|
584 |
+
clip_denoised=clip_denoised,
|
585 |
+
denoised_fn=denoised_fn,
|
586 |
+
model_kwargs=model_kwargs,
|
587 |
+
device=device,
|
588 |
+
progress=progress,
|
589 |
+
eta=eta,
|
590 |
+
):
|
591 |
+
final = sample
|
592 |
+
return final["sample"]
|
593 |
+
|
594 |
+
def ddim_sample_loop_progressive(
|
595 |
+
self,
|
596 |
+
model,
|
597 |
+
shape,
|
598 |
+
noise=None,
|
599 |
+
clip_denoised=True,
|
600 |
+
denoised_fn=None,
|
601 |
+
model_kwargs=None,
|
602 |
+
device=None,
|
603 |
+
progress=False,
|
604 |
+
eta=0.0,
|
605 |
+
):
|
606 |
+
"""
|
607 |
+
Use DDIM to sample from the model and yield intermediate samples from
|
608 |
+
each timestep of DDIM.
|
609 |
+
|
610 |
+
Same usage as p_sample_loop_progressive().
|
611 |
+
"""
|
612 |
+
if device is None:
|
613 |
+
device = next(model.parameters()).device
|
614 |
+
assert isinstance(shape, (tuple, list))
|
615 |
+
if noise is not None:
|
616 |
+
img = noise
|
617 |
+
else:
|
618 |
+
img = th.randn(*shape, device=device)
|
619 |
+
indices = list(range(self.num_timesteps))[::-1]
|
620 |
+
|
621 |
+
if progress:
|
622 |
+
# Lazy import so that we don't depend on tqdm.
|
623 |
+
from tqdm.auto import tqdm
|
624 |
+
|
625 |
+
indices = tqdm(indices)
|
626 |
+
|
627 |
+
for i in indices:
|
628 |
+
t = th.tensor([i] * shape[0], device=device)
|
629 |
+
with th.no_grad():
|
630 |
+
out = self.ddim_sample(
|
631 |
+
model,
|
632 |
+
img,
|
633 |
+
t,
|
634 |
+
clip_denoised=clip_denoised,
|
635 |
+
denoised_fn=denoised_fn,
|
636 |
+
model_kwargs=model_kwargs,
|
637 |
+
eta=eta,
|
638 |
+
)
|
639 |
+
yield out
|
640 |
+
img = out["sample"]
|
641 |
+
|
642 |
+
def _vb_terms_bpd(
|
643 |
+
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
|
644 |
+
):
|
645 |
+
"""
|
646 |
+
Get a term for the variational lower-bound.
|
647 |
+
|
648 |
+
The resulting units are bits (rather than nats, as one might expect).
|
649 |
+
This allows for comparison to other papers.
|
650 |
+
|
651 |
+
:return: a dict with the following keys:
|
652 |
+
- 'output': a shape [N] tensor of NLLs or KLs.
|
653 |
+
- 'pred_xstart': the x_0 predictions.
|
654 |
+
"""
|
655 |
+
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
|
656 |
+
x_start=x_start, x_t=x_t, t=t
|
657 |
+
)
|
658 |
+
out = self.p_mean_variance(
|
659 |
+
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
|
660 |
+
)
|
661 |
+
kl = normal_kl(
|
662 |
+
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
|
663 |
+
)
|
664 |
+
kl = mean_flat(kl) / np.log(2.0)
|
665 |
+
|
666 |
+
decoder_nll = -discretized_gaussian_log_likelihood(
|
667 |
+
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
|
668 |
+
)
|
669 |
+
assert decoder_nll.shape == x_start.shape
|
670 |
+
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
|
671 |
+
|
672 |
+
# At the first timestep return the decoder NLL,
|
673 |
+
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
|
674 |
+
output = th.where((t == 0), decoder_nll, kl)
|
675 |
+
return {"output": output, "pred_xstart": out["pred_xstart"]}
|
676 |
+
|
677 |
+
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
|
678 |
+
"""
|
679 |
+
Compute training losses for a single timestep.
|
680 |
+
|
681 |
+
:param model: the model to evaluate loss on.
|
682 |
+
:param x_start: the [N x C x ...] tensor of inputs.
|
683 |
+
:param t: a batch of timestep indices.
|
684 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
685 |
+
pass to the model. This can be used for conditioning.
|
686 |
+
:param noise: if specified, the specific Gaussian noise to try to remove.
|
687 |
+
:return: a dict with the key "loss" containing a tensor of shape [N].
|
688 |
+
Some mean or variance settings may also have other keys.
|
689 |
+
"""
|
690 |
+
if model_kwargs is None:
|
691 |
+
model_kwargs = {}
|
692 |
+
if noise is None:
|
693 |
+
noise = th.randn_like(x_start)
|
694 |
+
x_t = self.q_sample(x_start, t, noise=noise)
|
695 |
+
|
696 |
+
terms = {}
|
697 |
+
|
698 |
+
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
|
699 |
+
terms["loss"] = self._vb_terms_bpd(
|
700 |
+
model=model,
|
701 |
+
x_start=x_start,
|
702 |
+
x_t=x_t,
|
703 |
+
t=t,
|
704 |
+
clip_denoised=False,
|
705 |
+
model_kwargs=model_kwargs,
|
706 |
+
)["output"]
|
707 |
+
if self.loss_type == LossType.RESCALED_KL:
|
708 |
+
terms["loss"] *= self.num_timesteps
|
709 |
+
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
|
710 |
+
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
|
711 |
+
|
712 |
+
if self.model_var_type in [
|
713 |
+
ModelVarType.LEARNED,
|
714 |
+
ModelVarType.LEARNED_RANGE,
|
715 |
+
]:
|
716 |
+
B, C = x_t.shape[:2]
|
717 |
+
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
|
718 |
+
model_output, model_var_values = th.split(model_output, C, dim=1)
|
719 |
+
# Learn the variance using the variational bound, but don't let
|
720 |
+
# it affect our mean prediction.
|
721 |
+
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
|
722 |
+
terms["vb"] = self._vb_terms_bpd(
|
723 |
+
model=lambda *args, r=frozen_out: r,
|
724 |
+
x_start=x_start,
|
725 |
+
x_t=x_t,
|
726 |
+
t=t,
|
727 |
+
clip_denoised=False,
|
728 |
+
)["output"]
|
729 |
+
if self.loss_type == LossType.RESCALED_MSE:
|
730 |
+
# Divide by 1000 for equivalence with initial implementation.
|
731 |
+
# Without a factor of 1/1000, the VB term hurts the MSE term.
|
732 |
+
terms["vb"] *= self.num_timesteps / 1000.0
|
733 |
+
|
734 |
+
target = {
|
735 |
+
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
|
736 |
+
x_start=x_start, x_t=x_t, t=t
|
737 |
+
)[0],
|
738 |
+
ModelMeanType.START_X: x_start,
|
739 |
+
ModelMeanType.EPSILON: noise,
|
740 |
+
}[self.model_mean_type]
|
741 |
+
assert model_output.shape == target.shape == x_start.shape
|
742 |
+
terms["mse"] = mean_flat((target - model_output) ** 2)
|
743 |
+
if "vb" in terms:
|
744 |
+
terms["loss"] = terms["mse"] + terms["vb"]
|
745 |
+
else:
|
746 |
+
terms["loss"] = terms["mse"]
|
747 |
+
else:
|
748 |
+
raise NotImplementedError(self.loss_type)
|
749 |
+
|
750 |
+
return terms
|
751 |
+
|
752 |
+
def _prior_bpd(self, x_start):
|
753 |
+
"""
|
754 |
+
Get the prior KL term for the variational lower-bound, measured in
|
755 |
+
bits-per-dim.
|
756 |
+
|
757 |
+
This term can't be optimized, as it only depends on the encoder.
|
758 |
+
|
759 |
+
:param x_start: the [N x C x ...] tensor of inputs.
|
760 |
+
:return: a batch of [N] KL values (in bits), one per batch element.
|
761 |
+
"""
|
762 |
+
batch_size = x_start.shape[0]
|
763 |
+
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
|
764 |
+
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
|
765 |
+
kl_prior = normal_kl(
|
766 |
+
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
|
767 |
+
)
|
768 |
+
return mean_flat(kl_prior) / np.log(2.0)
|
769 |
+
|
770 |
+
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
|
771 |
+
"""
|
772 |
+
Compute the entire variational lower-bound, measured in bits-per-dim,
|
773 |
+
as well as other related quantities.
|
774 |
+
|
775 |
+
:param model: the model to evaluate loss on.
|
776 |
+
:param x_start: the [N x C x ...] tensor of inputs.
|
777 |
+
:param clip_denoised: if True, clip denoised samples.
|
778 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
779 |
+
pass to the model. This can be used for conditioning.
|
780 |
+
|
781 |
+
:return: a dict containing the following keys:
|
782 |
+
- total_bpd: the total variational lower-bound, per batch element.
|
783 |
+
- prior_bpd: the prior term in the lower-bound.
|
784 |
+
- vb: an [N x T] tensor of terms in the lower-bound.
|
785 |
+
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
|
786 |
+
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
|
787 |
+
"""
|
788 |
+
device = x_start.device
|
789 |
+
batch_size = x_start.shape[0]
|
790 |
+
|
791 |
+
vb = []
|
792 |
+
xstart_mse = []
|
793 |
+
mse = []
|
794 |
+
for t in list(range(self.num_timesteps))[::-1]:
|
795 |
+
t_batch = th.tensor([t] * batch_size, device=device)
|
796 |
+
noise = th.randn_like(x_start)
|
797 |
+
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
|
798 |
+
# Calculate VLB term at the current timestep
|
799 |
+
with th.no_grad():
|
800 |
+
out = self._vb_terms_bpd(
|
801 |
+
model,
|
802 |
+
x_start=x_start,
|
803 |
+
x_t=x_t,
|
804 |
+
t=t_batch,
|
805 |
+
clip_denoised=clip_denoised,
|
806 |
+
model_kwargs=model_kwargs,
|
807 |
+
)
|
808 |
+
vb.append(out["output"])
|
809 |
+
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
|
810 |
+
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
|
811 |
+
mse.append(mean_flat((eps - noise) ** 2))
|
812 |
+
|
813 |
+
vb = th.stack(vb, dim=1)
|
814 |
+
xstart_mse = th.stack(xstart_mse, dim=1)
|
815 |
+
mse = th.stack(mse, dim=1)
|
816 |
+
|
817 |
+
prior_bpd = self._prior_bpd(x_start)
|
818 |
+
total_bpd = vb.sum(dim=1) + prior_bpd
|
819 |
+
return {
|
820 |
+
"total_bpd": total_bpd,
|
821 |
+
"prior_bpd": prior_bpd,
|
822 |
+
"vb": vb,
|
823 |
+
"xstart_mse": xstart_mse,
|
824 |
+
"mse": mse,
|
825 |
+
}
|
826 |
+
|
827 |
+
|
828 |
+
def _extract_into_tensor(arr, timesteps, broadcast_shape):
|
829 |
+
"""
|
830 |
+
Extract values from a 1-D numpy array for a batch of indices.
|
831 |
+
|
832 |
+
:param arr: the 1-D numpy array.
|
833 |
+
:param timesteps: a tensor of indices into the array to extract.
|
834 |
+
:param broadcast_shape: a larger shape of K dimensions with the batch
|
835 |
+
dimension equal to the length of timesteps.
|
836 |
+
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
|
837 |
+
"""
|
838 |
+
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
|
839 |
+
while len(res.shape) < len(broadcast_shape):
|
840 |
+
res = res[..., None]
|
841 |
+
return res.expand(broadcast_shape)
|
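The two sampling loops above are the usual entry points of this class. A minimal sketch of driving them at inference time, assuming `diffusion` is a constructed GaussianDiffusion and `model` is a trained denoising network (neither name is defined in this file; the shape is illustrative only):

    import torch as th

    # Hypothetical objects: `diffusion` (GaussianDiffusion) and `model` are
    # assumed to exist already.
    with th.no_grad():
        # Ancestral sampling through every timestep:
        samples = diffusion.p_sample_loop(model, (4, 3, 32, 32), progress=True)
        # Deterministic DDIM sampling (eta=0.0 gives the ODE path):
        ddim_samples = diffusion.ddim_sample_loop(model, (4, 3, 32, 32), eta=0.0)

Both loops already wrap each step in th.no_grad(), so the outer guard is optional; it is shown only to make the non-differentiable intent explicit.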
pixel_guide_diffusion/image_datasets.py
ADDED
@@ -0,0 +1,173 @@
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset

import PIL.ImageFile
PIL.ImageFile.LOAD_TRUNCATED_IMAGES = True


def load_data(
    *, data_dir, batch_size, image_size, class_cond=False, guide_size=0, guide_dir=None, crop_size=0, deterministic=False
):
    """
    For a dataset, create a generator over (images, kwargs) pairs.

    Each image is an NCHW float tensor, and the kwargs dict contains zero or
    more keys, each of which maps to a batched Tensor of its own.
    The kwargs dict can be used for class labels, in which case the key is "y"
    and the values are integer tensors of class labels.

    :param data_dir: a dataset directory.
    :param batch_size: the batch size of each returned pair.
    :param image_size: the size to which images are resized.
    :param class_cond: if True, include a "y" key in returned dicts for class
                       label. If classes are not available and this is true, an
                       exception will be raised.
    :param guide_size: the size to which images are resized for guide tensors.
    :param guide_dir: a dataset directory for guide tensors.
    :param crop_size: the size to which images are resized and cropped.
    :param deterministic: if True, yield results in a deterministic order.
    """
    if not data_dir:
        raise ValueError("unspecified data directory")
    all_files = _list_image_files_recursively(data_dir)
    guide_files = None
    guide_files2 = None  # must be defined even when guide_dir is unset
    if guide_dir:
        guide_files = _list_image_files_recursively(guide_dir)
        guide_files2 = _list_image_files_recursively('data/danbooru2017/anime_sketch_noise')
    classes = None
    if class_cond:
        # Assume classes are the first part of the filename,
        # before an underscore.
        class_names = [bf.basename(path).split("_")[0] for path in all_files]
        sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
        classes = [sorted_classes[x] for x in class_names]
    dataset = ImageDataset(
        image_size,
        all_files,
        guide_resolution=guide_size,
        guide_paths=guide_files,
        guide_paths2=guide_files2,
        crop_resolution=crop_size,
        classes=classes,
        shard=MPI.COMM_WORLD.Get_rank(),
        num_shards=MPI.COMM_WORLD.Get_size(),
    )
    if deterministic:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
        )
    else:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
        )
    while True:
        yield from loader


def _list_image_files_recursively(data_dir):
    results = []
    for entry in sorted(bf.listdir(data_dir)):
        full_path = bf.join(data_dir, entry)
        ext = entry.split(".")[-1]
        if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
            results.append(full_path)
        elif bf.isdir(full_path):
            results.extend(_list_image_files_recursively(full_path))
    return sorted(results)


class ImageDataset(Dataset):
    def __init__(self, resolution, image_paths, guide_resolution=0, guide_paths=None, guide_paths2=None, crop_resolution=0, classes=None, shard=0, num_shards=1):
        super().__init__()
        self.resolution = resolution
        self.guide_resolution = guide_resolution
        self.local_images = image_paths[shard:][::num_shards]
        self.local_guides = guide_paths[shard:][::num_shards] if guide_paths else None
        self.local_guides2 = guide_paths2[shard:][::num_shards] if guide_paths2 else None
        self.crop_resolution = crop_resolution if crop_resolution > 0 else resolution
        self.local_classes = None if classes is None else classes[shard:][::num_shards]

    def __len__(self):
        return len(self.local_images) * 1000000

    def __getitem__(self, idx):
        idx = idx % len(self.local_images)
        path = self.local_images[idx]
        with bf.BlobFile(path, "rb") as f:
            pil_image = Image.open(f)
            pil_image.load()

        # We are not on a new enough PIL to support the `reducing_gap`
        # argument, which uses BOX downsampling at powers of two first.
        # Thus, we do it by hand to improve downsample quality.
        while min(*pil_image.size) >= 2 * self.resolution:
            pil_image = pil_image.resize(
                tuple(x // 2 for x in pil_image.size), resample=Image.BOX
            )

        scale = self.resolution / min(*pil_image.size)
        pil_image = pil_image.resize(
            tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
        )

        arr = np.array(pil_image.convert("RGB"))
        crop_y = (arr.shape[0] - self.crop_resolution) // 2
        crop_x = (arr.shape[1] - self.crop_resolution) // 2
        arr = arr[crop_y : crop_y + self.crop_resolution, crop_x : crop_x + self.crop_resolution]
        arr = arr.astype(np.float32) / 127.5 - 1

        out_dict = {}

        if self.local_guides:
            path = self.local_guides[idx] if np.random.rand() < 0.5 else self.local_guides2[idx]
            with bf.BlobFile(path, "rb") as f:
                pil_image = Image.open(f)
                pil_image.load()

            # We are not on a new enough PIL to support the `reducing_gap`
            # argument, which uses BOX downsampling at powers of two first.
            # Thus, we do it by hand to improve downsample quality.
            while min(*pil_image.size) >= 2 * self.guide_resolution:
                pil_image = pil_image.resize(
                    tuple(x // 2 for x in pil_image.size), resample=Image.BOX
                )

            scale = self.guide_resolution / min(*pil_image.size)
            pil_image = pil_image.resize(
                tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
            )

            crop_resolution = self.guide_resolution // self.resolution * self.crop_resolution

            guide_arr = np.array(pil_image.convert("L"))[..., None]  # np.array(pil_image.convert("RGB"))

            # extra noise
            if np.random.rand() < 0.5:
                w, h = guide_arr.shape[:2][::-1]
                a = np.random.randint(2, 12)
                mean = np.asarray(
                    Image.fromarray(
                        np.random.randint(0, 255, [a, a], dtype='uint8')
                    ).resize([w, h], Image.NEAREST)
                ).astype('float32') / 255.0 * 2 - 1
                std = np.asarray(
                    Image.fromarray(
                        np.random.randint(0, 255, [a, a], dtype='uint8')
                    ).resize([w, h], Image.NEAREST)
                ).astype('float32') / 255.0 * 7.5 + 0.125
                guide_arr = (guide_arr - mean[..., None]) * std[..., None]

            crop_y = (guide_arr.shape[0] - crop_resolution) // 2
            crop_x = (guide_arr.shape[1] - crop_resolution) // 2
            guide_arr = guide_arr[crop_y : crop_y + crop_resolution, crop_x : crop_x + crop_resolution]
            guide_arr = guide_arr.astype(np.float32) / 127.5 - 1

            out_dict["guide"] = np.transpose(guide_arr, [2, 0, 1]).astype('float32')

        if self.local_classes is not None:
            out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)

        return np.transpose(arr, [2, 0, 1]), out_dict
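For context, a minimal sketch of consuming load_data. It is an infinite generator, so it is read with next() rather than iterated per epoch. The directory paths below are placeholders; note that as written the function also reads sketches from the hard-coded data/danbooru2017/anime_sketch_noise directory whenever guide_dir is set:

    # Placeholder paths; adjust to the actual dataset layout.
    data = load_data(
        data_dir="data/danbooru2017/anime",
        batch_size=4,
        image_size=32,
        guide_size=128,
        guide_dir="data/danbooru2017/anime_sketch",
        crop_size=32,
    )
    batch, cond = next(data)   # batch: [4, 3, 32, 32] images scaled to [-1, 1]
    sketch = cond["guide"]     # [4, 1, 128, 128] grayscale guide tensor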
pixel_guide_diffusion/logger.py
ADDED
@@ -0,0 +1,495 @@
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""

import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager

DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40

DISABLED = 50


class KVWriter(object):
    def writekvs(self, kvs):
        raise NotImplementedError


class SeqWriter(object):
    def writeseq(self, seq):
        raise NotImplementedError


class HumanOutputFormat(KVWriter, SeqWriter):
    def __init__(self, filename_or_file):
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, "wt")
            self.own_file = True
        else:
            assert hasattr(filename_or_file, "read"), (
                "expected file or str, got %s" % filename_or_file
            )
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        # Create strings for printing
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, "__float__"):
                valstr = "%-8.3g" % val
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)

        # Find max widths
        if len(key2str) == 0:
            print("WARNING: tried to write empty key-value dict")
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))

        # Write out the data
        dashes = "-" * (keywidth + valwidth + 7)
        lines = [dashes]
        for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
            lines.append(
                "| %s%s | %s%s |"
                % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
            )
        lines.append(dashes)
        self.file.write("\n".join(lines) + "\n")

        # Flush the output to the file
        self.file.flush()

    def _truncate(self, s):
        maxlen = 30
        return s[: maxlen - 3] + "..." if len(s) > maxlen else s

    def writeseq(self, seq):
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if i < len(seq) - 1:  # add space unless this is the last one
                self.file.write(" ")
        self.file.write("\n")
        self.file.flush()

    def close(self):
        if self.own_file:
            self.file.close()


class JSONOutputFormat(KVWriter):
    def __init__(self, filename):
        self.file = open(filename, "wt")

    def writekvs(self, kvs):
        for k, v in sorted(kvs.items()):
            if hasattr(v, "dtype"):
                kvs[k] = float(v)
        self.file.write(json.dumps(kvs) + "\n")
        self.file.flush()

    def close(self):
        self.file.close()


class CSVOutputFormat(KVWriter):
    def __init__(self, filename):
        self.file = open(filename, "w+t")
        self.keys = []
        self.sep = ","

    def writekvs(self, kvs):
        # Add our current row to the history
        extra_keys = list(kvs.keys() - self.keys)
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if i > 0:
                    self.file.write(",")
                self.file.write(k)
            self.file.write("\n")
            for line in lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.sep * len(extra_keys))
                self.file.write("\n")
        for (i, k) in enumerate(self.keys):
            if i > 0:
                self.file.write(",")
            v = kvs.get(k)
            if v is not None:
                self.file.write(str(v))
        self.file.write("\n")
        self.file.flush()

    def close(self):
        self.file.close()


class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """

    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1
        prefix = "events"
        path = osp.join(osp.abspath(dir), prefix)
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat

        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        def summary_val(k, v):
            kwargs = {"tag": k, "simple_value": float(v)}
            return self.tf.Summary.Value(**kwargs)

        summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = (
            self.step
        )  # is there any reason why you'd want to specify the step?
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None


def make_output_format(format, ev_dir, log_suffix=""):
    os.makedirs(ev_dir, exist_ok=True)
    if format == "stdout":
        return HumanOutputFormat(sys.stdout)
    elif format == "log":
        return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
    elif format == "json":
        return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
    elif format == "csv":
        return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
    elif format == "tensorboard":
        return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
    else:
        raise ValueError("Unknown format specified: %s" % (format,))


# ================================================================
# API
# ================================================================


def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)


def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)


def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)


def dumpkvs():
    """
    Write all of the diagnostics from the current iteration
    """
    return get_current().dumpkvs()


def getkvs():
    return get_current().name2val


def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)


def debug(*args):
    log(*args, level=DEBUG)


def info(*args):
    log(*args, level=INFO)


def warn(*args):
    log(*args, level=WARN)


def error(*args):
    log(*args, level=ERROR)


def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)


def set_comm(comm):
    get_current().set_comm(comm)


def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()


record_tabular = logkv
dump_tabular = dumpkvs


@contextmanager
def profile_kv(scopename):
    logkey = "wait_" + scopename
    tstart = time.time()
    try:
        yield
    finally:
        get_current().name2val[logkey] += time.time() - tstart


def profile(n):
    """
    Usage:
    @profile("my_func")
    def my_func(): code
    """

    def decorator_with_name(func):
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)

        return func_wrapper

    return decorator_with_name


# ================================================================
# Backend
# ================================================================


def get_current():
    if Logger.CURRENT is None:
        _configure_default_logger()

    return Logger.CURRENT


class Logger(object):
    DEFAULT = None  # A logger with no output files. (See right below class definition)
    # So that you can still log to the terminal without setting up any output files
    CURRENT = None  # Current logger being used by the free functions above

    def __init__(self, dir, output_formats, comm=None):
        self.name2val = defaultdict(float)  # values this iteration
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm

    # Logging API, forwarded
    # ----------------------------------------
    def logkv(self, key, val):
        self.name2val[key] = val

    def logkv_mean(self, key, val):
        oldval, cnt = self.name2val[key], self.name2cnt[key]
        self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
        self.name2cnt[key] = cnt + 1

    def dumpkvs(self):
        if self.comm is None:
            d = self.name2val
        else:
            d = mpi_weighted_mean(
                self.comm,
                {
                    name: (val, self.name2cnt.get(name, 1))
                    for (name, val) in self.name2val.items()
                },
            )
            if self.comm.rank != 0:
                d["dummy"] = 1  # so we don't get a warning about empty dict
        out = d.copy()  # Return the dict for unit testing purposes
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out

    def log(self, *args, level=INFO):
        if self.level <= level:
            self._do_log(args)

    # Configuration
    # ----------------------------------------
    def set_level(self, level):
        self.level = level

    def set_comm(self, comm):
        self.comm = comm

    def get_dir(self):
        return self.dir

    def close(self):
        for fmt in self.output_formats:
            fmt.close()

    # Misc
    # ----------------------------------------
    def _do_log(self, args):
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))


def get_rank_without_mpi_import():
    # check environment variables here instead of importing mpi4py
    # to avoid calling MPI_Init() when this module is imported
    for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
        if varname in os.environ:
            return int(os.environ[varname])
    return 0


def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node
    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean
    """
    all_name2valcount = comm.gather(local_name2valcount)
    if comm.rank == 0:
        name2sum = defaultdict(float)
        name2count = defaultdict(float)
        for n2vc in all_name2valcount:
            for (name, (val, count)) in n2vc.items():
                try:
                    val = float(val)
                except ValueError:
                    if comm.rank == 0:
                        warnings.warn(
                            "WARNING: tried to compute mean on non-float {}={}".format(
                                name, val
                            )
                        )
                else:
                    name2sum[name] += val * count
                    name2count[name] += count
        return {name: name2sum[name] / name2count[name] for name in name2sum}
    else:
        return {}


def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
    """
    If comm is provided, average all numerical stats across that comm
    """
    if dir is None:
        dir = os.getenv("OPENAI_LOGDIR")
    if dir is None:
        dir = osp.join(
            tempfile.gettempdir(),
            datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
        )
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)

    rank = get_rank_without_mpi_import()
    if rank > 0:
        log_suffix = log_suffix + "-rank%03i" % rank

    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
        else:
            format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]

    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log("Logging to %s" % dir)


def _configure_default_logger():
    configure()
    Logger.DEFAULT = Logger.CURRENT


def reset():
    if Logger.CURRENT is not Logger.DEFAULT:
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log("Reset logger")


@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
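A minimal sketch of the logger's key-value API, assuming write access to a local ./logs directory (OPENAI_LOGDIR and OPENAI_LOG_FORMAT can be used instead of explicit arguments):

    configure(dir="./logs", format_strs=["stdout", "csv"])
    for step in range(10):
        logkv("step", step)
        logkv_mean("loss", 1.0 / (step + 1))  # averaged over calls between dumps
        dumpkvs()                             # writes one row, then clears the buffer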
pixel_guide_diffusion/losses.py
ADDED
@@ -0,0 +1,77 @@
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""

import numpy as np

import torch as th


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, th.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    logvar1, logvar2 = [
        x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )


def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal.
    """
    return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))


def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    centered_x = x - means
    inv_stdv = th.exp(-log_scales)
    plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
    cdf_plus = approx_standard_normal_cdf(plus_in)
    min_in = inv_stdv * (centered_x - 1.0 / 255.0)
    cdf_min = approx_standard_normal_cdf(min_in)
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    cdf_delta = cdf_plus - cdf_min
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
    )
    assert log_probs.shape == x.shape
    return log_probs
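A quick numeric sanity check of normal_kl against the closed form: for identical standard normals the divergence is zero, and shifting the second mean by 1 adds exactly 0.5:

    import torch as th

    zero = normal_kl(th.tensor(0.0), th.tensor(0.0), th.tensor(0.0), th.tensor(0.0))
    half = normal_kl(th.tensor(0.0), th.tensor(0.0), th.tensor(1.0), th.tensor(0.0))
    assert abs(zero.item()) < 1e-6
    assert abs(half.item() - 0.5) < 1e-6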
pixel_guide_diffusion/nn.py
ADDED
@@ -0,0 +1,191 @@
1 |
+
"""
|
2 |
+
Various utilities for neural networks.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import math
|
6 |
+
|
7 |
+
import torch as th
|
8 |
+
import torch.nn as nn
|
9 |
+
|
10 |
+
|
11 |
+
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
|
12 |
+
class SiLU(nn.Module):
|
13 |
+
def forward(self, x):
|
14 |
+
return x * th.sigmoid(x)
|
15 |
+
|
16 |
+
|
17 |
+
class GroupNorm32(nn.GroupNorm):
|
18 |
+
def forward(self, x):
|
19 |
+
return super().forward(x.float()).type(x.dtype)
|
20 |
+
|
21 |
+
|
22 |
+
class SpaceToDepth(nn.Module):
|
23 |
+
def __init__(self, factor):
|
24 |
+
super().__init__()
|
25 |
+
|
26 |
+
self.factor = factor
|
27 |
+
|
28 |
+
def forward(self, x):
|
29 |
+
if self.factor == 1:
|
30 |
+
return x
|
31 |
+
|
32 |
+
batch, channel, height, width = x.shape
|
33 |
+
h_fold = height // self.factor
|
34 |
+
w_fold = width // self.factor
|
35 |
+
|
36 |
+
return (
|
37 |
+
x.view(batch, channel, h_fold, self.factor, w_fold, self.factor)
|
38 |
+
.permute(0, 1, 3, 5, 2, 4)
|
39 |
+
.reshape(batch, -1, h_fold, w_fold)
|
40 |
+
)
|
41 |
+
|
42 |
+
|
43 |
+
def conv_nd(dims, *args, **kwargs):
|
44 |
+
"""
|
45 |
+
Create a 1D, 2D, or 3D convolution module.
|
46 |
+
"""
|
47 |
+
if dims == 1:
|
48 |
+
return nn.Conv1d(*args, **kwargs)
|
49 |
+
elif dims == 2:
|
50 |
+
return nn.Conv2d(*args, **kwargs)
|
51 |
+
elif dims == 3:
|
52 |
+
return nn.Conv3d(*args, **kwargs)
|
53 |
+
raise ValueError(f"unsupported dimensions: {dims}")
|
54 |
+
|
55 |
+
|
56 |
+
def linear(*args, **kwargs):
|
57 |
+
"""
|
58 |
+
Create a linear module.
|
59 |
+
"""
|
60 |
+
return nn.Linear(*args, **kwargs)
|
61 |
+
|
62 |
+
|
63 |
+
def avg_pool_nd(dims, *args, **kwargs):
|
64 |
+
"""
|
65 |
+
Create a 1D, 2D, or 3D average pooling module.
|
66 |
+
"""
|
67 |
+
if dims == 1:
|
68 |
+
return nn.AvgPool1d(*args, **kwargs)
|
69 |
+
elif dims == 2:
|
70 |
+
return nn.AvgPool2d(*args, **kwargs)
|
71 |
+
elif dims == 3:
|
72 |
+
return nn.AvgPool3d(*args, **kwargs)
|
73 |
+
raise ValueError(f"unsupported dimensions: {dims}")
|
74 |
+
|
75 |
+
|
76 |
+
def update_ema(target_params, source_params, rate=0.99):
|
77 |
+
"""
|
78 |
+
Update target parameters to be closer to those of source parameters using
|
79 |
+
an exponential moving average.
|
80 |
+
|
81 |
+
:param target_params: the target parameter sequence.
|
82 |
+
:param source_params: the source parameter sequence.
|
83 |
+
:param rate: the EMA rate (closer to 1 means slower).
|
84 |
+
"""
|
85 |
+
for targ, src in zip(target_params, source_params):
|
86 |
+
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
87 |
+
|
88 |
+
|
89 |
+
def zero_module(module):
|
90 |
+
"""
|
91 |
+
Zero out the parameters of a module and return it.
|
92 |
+
"""
|
93 |
+
for p in module.parameters():
|
94 |
+
p.detach().zero_()
|
95 |
+
return module
|
96 |
+
|
97 |
+
|
98 |
+
def scale_module(module, scale):
|
99 |
+
"""
|
100 |
+
Scale the parameters of a module and return it.
|
101 |
+
"""
|
102 |
+
for p in module.parameters():
|
103 |
+
p.detach().mul_(scale)
|
104 |
+
return module
|
105 |
+
|
106 |
+
|
107 |
+
def mean_flat(tensor):
|
108 |
+
"""
|
109 |
+
Take the mean over all non-batch dimensions.
|
110 |
+
"""
|
111 |
+
return tensor.mean(dim=list(range(1, len(tensor.shape))))
|
112 |
+
|
113 |
+
|
114 |
+
def normalization(channels):
|
115 |
+
"""
|
116 |
+
Make a standard normalization layer.
|
117 |
+
|
118 |
+
:param channels: number of input channels.
|
119 |
+
:return: an nn.Module for normalization.
|
120 |
+
"""
|
121 |
+
return GroupNorm32(32, channels)
|
122 |
+
|
123 |
+
|
124 |
+
def timestep_embedding(timesteps, dim, max_period=10000):
|
125 |
+
"""
|
126 |
+
Create sinusoidal timestep embeddings.
|
127 |
+
|
128 |
+
:param timesteps: a 1-D Tensor of N indices, one per batch element.
|
129 |
+
These may be fractional.
|
130 |
+
:param dim: the dimension of the output.
|
131 |
+
:param max_period: controls the minimum frequency of the embeddings.
|
132 |
+
:return: an [N x dim] Tensor of positional embeddings.
|
133 |
+
"""
|
134 |
+
half = dim // 2
|
135 |
+
freqs = th.exp(
|
136 |
+
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
|
137 |
+
).to(device=timesteps.device)
|
138 |
+
args = timesteps[:, None].float() * freqs[None]
|
139 |
+
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
|
140 |
+
if dim % 2:
|
141 |
+
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
|
142 |
+
return embedding
|
143 |
+
|
144 |
+
|
145 |
+
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)


class CheckpointFunction(th.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        return (None, None) + input_grads
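
A minimal sketch of `checkpoint` in use (the `block` and `x` names are illustrative): with `flag=True` the wrapped forward runs under `no_grad`, and `CheckpointFunction.backward` replays it with gradients enabled, so activations are recomputed instead of stored.

import torch as th
import torch.nn as nn

from pixel_guide_diffusion.nn import checkpoint

block = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16))
x = th.randn(2, 16, requires_grad=True)

# One extra forward pass during backward(), in exchange for not caching
# intermediate activations during the first pass.
y = checkpoint(lambda t: block(t), (x,), block.parameters(), True)
y.sum().backward()
assert x.grad is not None
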
pixel_guide_diffusion/resample.py
ADDED
@@ -0,0 +1,154 @@
from abc import ABC, abstractmethod

import numpy as np
import torch as th
import torch.distributed as dist


def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler.
    :param diffusion: the diffusion object to sample for.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    elif name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    else:
        raise NotImplementedError(f"unknown schedule sampler: {name}")


class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged.
    However, subclasses may override sample() to change how the resampled
    terms are reweighted, allowing for actual changes in the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.

        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        w = self.weights()
        p = w / np.sum(w)
        indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
        indices = th.from_numpy(indices_np).long().to(device)
        weights_np = 1 / (len(p) * p[indices_np])
        weights = th.from_numpy(weights_np).float().to(device)
        return indices, weights


class UniformSampler(ScheduleSampler):
    def __init__(self, diffusion):
        self.diffusion = diffusion
        self._weights = np.ones([diffusion.num_timesteps])

    def weights(self):
        return self._weights


class LossAwareSampler(ScheduleSampler):
    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.

        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.

        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )

        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)

        timestep_batches = [th.zeros(max_bs).to(local_ts) for _ in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for _ in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.

        Sub-classes should override this method to update the reweighting
        using losses from the model.

        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.

        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """


class LossSecondMomentResampler(LossAwareSampler):
    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        if not self._warmed_up():
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        return (self._loss_counts == self.history_per_term).all()
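
Usage sketch (`diffusion` stands for any object exposing `num_timesteps`, e.g. one returned by `script_util.create_gaussian_diffusion`):

import torch as th

from pixel_guide_diffusion.resample import create_named_schedule_sampler

sampler = create_named_schedule_sampler("uniform", diffusion)
t, weights = sampler.sample(batch_size=8, device=th.device("cpu"))
# With the uniform sampler every p[i] = 1/num_timesteps, so each weight is
# 1 / (len(p) * p[i]) = 1 and the reweighted loss reduces to a plain mean.
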
pixel_guide_diffusion/respace.py
ADDED
@@ -0,0 +1,122 @@
import numpy as np
import torch as th

from .gaussian_diffusion import GaussianDiffusion


def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there are 300 timesteps and the section counts are
    [10,15,20], then the first 100 timesteps are strided to be 10 timesteps,
    the second 100 are strided to be 15 timesteps, and the final 100 are
    strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
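
# Worked example of the docstring above (illustrative): three 100-step
# sections retain 10, 15, and 20 steps respectively, 45 steps in total.
#
#     assert len(space_timesteps(300, [10, 15, 20])) == 45
#     # "ddim25" finds the integer stride giving exactly 25 steps:
#     assert space_timesteps(300, "ddim25") == set(range(0, 300, 12))
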


class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])

        base_diffusion = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)

    def p_mean_variance(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def _wrap_model(self, model):
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )

    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t


class _WrappedModel:
    def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
        self.model = model
        self.timestep_map = timestep_map
        self.rescale_timesteps = rescale_timesteps
        self.original_num_steps = original_num_steps

    def __call__(self, x, ts, **kwargs):
        map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
        new_ts = map_tensor[ts]
        if self.rescale_timesteps:
            new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
        return self.model(x, new_ts, **kwargs)
ADDED
@@ -0,0 +1,537 @@
|
import argparse
import inspect

from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
from .unet import PixelGuideSuperResModel, PixelGuideModel, SuperResModel, UNetModel

NUM_CLASSES = 1000


def model_and_diffusion_defaults():
    """
    Defaults for image training.
    """
    return dict(
        image_size=64,
        num_channels=128,
        num_res_blocks=2,
        num_heads=4,
        num_heads_upsample=-1,
        attention_resolutions="16,8",
        dropout=0.0,
        learn_sigma=False,
        sigma_small=False,
        class_cond=False,
        diffusion_steps=1000,
        noise_schedule="linear",
        timestep_respacing="",
        use_kl=False,
        predict_xstart=False,
        rescale_timesteps=True,
        rescale_learned_sigmas=True,
        use_checkpoint=False,
        use_scale_shift_norm=True,
        use_attention=True,
    )


def create_model_and_diffusion(
    image_size,
    class_cond,
    learn_sigma,
    sigma_small,
    num_channels,
    num_res_blocks,
    num_heads,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    use_attention,
):
    model = create_model(
        image_size,
        num_channels,
        num_res_blocks,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
        dropout=dropout,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        sigma_small=sigma_small,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, diffusion


def create_model(
    image_size,
    num_channels,
    num_res_blocks,
    learn_sigma,
    class_cond,
    use_checkpoint,
    attention_resolutions,
    num_heads,
    num_heads_upsample,
    use_scale_shift_norm,
    use_attention,
    dropout,
):
    if image_size == 256:
        channel_mult = (1, 1, 2, 2, 4, 4)
    elif image_size == 128:
        channel_mult = (1, 2, 2, 3, 4)
    elif image_size == 64:
        channel_mult = (1, 2, 3, 4)
    elif image_size == 32:
        channel_mult = (1, 2, 2, 2)
    else:
        raise ValueError(f"unsupported image size: {image_size}")

    attention_ds = []
    for res in attention_resolutions.split(","):
        attention_ds.append(image_size // int(res))

    return UNetModel(
        in_channels=3,
        model_channels=num_channels,
        out_channels=(3 if not learn_sigma else 6),
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
    )


def sr_model_and_diffusion_defaults():
    res = model_and_diffusion_defaults()
    res["large_size"] = 256
    res["small_size"] = 64
    arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
    for k in res.copy().keys():
        if k not in arg_names:
            del res[k]
    return res


def sr_create_model_and_diffusion(
    large_size,
    small_size,
    class_cond,
    learn_sigma,
    num_channels,
    num_res_blocks,
    num_heads,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    use_attention,
):
    model = sr_create_model(
        large_size,
        small_size,
        num_channels,
        num_res_blocks,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
        dropout=dropout,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, diffusion


def sr_create_model(
    large_size,
    small_size,
    num_channels,
    num_res_blocks,
    learn_sigma,
    class_cond,
    use_checkpoint,
    attention_resolutions,
    num_heads,
    num_heads_upsample,
    use_scale_shift_norm,
    use_attention,
    dropout,
):
    _ = small_size  # hack to prevent unused-variable warnings

    if large_size == 256:
        channel_mult = (1, 1, 2, 2, 4, 4)
    elif large_size == 128:
        channel_mult = (1, 2, 2, 3, 4)
    elif large_size == 64:
        channel_mult = (1, 2, 3, 4)
    else:
        raise ValueError(f"unsupported large size: {large_size}")

    attention_ds = []
    for res in attention_resolutions.split(","):
        attention_ds.append(large_size // int(res))

    return SuperResModel(
        in_channels=3,
        model_channels=num_channels,
        out_channels=(3 if not learn_sigma else 6),
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
    )


def pg_model_and_diffusion_defaults():
    res = model_and_diffusion_defaults()
    res["image_size"] = 32
    res["guide_size"] = 256
    arg_names = inspect.getfullargspec(pg_create_model_and_diffusion)[0]
    for k in res.copy().keys():
        if k not in arg_names:
            del res[k]
    return res


def pg_create_model_and_diffusion(
    image_size,
    guide_size,
    class_cond,
    learn_sigma,
    num_channels,
    num_res_blocks,
    num_heads,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    use_attention,
):
    model = pg_create_model(
        image_size,
        guide_size,
        num_channels,
        num_res_blocks,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        dropout=dropout,
        use_attention=use_attention,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, diffusion


def pg_create_model(
    image_size,
    guide_size,
    num_channels,
    num_res_blocks,
    learn_sigma,
    class_cond,
    use_checkpoint,
    attention_resolutions,
    num_heads,
    num_heads_upsample,
    use_scale_shift_norm,
    use_attention,
    dropout,
):
    if image_size == 256:
        channel_mult = (1, 1, 2, 2, 4, 4)
    elif image_size == 128:
        channel_mult = (1, 2, 2, 3, 4)
    elif image_size == 64:
        channel_mult = (1, 2, 3, 4)
    elif image_size == 32:
        channel_mult = (1, 2, 2, 2)
    else:
        raise ValueError(f"unsupported image size: {image_size}")

    attention_ds = []
    for res in attention_resolutions.split(","):
        attention_ds.append(image_size // int(res))

    guide_fold = guide_size // image_size

    return PixelGuideModel(
        in_channels=3,
        guide_channels=1,
        model_channels=num_channels,
        out_channels=(3 if not learn_sigma else 6),
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
        guide_fold=guide_fold,
    )


def pgsr_model_and_diffusion_defaults():
    res = model_and_diffusion_defaults()
    res["large_size"] = 256
    res["small_size"] = 64
    res["guide_size"] = 256
    arg_names = inspect.getfullargspec(pgsr_create_model_and_diffusion)[0]
    for k in res.copy().keys():
        if k not in arg_names:
            del res[k]
    return res


def pgsr_create_model_and_diffusion(
    large_size,
    small_size,
    guide_size,
    class_cond,
    learn_sigma,
    num_channels,
    num_res_blocks,
    num_heads,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    use_attention,
):
    model = pgsr_create_model(
        large_size,
        small_size,
        guide_size,
        num_channels,
        num_res_blocks,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
        dropout=dropout,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, diffusion


def pgsr_create_model(
    large_size,
    small_size,
    guide_size,
    num_channels,
    num_res_blocks,
    learn_sigma,
    class_cond,
    use_checkpoint,
    attention_resolutions,
    num_heads,
    num_heads_upsample,
    use_scale_shift_norm,
    use_attention,
    dropout,
):
    _ = small_size  # hack to prevent unused-variable warnings

    if large_size == 256:
        channel_mult = (1, 2, 2, 3, 4)
    elif large_size == 128:
        channel_mult = (1, 2, 2, 2)
    else:
        raise ValueError(f"unsupported large size: {large_size}")

    attention_ds = []
    for res in attention_resolutions.split(","):
        attention_ds.append(large_size // int(res))

    guide_fold = guide_size // large_size

    return PixelGuideSuperResModel(
        in_channels=3,
        guide_channels=1,
        model_channels=num_channels,
        out_channels=(3 if not learn_sigma else 6),
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        num_heads=num_heads,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        use_attention=use_attention,
        guide_fold=guide_fold,
    )


def create_gaussian_diffusion(
    *,
    steps=1000,
    learn_sigma=False,
    sigma_small=False,
    noise_schedule="linear",
    use_kl=False,
    predict_xstart=False,
    rescale_timesteps=False,
    rescale_learned_sigmas=False,
    timestep_respacing="",
):
    betas = gd.get_named_beta_schedule(noise_schedule, steps)
    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE
    if not timestep_respacing:
        timestep_respacing = [steps]
    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, timestep_respacing),
        betas=betas,
        model_mean_type=(
            gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
        ),
        model_var_type=(
            (
                gd.ModelVarType.FIXED_LARGE
                if not sigma_small
                else gd.ModelVarType.FIXED_SMALL
            )
            if not learn_sigma
            else gd.ModelVarType.LEARNED_RANGE
        ),
        loss_type=loss_type,
        rescale_timesteps=rescale_timesteps,
    )


def add_dict_to_argparser(parser, default_dict):
    for k, v in default_dict.items():
        v_type = type(v)
        if v is None:
            v_type = str
        elif isinstance(v, bool):
            v_type = str2bool
        parser.add_argument(f"--{k}", default=v, type=v_type)


def args_to_dict(args, keys):
    return {k: getattr(args, k) for k in keys}


def str2bool(v):
    """
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
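
The three argparse helpers exist so every script can expose the full defaults dict as flags; a condensed sketch of the pattern the training scripts follow:

import argparse

from pixel_guide_diffusion.script_util import (
    add_dict_to_argparser,
    args_to_dict,
    create_model_and_diffusion,
    model_and_diffusion_defaults,
)

defaults = model_and_diffusion_defaults()
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)  # one --flag per default, bools via str2bool

args = parser.parse_args(["--image_size", "64", "--learn_sigma", "true"])
model, diffusion = create_model_and_diffusion(
    **args_to_dict(args, defaults.keys())
)
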
pixel_guide_diffusion/train_util.py
ADDED
@@ -0,0 +1,356 @@
import copy
import functools
import os

import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW

from . import dist_util, logger
from .fp16_util import (
    make_master_params,
    master_params_to_model_params,
    model_grads_to_master_grads,
    unflatten_master_params,
    zero_grad,
)
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler

# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0


class TrainLoop:
    def __init__(
        self,
        *,
        model,
        diffusion,
        data,
        batch_size,
        microbatch,
        lr,
        ema_rate,
        log_interval,
        save_interval,
        resume_checkpoint,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        schedule_sampler=None,
        weight_decay=0.0,
        lr_anneal_steps=0,
    ):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        self.microbatch = microbatch if microbatch > 0 else batch_size
        self.lr = lr
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps

        self.step = 0
        self.resume_step = 0
        self.global_batch = self.batch_size * dist.get_world_size()

        self.model_params = list(self.model.parameters())
        self.master_params = self.model_params
        self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
        self.sync_cuda = th.cuda.is_available()

        self._load_and_sync_parameters()
        if self.use_fp16:
            self._setup_fp16()

        self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            self.ema_params = [
                copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
            ]

        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=False,
            )
        else:
            if dist.get_world_size() > 1:
                logger.warn(
                    "Distributed training requires CUDA. "
                    "Gradients will not be synchronized properly!"
                )
            self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint

        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if dist.get_rank() == 0:
                logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )

        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        ema_params = copy.deepcopy(self.master_params)

        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if dist.get_rank() == 0:
                logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self._state_dict_to_master_params(state_dict)

        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)

    def _setup_fp16(self):
        self.master_params = make_master_params(self.model_params)
        self.model.convert_to_fp16()

    def run_loop(self):
        while (
            not self.lr_anneal_steps
            or self.step + self.resume_step < self.lr_anneal_steps
        ):
            batch, cond = next(self.data)
            self.run_step(batch, cond)
            if self.step % self.log_interval == 0:
                logger.dumpkvs()
            if self.step % self.save_interval == 0:
                self.save()
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if (self.step - 1) % self.save_interval != 0:
            self.save()

    def run_step(self, batch, cond):
        self.forward_backward(batch, cond)
        if self.use_fp16:
            self.optimize_fp16()
        else:
            self.optimize_normal()
        self.log_step()

    def forward_backward(self, batch, cond):
        zero_grad(self.model_params)
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i : i + self.microbatch].to(dist_util.dev())
            micro_cond = {
                k: v[i : i + self.microbatch].to(dist_util.dev())
                for k, v in cond.items()
            }
            last_batch = (i + self.microbatch) >= batch.shape[0]
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())

            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )

            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()

            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )

            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            if self.use_fp16:
                loss_scale = 2 ** self.lg_loss_scale
                (loss * loss_scale).backward()
            else:
                loss.backward()

    def optimize_fp16(self):
        if any(not th.isfinite(p.grad).all() for p in self.model_params):
            self.lg_loss_scale -= 1
            logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
            return

        model_grads_to_master_grads(self.model_params, self.master_params)
        self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)
        master_params_to_model_params(self.model_params, self.master_params)
        self.lg_loss_scale += self.fp16_scale_growth

    def optimize_normal(self):
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)

    def _log_grad_norm(self):
        sqsum = 0.0
        for p in self.master_params:
            sqsum += (p.grad ** 2).sum().item()
        logger.logkv_mean("grad_norm", np.sqrt(sqsum))

    def _anneal_lr(self):
        if not self.lr_anneal_steps:
            return
        frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
        lr = self.lr * (1 - frac_done)
        for param_group in self.opt.param_groups:
            param_group["lr"] = lr

    def log_step(self):
        logger.logkv("step", self.step + self.resume_step)
        logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
        if self.use_fp16:
            logger.logkv("lg_loss_scale", self.lg_loss_scale)

    def save(self):
        def save_checkpoint(rate, params):
            state_dict = self._master_params_to_state_dict(params)
            if dist.get_rank() == 0:
                logger.log(f"saving model {rate}...")
                if not rate:
                    filename = f"model{(self.step+self.resume_step):06d}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
                    th.save(state_dict, f)

        save_checkpoint(0, self.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)

        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)

        dist.barrier()

    def _master_params_to_state_dict(self, master_params):
        if self.use_fp16:
            master_params = unflatten_master_params(
                self.model.parameters(), master_params
            )
        state_dict = self.model.state_dict()
        for i, (name, _value) in enumerate(self.model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
        return state_dict

    def _state_dict_to_master_params(self, state_dict):
        params = [state_dict[name] for name, _ in self.model.named_parameters()]
        if self.use_fp16:
            return make_master_params(params)
        else:
            return params


def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps.
    """
    split = filename.split("model")
    if len(split) < 2:
        return 0
    split1 = split[-1].split(".")[0]
    try:
        return int(split1)
    except ValueError:
        return 0


def get_blob_logdir():
    return os.environ.get("DIFFUSION_BLOB_LOGDIR", logger.get_dir())


def find_resume_checkpoint():
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    return None


def find_ema_checkpoint(main_checkpoint, step, rate):
    if main_checkpoint is None:
        return None
    filename = f"ema_{rate}_{(step):06d}.pt"
    path = bf.join(bf.dirname(main_checkpoint), filename)
    if bf.exists(path):
        return path
    return None


def log_loss_dict(diffusion, ts, losses):
    for key, values in losses.items():
        logger.logkv_mean(key, values.mean().item())
        # Log the quantiles (four quartiles, in particular).
        for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
            quartile = int(4 * sub_t / diffusion.num_timesteps)
            logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
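
For orientation, the shape of a driver (a sketch only: `model`, `diffusion`, and a `data` iterator yielding `(batch, cond)` pairs are assumed to exist, and process-group state must be set up first, as the training scripts do via `dist_util.setup_dist()`):

from pixel_guide_diffusion.resample import create_named_schedule_sampler
from pixel_guide_diffusion.train_util import TrainLoop

TrainLoop(
    model=model,
    diffusion=diffusion,
    data=data,
    batch_size=4,
    microbatch=-1,  # <= 0 means "use the full batch"
    lr=1e-4,
    ema_rate="0.9999",
    log_interval=10,
    save_interval=10000,
    resume_checkpoint="",
    use_fp16=False,
    schedule_sampler=create_named_schedule_sampler("uniform", diffusion),
).run_loop()
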
pixel_guide_diffusion/unet.py
ADDED
@@ -0,0 +1,594 @@
from abc import abstractmethod
|
2 |
+
|
3 |
+
import math
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
import torch as th
|
7 |
+
import torch.nn as nn
|
8 |
+
import torch.nn.functional as F
|
9 |
+
|
10 |
+
from .fp16_util import convert_module_to_f16, convert_module_to_f32
|
11 |
+
from .nn import (
|
12 |
+
SiLU,
|
13 |
+
SpaceToDepth,
|
14 |
+
conv_nd,
|
15 |
+
linear,
|
16 |
+
avg_pool_nd,
|
17 |
+
zero_module,
|
18 |
+
normalization,
|
19 |
+
timestep_embedding,
|
20 |
+
checkpoint,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
class TimestepBlock(nn.Module):
|
25 |
+
"""
|
26 |
+
Any module where forward() takes timestep embeddings as a second argument.
|
27 |
+
"""
|
28 |
+
|
29 |
+
@abstractmethod
|
30 |
+
def forward(self, x, emb):
|
31 |
+
"""
|
32 |
+
Apply the module to `x` given `emb` timestep embeddings.
|
33 |
+
"""
|
34 |
+
|
35 |
+
|
36 |
+
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
|
37 |
+
"""
|
38 |
+
A sequential module that passes timestep embeddings to the children that
|
39 |
+
support it as an extra input.
|
40 |
+
"""
|
41 |
+
|
42 |
+
def forward(self, x, emb):
|
43 |
+
for layer in self:
|
44 |
+
if isinstance(layer, TimestepBlock):
|
45 |
+
x = layer(x, emb)
|
46 |
+
else:
|
47 |
+
x = layer(x)
|
48 |
+
return x
|
49 |
+
|
50 |
+
|
51 |
+
class Upsample(nn.Module):
|
52 |
+
"""
|
53 |
+
An upsampling layer with an optional convolution.
|
54 |
+
|
55 |
+
:param channels: channels in the inputs and outputs.
|
56 |
+
:param use_conv: a bool determining if a convolution is applied.
|
57 |
+
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
|
58 |
+
upsampling occurs in the inner-two dimensions.
|
59 |
+
"""
|
60 |
+
|
61 |
+
def __init__(self, channels, use_conv, dims=2):
|
62 |
+
super().__init__()
|
63 |
+
self.channels = channels
|
64 |
+
self.use_conv = use_conv
|
65 |
+
self.dims = dims
|
66 |
+
if use_conv:
|
67 |
+
self.conv = conv_nd(dims, channels, channels, 3, padding=1)
|
68 |
+
|
69 |
+
def forward(self, x):
|
70 |
+
assert x.shape[1] == self.channels
|
71 |
+
if self.dims == 3:
|
72 |
+
x = F.interpolate(
|
73 |
+
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
|
74 |
+
)
|
75 |
+
else:
|
76 |
+
x = F.interpolate(x, scale_factor=2, mode="nearest")
|
77 |
+
if self.use_conv:
|
78 |
+
x = self.conv(x)
|
79 |
+
return x
|
80 |
+
|
81 |
+
|
82 |
+
class Downsample(nn.Module):
|
83 |
+
"""
|
84 |
+
A downsampling layer with an optional convolution.
|
85 |
+
|
86 |
+
:param channels: channels in the inputs and outputs.
|
87 |
+
:param use_conv: a bool determining if a convolution is applied.
|
88 |
+
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
|
89 |
+
downsampling occurs in the inner-two dimensions.
|
90 |
+
"""
|
91 |
+
|
92 |
+
def __init__(self, channels, use_conv, dims=2):
|
93 |
+
super().__init__()
|
94 |
+
self.channels = channels
|
95 |
+
self.use_conv = use_conv
|
96 |
+
self.dims = dims
|
97 |
+
stride = 2 if dims != 3 else (1, 2, 2)
|
98 |
+
if use_conv:
|
99 |
+
self.op = conv_nd(dims, channels, channels, 3, stride=stride, padding=1)
|
100 |
+
else:
|
101 |
+
self.op = avg_pool_nd(stride)
|
102 |
+
|
103 |
+
def forward(self, x):
|
104 |
+
assert x.shape[1] == self.channels
|
105 |
+
return self.op(x)
|
106 |
+
|
107 |
+
|
108 |
+
class ResBlock(TimestepBlock):
|
109 |
+
"""
|
110 |
+
A residual block that can optionally change the number of channels.
|
111 |
+
|
112 |
+
:param channels: the number of input channels.
|
113 |
+
:param emb_channels: the number of timestep embedding channels.
|
114 |
+
:param dropout: the rate of dropout.
|
115 |
+
:param out_channels: if specified, the number of out channels.
|
116 |
+
:param use_conv: if True and out_channels is specified, use a spatial
|
117 |
+
convolution instead of a smaller 1x1 convolution to change the
|
118 |
+
channels in the skip connection.
|
119 |
+
:param dims: determines if the signal is 1D, 2D, or 3D.
|
120 |
+
:param use_checkpoint: if True, use gradient checkpointing on this module.
|
121 |
+
"""
|
122 |
+
|
123 |
+
def __init__(
|
124 |
+
self,
|
125 |
+
channels,
|
126 |
+
emb_channels,
|
127 |
+
dropout,
|
128 |
+
out_channels=None,
|
129 |
+
use_conv=False,
|
130 |
+
use_scale_shift_norm=False,
|
131 |
+
dims=2,
|
132 |
+
use_checkpoint=False,
|
133 |
+
):
|
134 |
+
super().__init__()
|
135 |
+
self.channels = channels
|
136 |
+
self.emb_channels = emb_channels
|
137 |
+
self.dropout = dropout
|
138 |
+
self.out_channels = out_channels or channels
|
139 |
+
self.use_conv = use_conv
|
140 |
+
self.use_checkpoint = use_checkpoint
|
141 |
+
self.use_scale_shift_norm = use_scale_shift_norm
|
142 |
+
|
143 |
+
self.in_layers = nn.Sequential(
|
144 |
+
normalization(channels),
|
145 |
+
SiLU(),
|
146 |
+
conv_nd(dims, channels, self.out_channels, 3, padding=1),
|
147 |
+
)
|
148 |
+
self.emb_layers = nn.Sequential(
|
149 |
+
SiLU(),
|
150 |
+
linear(
|
151 |
+
emb_channels,
|
152 |
+
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
|
153 |
+
),
|
154 |
+
)
|
155 |
+
self.out_layers = nn.Sequential(
|
156 |
+
normalization(self.out_channels),
|
157 |
+
SiLU(),
|
158 |
+
nn.Dropout(p=dropout),
|
159 |
+
zero_module(
|
160 |
+
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
|
161 |
+
),
|
162 |
+
)
|
163 |
+
|
164 |
+
if self.out_channels == channels:
|
165 |
+
self.skip_connection = nn.Identity()
|
166 |
+
elif use_conv:
|
167 |
+
self.skip_connection = conv_nd(
|
168 |
+
dims, channels, self.out_channels, 3, padding=1
|
169 |
+
)
|
170 |
+
else:
|
171 |
+
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
|
172 |
+
|
173 |
+
def forward(self, x, emb):
|
174 |
+
"""
|
175 |
+
Apply the block to a Tensor, conditioned on a timestep embedding.
|
176 |
+
|
177 |
+
:param x: an [N x C x ...] Tensor of features.
|
178 |
+
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
|
179 |
+
:return: an [N x C x ...] Tensor of outputs.
|
180 |
+
"""
|
181 |
+
return checkpoint(
|
182 |
+
self._forward, (x, emb), self.parameters(), self.use_checkpoint
|
183 |
+
)
|
184 |
+
|
185 |
+
def _forward(self, x, emb):
|
186 |
+
h = self.in_layers(x)
|
187 |
+
emb_out = self.emb_layers(emb).type(h.dtype)
|
188 |
+
while len(emb_out.shape) < len(h.shape):
|
189 |
+
emb_out = emb_out[..., None]
|
190 |
+
if self.use_scale_shift_norm:
|
191 |
+
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
|
192 |
+
scale, shift = th.chunk(emb_out, 2, dim=1)
|
193 |
+
h = out_norm(h) * (1 + scale) + shift
|
194 |
+
h = out_rest(h)
|
195 |
+
else:
|
196 |
+
h = h + emb_out
|
197 |
+
h = self.out_layers(h)
|
198 |
+
return self.skip_connection(x) + h
|
199 |
+
|
200 |
+
|

class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(self, channels, num_heads=1, use_checkpoint=False):
        super().__init__()
        self.channels = channels
        self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint

        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        self.attention = QKVAttention()
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2])
        h = self.attention(qkv)
        h = h.reshape(b, -1, h.shape[-1])
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
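
# Note: AttentionBlock flattens all spatial dimensions into one sequence axis
# and folds the heads into the batch dimension (b * num_heads) before calling
# QKVAttention, so the attention operation itself is head-agnostic.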

class QKVAttention(nn.Module):
    """
    A module which performs QKV attention.
    """

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (C * 3) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x C x T] tensor after attention.
        """
        ch = qkv.shape[1] // 3
        q, k, v = th.split(qkv, ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        return th.einsum("bts,bcs->bct", weight, v)

    @staticmethod
    def count_flops(model, _x, y):
        """
        A counter for the `thop` package to count the operations in an
        attention operation.

        Meant to be used like:

            macs, params = thop.profile(
                model,
                inputs=(inputs, timestamps),
                custom_ops={QKVAttention: QKVAttention.count_flops},
            )
        """
        b, c, *spatial = y[0].shape
        num_spatial = int(np.prod(spatial))
        # We perform two matmuls with the same number of ops.
        # The first computes the weight matrix, the second computes
        # the combination of the value vectors.
        matmul_ops = 2 * b * (num_spatial ** 2) * c
        model.total_ops += th.DoubleTensor([matmul_ops])
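
# Note: scaling q and k each by 1 / ch ** 0.25 makes their product carry the
# standard 1 / sqrt(ch) attention temperature while keeping the intermediate
# values small enough to avoid float16 overflow before the softmax.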

class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    """

    def __init__(
        self,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        num_heads=1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        use_attention=True,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.num_heads = num_heads
        self.num_heads_upsample = num_heads_upsample

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch, use_checkpoint=use_checkpoint, num_heads=num_heads
                        ) if use_attention else nn.Sequential()
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                self.input_blocks.append(
                    TimestepEmbedSequential(Downsample(ch, conv_resample, dims=dims))
                )
                input_block_chans.append(ch)
                ds *= 2

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(ch, use_checkpoint=use_checkpoint, num_heads=num_heads) if use_attention else nn.Sequential(),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                layers = [
                    ResBlock(
                        ch + input_block_chans.pop(),
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                        ) if use_attention else nn.Sequential()
                    )
                if level and i == num_res_blocks:
                    layers.append(Upsample(ch, conv_resample, dims=dims))
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))

        self.out = nn.Sequential(
            normalization(ch),
            SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    @property
    def inner_dtype(self):
        """
        Get the dtype used by the torso of the model.
        """
        return next(self.input_blocks.parameters()).dtype

    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.inner_dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        for module in self.output_blocks:
            cat_in = th.cat([h, hs.pop()], dim=1)
            h = module(cat_in, emb)
        h = h.type(x.dtype)
        return self.out(h)

    def get_feature_vectors(self, x, timesteps, y=None):
        """
        Apply the model and return all of the intermediate tensors.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: a dict with the following keys:
                 - 'down': a list of hidden state tensors from downsampling.
                 - 'middle': the tensor of the output of the lowest-resolution
                   block in the model.
                 - 'up': a list of hidden state tensors from upsampling.
        """
        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)
        result = dict(down=[], up=[])
        h = x.type(self.inner_dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
            result["down"].append(h.type(x.dtype))
        h = self.middle_block(h, emb)
        result["middle"] = h.type(x.dtype)
        for module in self.output_blocks:
            cat_in = th.cat([h, hs.pop()], dim=1)
            h = module(cat_in, emb)
            result["up"].append(h.type(x.dtype))
        return result
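
# Note: input_block_chans records the channel count of every downsampling
# stage; output_blocks pops it in reverse so that each up block concatenates
# the matching skip connection (th.cat([h, hs.pop()], dim=1)) onto its input.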

class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, in_channels, *args, **kwargs):
        super().__init__(in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = th.cat([x, upsampled], dim=1)
        return super().forward(x, timesteps, **kwargs)

    def get_feature_vectors(self, x, timesteps, low_res=None, **kwargs):
        # x is NCHW, so height and width are the last two dimensions.
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = th.cat([x, upsampled], dim=1)
        return super().get_feature_vectors(x, timesteps, **kwargs)

class PixelGuideModel(UNetModel):
    """
    A UNetModel that needs a guide tensor whose spatial size matches the
    input tensor after `guide_fold`x space-to-depth folding.

    Expects an extra kwarg `guide` as a condition for the model.
    """

    def __init__(self, in_channels, guide_channels, *args, guide_fold=1, **kwargs):
        super().__init__(in_channels + guide_channels * guide_fold**2, *args, **kwargs)

        self.guide_folder = SpaceToDepth(guide_fold)

    def forward(self, x, timesteps, guide=None, **kwargs):
        guide = self.guide_folder(guide)
        x = th.cat([x, guide], dim=1)
        return super().forward(x, timesteps, **kwargs)

    def get_feature_vectors(self, x, timesteps, guide=None, **kwargs):
        guide = self.guide_folder(guide)
        x = th.cat([x, guide], dim=1)
        return super().get_feature_vectors(x, timesteps, **kwargs)

class PixelGuideSuperResModel(PixelGuideModel):
    """
    A PixelGuideModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, in_channels, *args, **kwargs):
        super().__init__(in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = th.cat([x, upsampled], dim=1)
        return super().forward(x, timesteps, **kwargs)

    def get_feature_vectors(self, x, timesteps, low_res=None, **kwargs):
        # x is NCHW, so height and width are the last two dimensions.
        _, _, new_height, new_width = x.shape
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        x = th.cat([x, upsampled], dim=1)
        return super().get_feature_vectors(x, timesteps, **kwargs)
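
The three conditioning wrappers above compose by concatenating extra channels onto the UNet input: `low_res` is bilinearly upsampled to the input size, and the guide is space-to-depth folded to match it. A minimal shape-check sketch, with a made-up tiny configuration and tensor sizes chosen purely for illustration (real runs use the defaults in script_util.py):

import torch as th
from pixel_guide_diffusion.unet import PixelGuideSuperResModel

# Hypothetical, tiny configuration.
model = PixelGuideSuperResModel(
    in_channels=3,               # doubled internally to hold the upsampled low_res
    guide_channels=1,            # e.g. a one-channel sketch guide
    model_channels=32,
    out_channels=3,
    num_res_blocks=1,
    attention_resolutions=(4,),
    guide_fold=1,                # guide already matches the input resolution
)
x = th.randn(2, 3, 32, 32)       # noisy high-res batch
t = th.randint(0, 1000, (2,))    # diffusion timesteps
guide = th.randn(2, 1, 32, 32)   # pixel guide at the input resolution
low_res = th.randn(2, 3, 8, 8)   # first-stage output to be super-resolved
out = model(x, t, guide=guide, low_res=low_res)
assert out.shape == x.shape      # the model predicts at the input resolution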
requirements.txt
ADDED
@@ -0,0 +1,6 @@
blobfile
mpi4py
gradio==3.0.5
urllib3==1.24.3
torch
torchvision
scripts/cascaded_pixel_guide_sample.py
ADDED
@@ -0,0 +1,148 @@
"""
Generate cascaded samples: a base pixel-guide model colorizes at low
resolution, and a pixel-guide super-resolution model then upsamples its
output, conditioning on the same guide.
"""

import argparse
import os

import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist

from torchvision import utils
from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.script_util import (
    pg_model_and_diffusion_defaults,
    pg_create_model_and_diffusion,
    pgsr_model_and_diffusion_defaults,
    pgsr_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = pg_create_model_and_diffusion(
        **args_to_dict(args, pg_model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("creating model2...")
    args.num_channels = args.num_channels2
    args.use_attention = args.use_attention2
    model2, diffusion2 = pgsr_create_model_and_diffusion(
        **args_to_dict(args, pgsr_model_and_diffusion_defaults().keys())
    )
    model2.load_state_dict(
        dist_util.load_state_dict(args.model_path2, map_location="cpu")
    )
    model2.to(dist_util.dev())
    model2.eval()

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.large_size,
        class_cond=args.class_cond,
        guide_dir=args.guide_dir,
        guide_size=args.guide_size,
        deterministic=True,
    )

    if args.seed > -1:
        th.manual_seed(args.seed)

    logger.log("creating samples...")
    os.makedirs('sample', exist_ok=True)
    i = 0
    while i * args.batch_size < args.num_samples:
        if dist.get_rank() == 0:
            target, model_kwargs = next(data)
            target = target.to(dist_util.dev())
            model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}

            with th.no_grad():
                sample_fn = (
                    diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
                )
                sample = sample_fn(
                    model,
                    (args.batch_size, 3, args.image_size, args.image_size),
                    clip_denoised=args.clip_denoised,
                    model_kwargs=model_kwargs,
                )

                model_kwargs["low_res"] = sample
                sample_fn2 = (
                    diffusion2.p_sample_loop if not args.use_ddim else diffusion2.ddim_sample_loop
                )
                sample2 = sample_fn2(
                    model2,
                    (args.batch_size, 3, args.large_size, args.large_size),
                    clip_denoised=args.clip_denoised,
                    model_kwargs=model_kwargs,
                )

            guide = model_kwargs["guide"]
            h, w = guide.shape[2:]
            guide = guide.clamp(-1, 1).repeat(1, 3, 1, 1)
            sample = th.nn.functional.interpolate(sample.clamp(-1, 1), size=(h, w))
            sample2 = th.nn.functional.interpolate(sample2.clamp(-1, 1), size=(h, w))
            target = th.nn.functional.interpolate(target.clamp(-1, 1), size=(h, w))

            # images = th.cat([guide, sample, sample2, target], 0)
            images = th.cat([guide, sample2, target], 0)
            utils.save_image(
                images,
                f"sample/{str(i).zfill(6)}.png",
                nrow=args.batch_size,
                normalize=True,
                range=(-1, 1),
            )

        i += 1
        logger.log(f"created {i * args.batch_size} samples")

    logger.log("sampling complete")
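
# Note: the `*2`-suffixed defaults below let a single argument namespace
# configure both stages; main() copies num_channels2/use_attention2 over the
# base values before building the super-resolution model.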

def create_argparser():
    defaults = dict(
        data_dir="",
        guide_dir="",
        clip_denoised=True,
        num_samples=100,
        batch_size=4,
        use_ddim=False,
        base_samples="",
        model_path="",
        seed=-1,
    )
    defaults.update(pg_model_and_diffusion_defaults())
    defaults.update(pgsr_model_and_diffusion_defaults())
    defaults.update(dict(
        num_channels2=128,
        use_attention2=True,
        model_path2="",
    ))
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
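
One plausible invocation of the cascade, pairing the two EMA checkpoints shipped with this Space; the data paths are placeholders, and every other flag falls back to the merged `*_defaults()` dicts:

    python scripts/cascaded_pixel_guide_sample.py --data_dir data/anime --guide_dir data/anime_sketch --model_path danbooru2017_guided_log/ema_0.9999_360000.pt --model_path2 danbooru2017_guided_sr_log/ema_0.9999_360000.pt --num_samples 8 --batch_size 4 --seed 0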
scripts/image_nll.py
ADDED
@@ -0,0 +1,96 @@
"""
Approximate the bits/dimension for an image model.
"""

import argparse
import os

import numpy as np
import torch.distributed as dist

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.script_util import (
    model_and_diffusion_defaults,
    create_model_and_diffusion,
    add_dict_to_argparser,
    args_to_dict,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
        deterministic=True,
    )

    logger.log("evaluating...")
    run_bpd_evaluation(model, diffusion, data, args.num_samples, args.clip_denoised)


def run_bpd_evaluation(model, diffusion, data, num_samples, clip_denoised):
    all_bpd = []
    all_metrics = {"vb": [], "mse": [], "xstart_mse": []}
    num_complete = 0
    while num_complete < num_samples:
        batch, model_kwargs = next(data)
        batch = batch.to(dist_util.dev())
        model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
        minibatch_metrics = diffusion.calc_bpd_loop(
            model, batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )

        for key, term_list in all_metrics.items():
            terms = minibatch_metrics[key].mean(dim=0) / dist.get_world_size()
            dist.all_reduce(terms)
            term_list.append(terms.detach().cpu().numpy())

        total_bpd = minibatch_metrics["total_bpd"]
        total_bpd = total_bpd.mean() / dist.get_world_size()
        dist.all_reduce(total_bpd)
        all_bpd.append(total_bpd.item())
        num_complete += dist.get_world_size() * batch.shape[0]

        logger.log(f"done {num_complete} samples: bpd={np.mean(all_bpd)}")

    if dist.get_rank() == 0:
        for name, terms in all_metrics.items():
            out_path = os.path.join(logger.get_dir(), f"{name}_terms.npz")
            logger.log(f"saving {name} terms to {out_path}")
            np.savez(out_path, np.mean(np.stack(terms), axis=0))

    dist.barrier()
    logger.log("evaluation complete")


def create_argparser():
    defaults = dict(
        data_dir="", clip_denoised=True, num_samples=1000, batch_size=1, model_path=""
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/image_sample.py
ADDED
@@ -0,0 +1,106 @@
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""

import argparse
import os

import numpy as np
import torch as th
import torch.distributed as dist

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.script_util import (
    NUM_CLASSES,
    model_and_diffusion_defaults,
    create_model_and_diffusion,
    add_dict_to_argparser,
    args_to_dict,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("sampling...")
    all_images = []
    all_labels = []
    while len(all_images) * args.batch_size < args.num_samples:
        model_kwargs = {}
        if args.class_cond:
            classes = th.randint(
                low=0, high=NUM_CLASSES, size=(args.batch_size,), device=dist_util.dev()
            )
            model_kwargs["y"] = classes
        sample_fn = (
            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
        )
        sample = sample_fn(
            model,
            (args.batch_size, 3, args.image_size, args.image_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
        )
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()

        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)  # gather not supported with NCCL
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        if args.class_cond:
            gathered_labels = [
                th.zeros_like(classes) for _ in range(dist.get_world_size())
            ]
            dist.all_gather(gathered_labels, classes)
            all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f"created {len(all_images) * args.batch_size} samples")

    arr = np.concatenate(all_images, axis=0)
    arr = arr[: args.num_samples]
    if args.class_cond:
        label_arr = np.concatenate(all_labels, axis=0)
        label_arr = label_arr[: args.num_samples]
    if dist.get_rank() == 0:
        shape_str = "x".join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
        logger.log(f"saving to {out_path}")
        if args.class_cond:
            np.savez(out_path, arr, label_arr)
        else:
            np.savez(out_path, arr)

    dist.barrier()
    logger.log("sampling complete")


def create_argparser():
    defaults = dict(
        clip_denoised=True,
        num_samples=10000,
        batch_size=16,
        use_ddim=False,
        model_path="",
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
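
The sampler writes a uint8 `[N x H x W x 3]` array (plus a label array when class-conditional) under numpy's default archive keys. A minimal reader sketch; the file name here is hypothetical, since the real name encodes the sampled shape:

import numpy as np

obj = np.load("samples_16x64x64x3.npz")   # hypothetical name; it encodes arr.shape
images = obj["arr_0"]                     # uint8 samples, [N, H, W, 3]
labels = obj["arr_1"] if "arr_1" in obj.files else None  # class-conditional runs only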
scripts/image_train.py
ADDED
@@ -0,0 +1,83 @@
"""
Train a diffusion model on images.
"""

import argparse

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.resample import create_named_schedule_sampler
from pixel_guide_diffusion.script_util import (
    model_and_diffusion_defaults,
    create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from pixel_guide_diffusion.train_util import TrainLoop


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
    )

    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()


def create_argparser():
    defaults = dict(
        data_dir="",
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,  # -1 disables microbatches
        ema_rate="0.9999",  # comma-separated list of EMA values
        log_interval=10,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/pixel_guide_sample.py
ADDED
@@ -0,0 +1,111 @@
"""
Generate a large batch of samples from a pixel-guide model, conditioning on
guide images supplied by the data loader.
"""

import argparse
import os

import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist

from torchvision import utils
from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.script_util import (
    pg_model_and_diffusion_defaults,
    pg_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = pg_create_model_and_diffusion(
        **args_to_dict(args, pg_model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
        guide_dir=args.guide_dir,
        guide_size=args.guide_size,
        deterministic=True,
    )

    logger.log("creating samples...")
    os.makedirs('sample', exist_ok=True)
    i = 0
    while i * args.batch_size < args.num_samples:
        if dist.get_rank() == 0:
            target, model_kwargs = next(data)
            target = target.to(dist_util.dev())
            model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}

            with th.no_grad():
                sample_fn = (
                    diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
                )
                sample = sample_fn(
                    model,
                    (args.batch_size, 3, args.image_size, args.image_size),
                    clip_denoised=args.clip_denoised,
                    model_kwargs=model_kwargs,
                )

            guide = model_kwargs["guide"]
            h, w = guide.shape[2:]
            guide = guide.clamp(-1, 1).repeat(1, 3, 1, 1)
            sample = th.nn.functional.interpolate(sample.clamp(-1, 1), size=(h, w))
            target = th.nn.functional.interpolate(target.clamp(-1, 1), size=(h, w))

            images = th.cat([guide, sample, target], 0)
            utils.save_image(
                images,
                f"sample/{str(i).zfill(6)}.png",
                nrow=args.batch_size,
                normalize=True,
                range=(-1, 1),
            )

        i += 1
        logger.log(f"created {i * args.batch_size} samples")

    logger.log("sampling complete")


def create_argparser():
    defaults = dict(
        data_dir="",
        guide_dir="",
        clip_denoised=True,
        num_samples=100,
        batch_size=4,
        use_ddim=False,
        base_samples="",
        model_path="",
    )
    defaults.update(pg_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/pixel_guide_super_res_sample.py
ADDED
@@ -0,0 +1,133 @@
"""
Generate a large batch of samples from a pixel-guide super-resolution model,
conditioning on guides and on low-resolution inputs downsampled from the
dataset targets.
"""

import argparse
import os

import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist

from torchvision import utils
from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.script_util import (
    pgsr_model_and_diffusion_defaults,
    pgsr_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = pgsr_create_model_and_diffusion(
        **args_to_dict(args, pgsr_model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("creating data loader...")
    data = load_superres_data(
        args.data_dir,
        args.batch_size,
        large_size=args.large_size,
        small_size=args.small_size,
        class_cond=args.class_cond,
        guide_dir=args.guide_dir,
        guide_size=args.guide_size,
        crop_size=args.crop_size,
        deterministic=True,
    )

    logger.log("creating samples...")
    os.makedirs('sample', exist_ok=True)
    i = 0
    while i * args.batch_size < args.num_samples:
        if dist.get_rank() == 0:
            target, model_kwargs = next(data)
            target = target.to(dist_util.dev())
            model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
            model_kwargs["low_res"] = th.nn.functional.interpolate(target, args.small_size, mode="area").detach()

            with th.no_grad():
                sample_fn = (
                    diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
                )
                sample = sample_fn(
                    model,
                    (args.batch_size, 3, args.crop_size, args.crop_size),
                    clip_denoised=args.clip_denoised,
                    model_kwargs=model_kwargs,
                )

            guide = model_kwargs["guide"]
            low_res = model_kwargs["low_res"]
            h, w = guide.shape[2:]
            guide = guide.clamp(-1, 1).repeat(1, 3, 1, 1)
            low_res = th.nn.functional.interpolate(low_res.clamp(-1, 1), size=(h, w))
            sample = th.nn.functional.interpolate(sample.clamp(-1, 1), size=(h, w))
            target = th.nn.functional.interpolate(target.clamp(-1, 1), size=(h, w))

            images = th.cat([guide, low_res, sample, target], 0)
            utils.save_image(
                images,
                f"sample/{str(i).zfill(6)}.png",
                nrow=args.batch_size,
                normalize=True,
                range=(-1, 1),
            )

        i += 1
        logger.log(f"created {i * args.batch_size} samples")

    logger.log("sampling complete")


def load_superres_data(data_dir, batch_size, large_size, small_size, class_cond=False, guide_dir='', guide_size=0, crop_size=0, deterministic=False):
    data = load_data(
        data_dir=data_dir,
        batch_size=batch_size,
        image_size=large_size,
        class_cond=class_cond,
        guide_dir=guide_dir,
        guide_size=guide_size,
        crop_size=crop_size,
        deterministic=deterministic,
    )
    for large_batch, model_kwargs in data:
        model_kwargs["low_res"] = th.nn.functional.interpolate(large_batch, scale_factor=small_size/large_size, mode="area").detach()
        yield large_batch, model_kwargs


def create_argparser():
    defaults = dict(
        data_dir="",
        guide_dir="",
        crop_size=128,
        clip_denoised=True,
        num_samples=100,
        batch_size=4,
        use_ddim=False,
        base_samples="",
        model_path="",
    )
    defaults.update(pgsr_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/pixel_guide_super_res_train.py
ADDED
@@ -0,0 +1,108 @@
"""
Train a pixel-guide super-resolution model.
"""

import argparse

import torch.nn.functional as F

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.resample import create_named_schedule_sampler
from pixel_guide_diffusion.script_util import (
    pgsr_model_and_diffusion_defaults,
    pgsr_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from pixel_guide_diffusion.train_util import TrainLoop


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = pgsr_create_model_and_diffusion(
        **args_to_dict(args, pgsr_model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)

    logger.log("creating data loader...")
    data = load_superres_data(
        args.data_dir,
        args.batch_size,
        large_size=args.large_size,
        small_size=args.small_size,
        class_cond=args.class_cond,
        guide_dir=args.guide_dir,
        guide_size=args.guide_size,
        crop_size=args.crop_size,
        deterministic=True,
    )

    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()


def load_superres_data(data_dir, batch_size, large_size, small_size, class_cond=False, guide_dir='', guide_size=0, crop_size=0, deterministic=False):
    data = load_data(
        data_dir=data_dir,
        batch_size=batch_size,
        image_size=large_size,
        class_cond=class_cond,
        guide_dir=guide_dir,
        guide_size=guide_size,
        crop_size=crop_size,
        deterministic=deterministic,
    )
    for large_batch, model_kwargs in data:
        model_kwargs["low_res"] = F.interpolate(large_batch, scale_factor=small_size/large_size, mode="area")
        yield large_batch, model_kwargs


def create_argparser():
    defaults = dict(
        data_dir="",
        guide_dir="",
        crop_size=32,
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,
        ema_rate="0.9999",
        log_interval=10,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
    )
    defaults.update(pgsr_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/pixel_guide_train.py
ADDED
@@ -0,0 +1,89 @@
"""
Train a pixel-guide diffusion model.
"""

import argparse

import torch.nn.functional as F

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.resample import create_named_schedule_sampler
from pixel_guide_diffusion.script_util import (
    pg_model_and_diffusion_defaults,
    pg_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from pixel_guide_diffusion.train_util import TrainLoop


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = pg_create_model_and_diffusion(
        **args_to_dict(args, pg_model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
        guide_dir=args.guide_dir,
        guide_size=args.guide_size,
        deterministic=True,
    )

    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()


def create_argparser():
    defaults = dict(
        data_dir="",
        guide_dir="",
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,
        ema_rate="0.9999",
        log_interval=10,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
    )
    defaults.update(pg_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/super_res_sample.py
ADDED
@@ -0,0 +1,117 @@
"""
Generate a large batch of samples from a super resolution model, given a batch
of samples from a regular model from image_sample.py.
"""

import argparse
import os

import blobfile as bf
import numpy as np
import torch as th
import torch.distributed as dist

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.script_util import (
    sr_model_and_diffusion_defaults,
    sr_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = sr_create_model_and_diffusion(
        **args_to_dict(args, sr_model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(
        dist_util.load_state_dict(args.model_path, map_location="cpu")
    )
    model.to(dist_util.dev())
    model.eval()

    logger.log("loading data...")
    data = load_data_for_worker(args.base_samples, args.batch_size, args.class_cond)

    logger.log("creating samples...")
    all_images = []
    while len(all_images) * args.batch_size < args.num_samples:
        model_kwargs = next(data)
        model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()}
        sample = diffusion.p_sample_loop(
            model,
            (args.batch_size, 3, args.large_size, args.large_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
        )
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()

        all_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(all_samples, sample)  # gather not supported with NCCL
        for sample in all_samples:
            all_images.append(sample.cpu().numpy())
        logger.log(f"created {len(all_images) * args.batch_size} samples")

    arr = np.concatenate(all_images, axis=0)
    arr = arr[: args.num_samples]
    if dist.get_rank() == 0:
        shape_str = "x".join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f"samples_{shape_str}.npz")
        logger.log(f"saving to {out_path}")
        np.savez(out_path, arr)

    dist.barrier()
    logger.log("sampling complete")


def load_data_for_worker(base_samples, batch_size, class_cond):
    with bf.BlobFile(base_samples, "rb") as f:
        obj = np.load(f)
        image_arr = obj["arr_0"]
        if class_cond:
            label_arr = obj["arr_1"]
    rank = dist.get_rank()
    num_ranks = dist.get_world_size()
    buffer = []
    label_buffer = []
    while True:
        for i in range(rank, len(image_arr), num_ranks):
            buffer.append(image_arr[i])
            if class_cond:
                label_buffer.append(label_arr[i])
            if len(buffer) == batch_size:
                batch = th.from_numpy(np.stack(buffer)).float()
                batch = batch / 127.5 - 1.0
                batch = batch.permute(0, 3, 1, 2)
                res = dict(low_res=batch)
                if class_cond:
                    res["y"] = th.from_numpy(np.stack(label_buffer))
                yield res
                buffer, label_buffer = [], []


def create_argparser():
    defaults = dict(
        clip_denoised=True,
        num_samples=10000,
        batch_size=16,
        use_ddim=False,
        base_samples="",
        model_path="",
    )
    defaults.update(sr_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
scripts/super_res_train.py
ADDED
@@ -0,0 +1,98 @@
"""
Train a super-resolution model.
"""

import argparse

import torch.nn.functional as F

from pixel_guide_diffusion import dist_util, logger
from pixel_guide_diffusion.image_datasets import load_data
from pixel_guide_diffusion.resample import create_named_schedule_sampler
from pixel_guide_diffusion.script_util import (
    sr_model_and_diffusion_defaults,
    sr_create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from pixel_guide_diffusion.train_util import TrainLoop


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model...")
    model, diffusion = sr_create_model_and_diffusion(
        **args_to_dict(args, sr_model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)

    logger.log("creating data loader...")
    data = load_superres_data(
        args.data_dir,
        args.batch_size,
        large_size=args.large_size,
        small_size=args.small_size,
        class_cond=args.class_cond,
    )

    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()


def load_superres_data(data_dir, batch_size, large_size, small_size, class_cond=False):
    data = load_data(
        data_dir=data_dir,
        batch_size=batch_size,
        image_size=large_size,
        class_cond=class_cond,
    )
    for large_batch, model_kwargs in data:
        model_kwargs["low_res"] = F.interpolate(large_batch, small_size, mode="area")
        yield large_batch, model_kwargs
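
# Note: "area" interpolation is equivalent to adaptive average pooling, so
# each low_res pixel is the mean of the corresponding high-res patch; the
# (low_res, target) training pairs are generated on the fly from the
# high-resolution batch alone.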

def create_argparser():
    defaults = dict(
        data_dir="",
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,
        ema_rate="0.9999",
        log_interval=10,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
    )
    defaults.update(sr_model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
setup.py
ADDED
@@ -0,0 +1,7 @@
from setuptools import setup

setup(
    name="pixel-guide-diffusion",
    py_modules=["pixel_guide_diffusion"],
    install_requires=["blobfile>=1.0.5", "torch", "tqdm"],
)