qninhdt committed
Commit
ec1cb04
1 Parent(s): 0e152ec

Upload 68 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +5 -0
  2. LICENSE +212 -0
  3. README.md +316 -0
  4. data/__init__.py +98 -0
  5. data/__pycache__/__init__.cpython-310.pyc +0 -0
  6. data/__pycache__/base_dataset.cpython-310.pyc +0 -0
  7. data/__pycache__/image_folder.cpython-310.pyc +0 -0
  8. data/__pycache__/unaligned_dataset.cpython-310.pyc +0 -0
  9. data/base_dataset.py +230 -0
  10. data/image_folder.py +66 -0
  11. data/single_dataset.py +40 -0
  12. data/singleimage_dataset.py +108 -0
  13. data/template_dataset.py +75 -0
  14. data/unaligned_dataset.py +78 -0
  15. docs/datasets.md +45 -0
  16. environment.yml +16 -0
  17. experiments/__init__.py +54 -0
  18. experiments/__main__.py +87 -0
  19. experiments/grumpifycat_launcher.py +28 -0
  20. experiments/placeholder_launcher.py +81 -0
  21. experiments/pretrained_launcher.py +61 -0
  22. experiments/singleimage_launcher.py +18 -0
  23. experiments/tmux_launcher.py +215 -0
  24. imgs/gif_cut.gif +3 -0
  25. imgs/grumpycat.jpg +0 -0
  26. imgs/horse2zebra_comparison.jpg +3 -0
  27. imgs/paris.jpg +0 -0
  28. imgs/patchnce.gif +3 -0
  29. imgs/results.gif +3 -0
  30. imgs/singleimage.gif +3 -0
  31. models/__init__.py +67 -0
  32. models/__pycache__/__init__.cpython-310.pyc +0 -0
  33. models/__pycache__/base_model.cpython-310.pyc +0 -0
  34. models/__pycache__/cut_model.cpython-310.pyc +0 -0
  35. models/__pycache__/cycle_gan_model.cpython-310.pyc +0 -0
  36. models/__pycache__/networks.cpython-310.pyc +0 -0
  37. models/__pycache__/patchnce.cpython-310.pyc +0 -0
  38. models/__pycache__/stylegan_networks.cpython-310.pyc +0 -0
  39. models/base_model.py +258 -0
  40. models/cut_model.py +214 -0
  41. models/cycle_gan_model.py +325 -0
  42. models/networks.py +1403 -0
  43. models/patchnce.py +55 -0
  44. models/sincut_model.py +79 -0
  45. models/stylegan_networks.py +914 -0
  46. models/template_model.py +99 -0
  47. options/__init__.py +1 -0
  48. options/__pycache__/__init__.cpython-310.pyc +0 -0
  49. options/__pycache__/base_options.cpython-310.pyc +0 -0
  50. options/__pycache__/train_options.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ imgs/gif_cut.gif filter=lfs diff=lfs merge=lfs -text
+ imgs/horse2zebra_comparison.jpg filter=lfs diff=lfs merge=lfs -text
+ imgs/patchnce.gif filter=lfs diff=lfs merge=lfs -text
+ imgs/results.gif filter=lfs diff=lfs merge=lfs -text
+ imgs/singleimage.gif filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,212 @@
1
+ Copyright (c) 2020, Taesung Park and Jun-Yan Zhu
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
+
25
+ --------------------------- LICENSE FOR CycleGAN -------------------------------
26
+ -------------------https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix------
27
+ Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
28
+ All rights reserved.
29
+
30
+ Redistribution and use in source and binary forms, with or without
31
+ modification, are permitted provided that the following conditions are met:
32
+
33
+ * Redistributions of source code must retain the above copyright notice, this
34
+ list of conditions and the following disclaimer.
35
+
36
+ * Redistributions in binary form must reproduce the above copyright notice,
37
+ this list of conditions and the following disclaimer in the documentation
38
+ and/or other materials provided with the distribution.
39
+
40
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
41
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
43
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
44
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
46
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
47
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
48
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
49
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50
+
51
+ --------------------------- LICENSE FOR stylegan2-pytorch ----------------------
52
+ ----------------https://github.com/rosinality/stylegan2-pytorch/----------------
53
+ MIT License
54
+
55
+ Copyright (c) 2019 Kim Seonghyeon
56
+
57
+ Permission is hereby granted, free of charge, to any person obtaining a copy
58
+ of this software and associated documentation files (the "Software"), to deal
59
+ in the Software without restriction, including without limitation the rights
60
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
61
+ copies of the Software, and to permit persons to whom the Software is
62
+ furnished to do so, subject to the following conditions:
63
+
64
+ The above copyright notice and this permission notice shall be included in all
65
+ copies or substantial portions of the Software.
66
+
67
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
68
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
69
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
70
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
71
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
72
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
73
+ SOFTWARE.
74
+
75
+
76
+ --------------------------- LICENSE FOR pix2pix --------------------------------
77
+ BSD License
78
+
79
+ For pix2pix software
80
+ Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
81
+ All rights reserved.
82
+
83
+ Redistribution and use in source and binary forms, with or without
84
+ modification, are permitted provided that the following conditions are met:
85
+
86
+ * Redistributions of source code must retain the above copyright notice, this
87
+ list of conditions and the following disclaimer.
88
+
89
+ * Redistributions in binary form must reproduce the above copyright notice,
90
+ this list of conditions and the following disclaimer in the documentation
91
+ and/or other materials provided with the distribution.
92
+
93
+ ----------------------------- LICENSE FOR DCGAN --------------------------------
94
+ BSD License
95
+
96
+ For dcgan.torch software
97
+
98
+ Copyright (c) 2015, Facebook, Inc. All rights reserved.
99
+
100
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
101
+
102
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
103
+
104
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
105
+
106
+ Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
107
+
108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
109
+
110
+ --------------------------- LICENSE FOR StyleGAN2 ------------------------------
111
+ --------------------------- Inherited from stylegan2-pytorch -------------------
112
+ Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
113
+
114
+
115
+ Nvidia Source Code License-NC
116
+
117
+ =======================================================================
118
+
119
+ 1. Definitions
120
+
121
+ "Licensor" means any person or entity that distributes its Work.
122
+
123
+ "Software" means the original work of authorship made available under
124
+ this License.
125
+
126
+ "Work" means the Software and any additions to or derivative works of
127
+ the Software that are made available under this License.
128
+
129
+ "Nvidia Processors" means any central processing unit (CPU), graphics
130
+ processing unit (GPU), field-programmable gate array (FPGA),
131
+ application-specific integrated circuit (ASIC) or any combination
132
+ thereof designed, made, sold, or provided by Nvidia or its affiliates.
133
+
134
+ The terms "reproduce," "reproduction," "derivative works," and
135
+ "distribution" have the meaning as provided under U.S. copyright law;
136
+ provided, however, that for the purposes of this License, derivative
137
+ works shall not include works that remain separable from, or merely
138
+ link (or bind by name) to the interfaces of, the Work.
139
+
140
+ Works, including the Software, are "made available" under this License
141
+ by including in or with the Work either (a) a copyright notice
142
+ referencing the applicability of this License to the Work, or (b) a
143
+ copy of this License.
144
+
145
+ 2. License Grants
146
+
147
+ 2.1 Copyright Grant. Subject to the terms and conditions of this
148
+ License, each Licensor grants to you a perpetual, worldwide,
149
+ non-exclusive, royalty-free, copyright license to reproduce,
150
+ prepare derivative works of, publicly display, publicly perform,
151
+ sublicense and distribute its Work and any resulting derivative
152
+ works in any form.
153
+
154
+ 3. Limitations
155
+
156
+ 3.1 Redistribution. You may reproduce or distribute the Work only
157
+ if (a) you do so under this License, (b) you include a complete
158
+ copy of this License with your distribution, and (c) you retain
159
+ without modification any copyright, patent, trademark, or
160
+ attribution notices that are present in the Work.
161
+
162
+ 3.2 Derivative Works. You may specify that additional or different
163
+ terms apply to the use, reproduction, and distribution of your
164
+ derivative works of the Work ("Your Terms") only if (a) Your Terms
165
+ provide that the use limitation in Section 3.3 applies to your
166
+ derivative works, and (b) you identify the specific derivative
167
+ works that are subject to Your Terms. Notwithstanding Your Terms,
168
+ this License (including the redistribution requirements in Section
169
+ 3.1) will continue to apply to the Work itself.
170
+
171
+ 3.3 Use Limitation. The Work and any derivative works thereof only
172
+ may be used or intended for use non-commercially. The Work or
173
+ derivative works thereof may be used or intended for use by Nvidia
174
+ or its affiliates commercially or non-commercially. As used herein,
175
+ "non-commercially" means for research or evaluation purposes only.
176
+
177
+ 3.4 Patent Claims. If you bring or threaten to bring a patent claim
178
+ against any Licensor (including any claim, cross-claim or
179
+ counterclaim in a lawsuit) to enforce any patents that you allege
180
+ are infringed by any Work, then your rights under this License from
181
+ such Licensor (including the grants in Sections 2.1 and 2.2) will
182
+ terminate immediately.
183
+
184
+ 3.5 Trademarks. This License does not grant any rights to use any
185
+ Licensor's or its affiliates' names, logos, or trademarks, except
186
+ as necessary to reproduce the notices described in this License.
187
+
188
+ 3.6 Termination. If you violate any term of this License, then your
189
+ rights under this License (including the grants in Sections 2.1 and
190
+ 2.2) will terminate immediately.
191
+
192
+ 4. Disclaimer of Warranty.
193
+
194
+ THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
195
+ KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
196
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
197
+ NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
198
+ THIS LICENSE.
199
+
200
+ 5. Limitation of Liability.
201
+
202
+ EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
203
+ THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
204
+ SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
205
+ INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
206
+ OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
207
+ (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
208
+ LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
209
+ COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
210
+ THE POSSIBILITY OF SUCH DAMAGES.
211
+
212
+ =======================================================================
README.md ADDED
@@ -0,0 +1,316 @@
1
+
2
+
3
+ # Contrastive Unpaired Translation (CUT)
4
+
5
+ ### [video (1m)](https://youtu.be/Llg0vE_MVgk) | [video (10m)](https://youtu.be/jSGOzjmN8q0) | [website](http://taesung.me/ContrastiveUnpairedTranslation/) | [paper](https://arxiv.org/pdf/2007.15651)
6
+ <br>
7
+
8
+ <img src='imgs/gif_cut.gif' align="right" width=960>
9
+
10
+ <br><br><br>
11
+
12
+
13
+
14
+ We provide our PyTorch implementation of unpaired image-to-image translation based on patchwise contrastive learning and adversarial learning. No hand-crafted loss or inverse network is needed. Compared to [CycleGAN](https://github.com/junyanz/CycleGAN), our model training is faster and less memory-intensive. In addition, our method can be extended to single-image training, where each “domain” is only a *single* image.
15
+
16
+
17
+
18
+
19
+ [Contrastive Learning for Unpaired Image-to-Image Translation](http://taesung.me/ContrastiveUnpairedTranslation/)
20
+ [Taesung Park](https://taesung.me/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros/), [Richard Zhang](https://richzhang.github.io/), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)<br>
21
+ UC Berkeley and Adobe Research<br>
22
+ In ECCV 2020
23
+
24
+
25
+ <img src='imgs/patchnce.gif' align="right" width=960>
26
+
27
+ <br><br><br>
28
+
29
+ ### Pseudo code
30
+ ```python
+ import torch
+ cross_entropy_loss = torch.nn.CrossEntropyLoss()
+ 
+ # Input: f_q (BxCxS) are sampled features from H(G_enc(x))
+ # Input: f_k (BxCxS) are sampled features from H(G_enc(G(x)))
+ # Input: tau is the temperature used in PatchNCE loss.
+ # Output: PatchNCE loss
+ def PatchNCELoss(f_q, f_k, tau=0.07):
+     # batch size, channel size, and number of sample locations
+     B, C, S = f_q.shape
+ 
+     # calculate v * v+: BxSx1
+     l_pos = (f_k * f_q).sum(dim=1)[:, :, None]
+ 
+     # calculate v * v-: BxSxS
+     l_neg = torch.bmm(f_q.transpose(1, 2), f_k)
+ 
+     # The diagonal entries are not negatives. Remove them.
+     identity_matrix = torch.eye(S, dtype=torch.bool)[None, :, :]
+     l_neg.masked_fill_(identity_matrix, -float('inf'))
+ 
+     # calculate logits: (B)x(S)x(S+1)
+     logits = torch.cat((l_pos, l_neg), dim=2) / tau
+ 
+     # return PatchNCE loss
+     predictions = logits.flatten(0, 1)
+     targets = torch.zeros(B * S, dtype=torch.long)
+     return cross_entropy_loss(predictions, targets)
+ ```
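+ 
+ As a quick sanity check of the shapes involved, the pseudo code above can be exercised with random tensors (the sizes below are illustrative only):
+ ```python
+ f_q = torch.randn(4, 256, 64)   # B=4 images, C=256 channels, S=64 sampled patch locations
+ f_k = torch.randn(4, 256, 64)
+ loss = PatchNCELoss(f_q, f_k)   # scalar tensor; lower when f_q and f_k agree patch-wise
+ print(loss.item())
+ ```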
60
+ ## Example Results
61
+
62
+ ### Unpaired Image-to-Image Translation
63
+ <img src="imgs/results.gif" width="800px"/>
64
+
65
+ ### Single Image Unpaired Translation
66
+ <img src="imgs/singleimage.gif" width="800px"/>
67
+
68
+
69
+ ### Russian Blue Cat to Grumpy Cat
70
+ <img src="imgs/grumpycat.jpg" width="800px"/>
71
+
72
+ ### Parisian Street to Burano's painted houses
73
+ <img src="imgs/paris.jpg" width="800px"/>
74
+
75
+
76
+
77
+ ## Prerequisites
78
+ - Linux or macOS
79
+ - Python 3
80
+ - CPU or NVIDIA GPU + CUDA CuDNN
81
+
82
+ ### Update log
83
+
84
+ 9/12/2020: Added single-image translation.
85
+
86
+ ### Getting started
87
+
88
+ - Clone this repo:
89
+ ```bash
90
+ git clone https://github.com/taesungp/contrastive-unpaired-translation CUT
91
+ cd CUT
92
+ ```
93
+
94
+ - Install PyTorch 1.1 and other dependencies (e.g., torchvision, visdom, dominate, gputil).
95
+
96
+ For pip users, please type the command `pip install -r requirements.txt`.
97
+
98
+ For Conda users, you can create a new Conda environment using `conda env create -f environment.yml`.
99
+
100
+
101
+ ### CUT and FastCUT Training and Test
102
+
103
+ - Download the `grumpifycat` dataset (Fig. 8 of the paper; Russian Blue -> Grumpy Cats):
104
+ ```bash
105
+ bash ./datasets/download_cut_dataset.sh grumpifycat
106
+ ```
107
+ The dataset is downloaded and unzipped at `./datasets/grumpifycat/`.
108
+
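+ The loader expects the usual two-directory layout, read by `data/unaligned_dataset.py` (shown schematically; the comments are illustrative):
+ ```
+ datasets/grumpifycat/
+ ├── trainA/   # Russian Blue photos
+ └── trainB/   # Grumpy Cat photos
+ ```
+ 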
109
+ - To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
110
+
111
+ - Train the CUT model:
112
+ ```bash
113
+ python train.py --dataroot ./datasets/grumpifycat --name grumpycat_CUT --CUT_mode CUT
114
+ ```
115
+ Or train the FastCUT model
116
+ ```bash
117
+ python train.py --dataroot ./datasets/grumpifycat --name grumpycat_FastCUT --CUT_mode FastCUT
118
+ ```
119
+ The checkpoints will be stored at `./checkpoints/grumpycat_*/web`.
120
+
121
+ - Test the CUT model:
122
+ ```bash
123
+ python test.py --dataroot ./datasets/grumpifycat --name grumpycat_CUT --CUT_mode CUT --phase train
124
+ ```
125
+
126
+ The test results will be saved to an HTML file here: `./results/grumpifycat/latest_train/index.html`.
127
+
128
+ ### CUT, FastCUT, and CycleGAN
129
+ <img src="imgs/horse2zebra_comparison.jpg" width="800px"/><br>
130
+
131
+ CUT is trained with the identity preservation loss and with `lambda_NCE=1`, while FastCUT is trained without the identity loss but with a higher `lambda_NCE=10.0`. Compared to CycleGAN, CUT learns to perform more powerful distribution matching, while FastCUT is designed as a lighter (half the GPU memory, so it can fit larger images) and faster (twice as fast to train) alternative to CycleGAN. Please refer to the [paper](https://arxiv.org/abs/2007.15651) for more details.
132
+
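+ In other words, the two variants differ mainly in a couple of flags. A rough sketch (the `nce_idt` flag name is assumed from `models/cut_model.py`):
+ ```python
+ # CUT: identity (Y -> Y) NCE term enabled, moderate NCE weight
+ cut_opts = dict(CUT_mode="CUT", lambda_NCE=1.0, nce_idt=True)
+ # FastCUT: no identity term, larger NCE weight; lighter and faster to train
+ fastcut_opts = dict(CUT_mode="FastCUT", lambda_NCE=10.0, nce_idt=False)
+ ```
+ 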
133
+ In the above figure, we measure the percentage of pixels belonging to the horse/zebra bodies, using a pre-trained semantic segmentation model. We find a distribution mismatch between the sizes of horses and zebras in the images -- zebras usually appear larger (36.8% vs. 17.9%). Our full method CUT has the flexibility to enlarge the horses, as a means of matching the training statistics better than CycleGAN does. FastCUT behaves more conservatively, like CycleGAN.
134
+
135
+ ### Training using our launcher scripts
136
+
137
+ Please see `experiments/grumpifycat_launcher.py`, which generates the above command-line arguments. The launcher scripts are useful for managing the rather complicated command-line arguments used in training and testing.
138
+
139
+ Using the launcher, the commands below generate the training commands for CUT and FastCUT.
140
+ ```bash
141
+ python -m experiments grumpifycat train 0 # CUT
142
+ python -m experiments grumpifycat train 1 # FastCUT
143
+ ```
144
+
145
+ To test using the launcher,
146
+ ```bash
147
+ python -m experiments grumpifycat test 0 # CUT
148
+ python -m experiments grumpifycat test 1 # FastCUT
149
+ ```
150
+
151
+ Possible commands are `run`, `run_test`, `launch`, `close`, and so on. Please see `experiments/__main__.py` for all commands. Launchers are quick to define and use. For example, the grumpifycat launcher is defined in a few lines:
152
+ ```python
+ from .tmux_launcher import Options, TmuxLauncher
+ 
+ 
+ class Launcher(TmuxLauncher):
+     def common_options(self):
+         return [
+             Options(  # Command 0
+                 dataroot="./datasets/grumpifycat",
+                 name="grumpifycat_CUT",
+                 CUT_mode="CUT"
+             ),
+ 
+             Options(  # Command 1
+                 dataroot="./datasets/grumpifycat",
+                 name="grumpifycat_FastCUT",
+                 CUT_mode="FastCUT",
+             )
+         ]
+ 
+     def commands(self):
+         return ["python train.py " + str(opt) for opt in self.common_options()]
+ 
+     def test_commands(self):
+         # Russian Blue -> Grumpy Cats dataset does not have test split.
+         # Therefore, let's set the test split to be the "train" set.
+         return ["python test.py " + str(opt.set(phase='train')) for opt in self.common_options()]
+ ```
181
+
182
+
183
+
184
+ ### Apply a pre-trained CUT model and evaluate FID
185
+
186
+ To run the pretrained models, run the following.
187
+
188
+ ```bash
189
+
190
+ # Download and unzip the pretrained models. The weights should be located at
191
+ # checkpoints/horse2zebra_cut_pretrained/latest_net_G.pth, for example.
192
+ wget http://efrosgans.eecs.berkeley.edu/CUT/pretrained_models.tar
193
+ tar -xf pretrained_models.tar
194
+
195
+ # Generate outputs. The dataset paths might need to be adjusted.
196
+ # To do this, modify the lines of experiments/pretrained_launcher.py
197
+ # [id] corresponds to the respective commands defined in pretrained_launcher.py
198
+ # 0 - CUT on Cityscapes
199
+ # 1 - FastCUT on Cityscapes
200
+ # 2 - CUT on Horse2Zebra
201
+ # 3 - FastCUT on Horse2Zebra
202
+ # 4 - CUT on Cat2Dog
203
+ # 5 - FastCUT on Cat2Dog
204
+ python -m experiments pretrained run_test [id]
205
+
206
+ # Evaluate FID. To do this, first install pytorch-fid of https://github.com/mseitzer/pytorch-fid
207
+ # pip install pytorch-fid
208
+ # For example, to evaluate horse2zebra FID of CUT,
209
+ # python -m pytorch_fid ./datasets/horse2zebra/testB/ results/horse2zebra_cut_pretrained/test_latest/images/fake_B/
210
+ # To evaluate Cityscapes FID of FastCUT,
211
+ # python -m pytorch_fid ./datasets/cityscapes/valA/ ~/projects/contrastive-unpaired-translation/results/cityscapes_fastcut_pretrained/test_latest/images/fake_B/
212
+ # Note that a special dataset needs to be used for the Cityscapes model. Please read below.
213
+ python -m pytorch_fid [path to real test images] [path to generated images]
214
+
215
+ ```
216
+
217
+ Note: the Cityscapes pretrained model was trained and evaluated on a resized and JPEG-compressed version of the original Cityscapes dataset. To perform evaluation, please download [this](http://efrosgans.eecs.berkeley.edu/CUT/datasets/cityscapes_val_for_CUT.tar) validation set and perform evaluation.
218
+
219
+
220
+ ### SinCUT Single Image Unpaired Training
221
+
222
+ To train SinCUT (single-image translation, shown in Figs. 9, 13, and 14 of the paper), you need to
223
+
224
+ 1. set the `--model` option as `--model sincut`, which invokes the configuration and code at `./models/sincut_model.py`, and
225
+ 2. specify the dataset directory of one image in each domain, such as the example dataset included in this repo at `./datasets/single_image_monet_etretat/`.
226
+
227
+ For example, to train a model for the [Etretat cliff (first image of Figure 13)](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/imgs/singleimage.gif), please use the following command.
228
+
229
+ ```bash
230
+ python train.py --model sincut --name singleimage_monet_etretat --dataroot ./datasets/single_image_monet_etretat
231
+ ```
232
+
233
+ or by using the experiment launcher script,
234
+ ```bash
235
+ python -m experiments singleimage run 0
236
+ ```
237
+
238
+ For single-image translation, we adopt network architectural components of [StyleGAN2](https://github.com/NVlabs/stylegan2), as well as the pixel identity preservation loss used in [DTN](https://arxiv.org/abs/1611.02200) and [CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/cycle_gan_model.py#L160). In particular, we adopted the implementation of [rosinality](https://github.com/rosinality/stylegan2-pytorch), which lives at `models/stylegan_networks.py`.
239
+
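+ The pixel identity preservation term mentioned above is essentially an L1 penalty that discourages the generator from altering an image that is already in the target domain. A sketch (the variable names are illustrative, not the ones used in `sincut_model.py`):
+ ```python
+ import torch.nn.functional as F
+ 
+ # penalize G for changing an image that already belongs to the target domain
+ loss_idt = F.l1_loss(netG(real_B), real_B) * lambda_identity
+ ```
+ 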
240
+ The training takes several hours. To generate the final image using the checkpoint,
241
+
242
+ ```bash
243
+ python test.py --model sincut --name singleimage_monet_etretat --dataroot ./datasets/single_image_monet_etretat
244
+ ```
245
+
246
+ or simply
247
+
248
+ ```bash
249
+ python -m experiments singleimage run_test 0
250
+ ```
251
+
252
+ ### [Datasets](./docs/datasets.md)
253
+ Download CUT/CycleGAN/pix2pix datasets. For example,
254
+
255
+ ```bash
256
+ bash ./datasets/download_cut_dataset.sh horse2zebra
257
+ ```
258
+
259
+ The Cat2Dog dataset is prepared from the AFHQ dataset. Please visit https://github.com/clovaai/stargan-v2 and download the AFHQ dataset by running `bash download.sh afhq-dataset` in that repo. Then reorganize the directories as follows.
260
+ ```bash
+ mkdir datasets/cat2dog
+ ln -s [path_to_afhq]/train/cat datasets/cat2dog/trainA
+ ln -s [path_to_afhq]/train/dog datasets/cat2dog/trainB
+ ln -s [path_to_afhq]/test/cat datasets/cat2dog/testA
+ ln -s [path_to_afhq]/test/dog datasets/cat2dog/testB
+ ```
267
+
268
+ The Cityscapes dataset can be downloaded from https://cityscapes-dataset.com.
269
+ After that, use the script `./datasets/prepare_cityscapes_dataset.py` to prepare the dataset.
270
+
271
+
272
+ #### Preprocessing of input images
273
+
274
+ The preprocessing of the input images, such as resizing or random cropping, is controlled by the options `--preprocess`, `--load_size`, and `--crop_size`. The usage follows the [CycleGAN/pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) repo.
275
+
276
+ For example, the default setting `--preprocess resize_and_crop --load_size 286 --crop_size 256` resizes the input image to `286x286`, and then makes a random crop of size `256x256` as a form of data augmentation. Other preprocessing options can be specified as well; they are defined in [base_dataset.py](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/data/base_dataset.py#L82). Below are some example options.
277
+
278
+ - `--preprocess none`: does not perform any preprocessing. Note that the image size is still scaled to the closest multiple of 4, because the convolutional generator cannot maintain the same image size otherwise.
279
+ - `--preprocess scale_width --load_size 768`: scales the width of the image to 768 pixels.
280
+ - `--preprocess scale_shortside_and_crop`: scales the image preserving aspect ratio so that the short side is `load_size`, and then performs random cropping of window size `crop_size`.
281
+
282
+ More preprocessing options can be added by modifying [`get_transform()`](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/data/base_dataset.py#L82) of `base_dataset.py`.
283
+
284
+
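+ For example, a hypothetical `center_crop` option could be wired into `get_transform()` with a couple of lines (a sketch only; the option name is made up):
+ ```python
+ # inside get_transform() in data/base_dataset.py
+ if 'center_crop' in opt.preprocess:
+     transform_list.append(transforms.CenterCrop(opt.crop_size))
+ ```
+ 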
285
+ ### Citation
286
+ If you use this code for your research, please cite our [paper](https://arxiv.org/pdf/2007.15651).
287
+ ```
288
+ @inproceedings{park2020cut,
289
+ title={Contrastive Learning for Unpaired Image-to-Image Translation},
290
+ author={Taesung Park and Alexei A. Efros and Richard Zhang and Jun-Yan Zhu},
291
+ booktitle={European Conference on Computer Vision},
292
+ year={2020}
293
+ }
294
+ ```
295
+
296
+ If you use the original [pix2pix](https://phillipi.github.io/pix2pix/) and [CycleGAN](https://junyanz.github.io/CycleGAN/) models included in this repo, please cite the following papers:
297
+ ```
298
+ @inproceedings{CycleGAN2017,
299
+ title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
300
+ author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
301
+ booktitle={IEEE International Conference on Computer Vision (ICCV)},
302
+ year={2017}
303
+ }
304
+
305
+
306
+ @inproceedings{isola2017image,
307
+ title={Image-to-Image Translation with Conditional Adversarial Networks},
308
+ author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
309
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
310
+ year={2017}
311
+ }
312
+ ```
313
+
314
+
315
+ ### Acknowledgments
316
+ We thank Allan Jabri and Phillip Isola for helpful discussion and feedback. Our code is developed based on [pytorch-CycleGAN-and-pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix). We also thank [pytorch-fid](https://github.com/mseitzer/pytorch-fid) for FID computation, [drn](https://github.com/fyu/drn) for mIoU computation, and [stylegan2-pytorch](https://github.com/rosinality/stylegan2-pytorch/) for the PyTorch implementation of StyleGAN2 used in our single-image translation setting.
data/__init__.py ADDED
@@ -0,0 +1,98 @@
1
+ """This package includes all the modules related to data loading and preprocessing
2
+
3
+ To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
4
+ You need to implement four functions:
5
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
6
+ -- <__len__>: return the size of dataset.
7
+ -- <__getitem__>: get a data point from data loader.
8
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
9
+
10
+ Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
11
+ See our template dataset class 'template_dataset.py' for more details.
12
+ """
13
+ import importlib
14
+ import torch.utils.data
15
+ from data.base_dataset import BaseDataset
16
+
17
+
18
+ def find_dataset_using_name(dataset_name):
19
+ """Import the module "data/[dataset_name]_dataset.py".
20
+
21
+ In the file, the class called DatasetNameDataset() will
22
+ be instantiated. It has to be a subclass of BaseDataset,
23
+ and it is case-insensitive.
24
+ """
25
+ dataset_filename = "data." + dataset_name + "_dataset"
26
+ datasetlib = importlib.import_module(dataset_filename)
27
+
28
+ dataset = None
29
+ target_dataset_name = dataset_name.replace('_', '') + 'dataset'
30
+ for name, cls in datasetlib.__dict__.items():
31
+ if name.lower() == target_dataset_name.lower() \
32
+ and issubclass(cls, BaseDataset):
33
+ dataset = cls
34
+
35
+ if dataset is None:
36
+ raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
37
+
38
+ return dataset
39
+
40
+
41
+ def get_option_setter(dataset_name):
42
+ """Return the static method <modify_commandline_options> of the dataset class."""
43
+ dataset_class = find_dataset_using_name(dataset_name)
44
+ return dataset_class.modify_commandline_options
45
+
46
+
47
+ def create_dataset(opt):
48
+ """Create a dataset given the option.
49
+
50
+ This function wraps the class CustomDatasetDataLoader.
51
+ This is the main interface between this package and 'train.py'/'test.py'
52
+
53
+ Example:
54
+ >>> from data import create_dataset
55
+ >>> dataset = create_dataset(opt)
56
+ """
57
+ data_loader = CustomDatasetDataLoader(opt)
58
+ dataset = data_loader.load_data()
59
+ return dataset
60
+
61
+
62
+ class CustomDatasetDataLoader():
63
+ """Wrapper class of Dataset class that performs multi-threaded data loading"""
64
+
65
+ def __init__(self, opt):
66
+ """Initialize this class
67
+
68
+ Step 1: create a dataset instance given the name [dataset_mode]
69
+ Step 2: create a multi-threaded data loader.
70
+ """
71
+ self.opt = opt
72
+ dataset_class = find_dataset_using_name(opt.dataset_mode)
73
+ self.dataset = dataset_class(opt)
74
+ print("dataset [%s] was created" % type(self.dataset).__name__)
75
+ self.dataloader = torch.utils.data.DataLoader(
76
+ self.dataset,
77
+ batch_size=opt.batch_size,
78
+ shuffle=not opt.serial_batches,
79
+ num_workers=int(opt.num_threads),
80
+ drop_last=True if opt.isTrain else False,
81
+ )
82
+
83
+ def set_epoch(self, epoch):
84
+ self.dataset.current_epoch = epoch
85
+
86
+ def load_data(self):
87
+ return self
88
+
89
+ def __len__(self):
90
+ """Return the number of data in the dataset"""
91
+ return min(len(self.dataset), self.opt.max_dataset_size)
92
+
93
+ def __iter__(self):
94
+ """Return a batch of data"""
95
+ for i, data in enumerate(self.dataloader):
96
+ if i * self.opt.batch_size >= self.opt.max_dataset_size:
97
+ break
98
+ yield data
data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.18 kB).
data/__pycache__/base_dataset.cpython-310.pyc ADDED
Binary file (8.06 kB).
data/__pycache__/image_folder.cpython-310.pyc ADDED
Binary file (2.46 kB).
data/__pycache__/unaligned_dataset.cpython-310.pyc ADDED
Binary file (3.07 kB).
data/base_dataset.py ADDED
@@ -0,0 +1,230 @@
1
+ """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
2
+
3
+ It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
4
+ """
5
+ import random
6
+ import numpy as np
7
+ import torch.utils.data as data
8
+ from PIL import Image
9
+ import torchvision.transforms as transforms
10
+ from abc import ABC, abstractmethod
11
+
12
+
13
+ class BaseDataset(data.Dataset, ABC):
14
+ """This class is an abstract base class (ABC) for datasets.
15
+
16
+ To create a subclass, you need to implement the following four functions:
17
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
18
+ -- <__len__>: return the size of dataset.
19
+ -- <__getitem__>: get a data point.
20
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
21
+ """
22
+
23
+ def __init__(self, opt):
24
+ """Initialize the class; save the options in the class
25
+
26
+ Parameters:
27
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
28
+ """
29
+ self.opt = opt
30
+ self.root = opt.dataroot
31
+ self.current_epoch = 0
32
+
33
+ @staticmethod
34
+ def modify_commandline_options(parser, is_train):
35
+ """Add new dataset-specific options, and rewrite default values for existing options.
36
+
37
+ Parameters:
38
+ parser -- original option parser
39
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
40
+
41
+ Returns:
42
+ the modified parser.
43
+ """
44
+ return parser
45
+
46
+ @abstractmethod
47
+ def __len__(self):
48
+ """Return the total number of images in the dataset."""
49
+ return 0
50
+
51
+ @abstractmethod
52
+ def __getitem__(self, index):
53
+ """Return a data point and its metadata information.
54
+
55
+ Parameters:
56
+ index - - a random integer for data indexing
57
+
58
+ Returns:
59
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
60
+ """
61
+ pass
62
+
63
+
64
+ def get_params(opt, size):
65
+ w, h = size
66
+ new_h = h
67
+ new_w = w
68
+ if opt.preprocess == 'resize_and_crop':
69
+ new_h = new_w = opt.load_size
70
+ elif opt.preprocess == 'scale_width_and_crop':
71
+ new_w = opt.load_size
72
+ new_h = opt.load_size * h // w
73
+
74
+ x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
75
+ y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
76
+
77
+ flip = random.random() > 0.5
78
+
79
+ return {'crop_pos': (x, y), 'flip': flip}
80
+
81
+
82
+ def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
83
+ transform_list = []
84
+ if grayscale:
85
+ transform_list.append(transforms.Grayscale(1))
86
+ if 'fixsize' in opt.preprocess:
87
+ transform_list.append(transforms.Resize(params["size"], method))
88
+ if 'resize' in opt.preprocess:
89
+ osize = [opt.load_size, opt.load_size]
90
+ if "gta2cityscapes" in opt.dataroot:
91
+ osize[0] = opt.load_size // 2
92
+ transform_list.append(transforms.Resize(osize, method))
93
+ elif 'scale_width' in opt.preprocess:
94
+ transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
95
+ elif 'scale_shortside' in opt.preprocess:
96
+ transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method)))
97
+
98
+ if 'zoom' in opt.preprocess:
99
+ if params is None:
100
+ transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method)))
101
+ else:
102
+ transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params["scale_factor"])))
103
+
104
+ if 'crop' in opt.preprocess:
105
+ if params is None or 'crop_pos' not in params:
106
+ transform_list.append(transforms.RandomCrop(opt.crop_size))
107
+ else:
108
+ transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
109
+
110
+ if 'patch' in opt.preprocess:
111
+ transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size)))
112
+
113
+ if 'trim' in opt.preprocess:
114
+ transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size)))
115
+
116
+ # if opt.preprocess == 'none':
117
+ transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
118
+
119
+ if not opt.no_flip:
120
+ if params is None or 'flip' not in params:
121
+ transform_list.append(transforms.RandomHorizontalFlip())
122
+ elif 'flip' in params:
123
+ transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
124
+
125
+ if convert:
126
+ transform_list += [transforms.ToTensor()]
127
+ if grayscale:
128
+ transform_list += [transforms.Normalize((0.5,), (0.5,))]
129
+ else:
130
+ transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
131
+ return transforms.Compose(transform_list)
132
+
133
+
134
+ def __make_power_2(img, base, method=Image.BICUBIC):
135
+ ow, oh = img.size
136
+ h = int(round(oh / base) * base)
137
+ w = int(round(ow / base) * base)
138
+ if h == oh and w == ow:
139
+ return img
140
+
141
+ return img.resize((w, h), method)
142
+
143
+
144
+ def __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None):
145
+ if factor is None:
146
+ zoom_level = np.random.uniform(0.8, 1.0, size=[2])
147
+ else:
148
+ zoom_level = (factor[0], factor[1])
149
+ iw, ih = img.size
150
+ zoomw = max(crop_width, iw * zoom_level[0])
151
+ zoomh = max(crop_width, ih * zoom_level[1])
152
+ img = img.resize((int(round(zoomw)), int(round(zoomh))), method)
153
+ return img
154
+
155
+
156
+ def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC):
157
+ ow, oh = img.size
158
+ shortside = min(ow, oh)
159
+ if shortside >= target_width:
160
+ return img
161
+ else:
162
+ scale = target_width / shortside
163
+ return img.resize((round(ow * scale), round(oh * scale)), method)
164
+
165
+
166
+ def __trim(img, trim_width):
167
+ ow, oh = img.size
168
+ if ow > trim_width:
169
+ xstart = np.random.randint(ow - trim_width)
170
+ xend = xstart + trim_width
171
+ else:
172
+ xstart = 0
173
+ xend = ow
174
+ if oh > trim_width:
175
+ ystart = np.random.randint(oh - trim_width)
176
+ yend = ystart + trim_width
177
+ else:
178
+ ystart = 0
179
+ yend = oh
180
+ return img.crop((xstart, ystart, xend, yend))
181
+
182
+
183
+ def __scale_width(img, target_width, crop_width, method=Image.BICUBIC):
184
+ ow, oh = img.size
185
+ if ow == target_width and oh >= crop_width:
186
+ return img
187
+ w = target_width
188
+ h = int(max(target_width * oh / ow, crop_width))
189
+ return img.resize((w, h), method)
190
+
191
+
192
+ def __crop(img, pos, size):
193
+ ow, oh = img.size
194
+ x1, y1 = pos
195
+ tw = th = size
196
+ if (ow > tw or oh > th):
197
+ return img.crop((x1, y1, x1 + tw, y1 + th))
198
+ return img
199
+
200
+
201
+ def __patch(img, index, size):
202
+ ow, oh = img.size
203
+ nw, nh = ow // size, oh // size
204
+ roomx = ow - nw * size
205
+ roomy = oh - nh * size
206
+ startx = np.random.randint(int(roomx) + 1)
207
+ starty = np.random.randint(int(roomy) + 1)
208
+
209
+ index = index % (nw * nh)
210
+ ix = index // nh
211
+ iy = index % nh
212
+ gridx = startx + ix * size
213
+ gridy = starty + iy * size
214
+ return img.crop((gridx, gridy, gridx + size, gridy + size))
215
+
216
+
217
+ def __flip(img, flip):
218
+ if flip:
219
+ return img.transpose(Image.FLIP_LEFT_RIGHT)
220
+ return img
221
+
222
+
223
+ def __print_size_warning(ow, oh, w, h):
224
+ """Print warning information about image size(only print once)"""
225
+ if not hasattr(__print_size_warning, 'has_printed'):
226
+ print("The image size needs to be a multiple of 4. "
227
+ "The loaded image size was (%d, %d), so it was adjusted to "
228
+ "(%d, %d). This adjustment will be done to all images "
229
+ "whose sizes are not multiples of 4" % (ow, oh, w, h))
230
+ __print_size_warning.has_printed = True
data/image_folder.py ADDED
@@ -0,0 +1,66 @@
1
+ """A modified image folder class
2
+
3
+ We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4
+ so that this class can load images from both the current directory and its subdirectories.
5
+ """
6
+
7
+ import torch.utils.data as data
8
+
9
+ from PIL import Image
10
+ import os
11
+ import os.path
12
+
13
+ IMG_EXTENSIONS = [
14
+ '.jpg', '.JPG', '.jpeg', '.JPEG',
15
+ '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
16
+ '.tif', '.TIF', '.tiff', '.TIFF',
17
+ ]
18
+
19
+
20
+ def is_image_file(filename):
21
+ return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
22
+
23
+
24
+ def make_dataset(dir, max_dataset_size=float("inf")):
25
+ images = []
26
+ assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
27
+
28
+ for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
29
+ for fname in fnames:
30
+ if is_image_file(fname):
31
+ path = os.path.join(root, fname)
32
+ images.append(path)
33
+ return images[:min(max_dataset_size, len(images))]
34
+
35
+
36
+ def default_loader(path):
37
+ return Image.open(path).convert('RGB')
38
+
39
+
40
+ class ImageFolder(data.Dataset):
41
+
42
+ def __init__(self, root, transform=None, return_paths=False,
43
+ loader=default_loader):
44
+ imgs = make_dataset(root)
45
+ if len(imgs) == 0:
46
+ raise(RuntimeError("Found 0 images in: " + root + "\n"
47
+ "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
48
+
49
+ self.root = root
50
+ self.imgs = imgs
51
+ self.transform = transform
52
+ self.return_paths = return_paths
53
+ self.loader = loader
54
+
55
+ def __getitem__(self, index):
56
+ path = self.imgs[index]
57
+ img = self.loader(path)
58
+ if self.transform is not None:
59
+ img = self.transform(img)
60
+ if self.return_paths:
61
+ return img, path
62
+ else:
63
+ return img
64
+
65
+ def __len__(self):
66
+ return len(self.imgs)
data/single_dataset.py ADDED
@@ -0,0 +1,40 @@
1
+ from data.base_dataset import BaseDataset, get_transform
2
+ from data.image_folder import make_dataset
3
+ from PIL import Image
4
+
5
+
6
+ class SingleDataset(BaseDataset):
7
+ """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
8
+
9
+ It can be used for generating CycleGAN results only for one side with the model option '--model test'.
10
+ """
11
+
12
+ def __init__(self, opt):
13
+ """Initialize this dataset class.
14
+
15
+ Parameters:
16
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
17
+ """
18
+ BaseDataset.__init__(self, opt)
19
+ self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
20
+ input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
21
+ self.transform = get_transform(opt, grayscale=(input_nc == 1))
22
+
23
+ def __getitem__(self, index):
24
+ """Return a data point and its metadata information.
25
+
26
+ Parameters:
27
+ index - - a random integer for data indexing
28
+
29
+ Returns a dictionary that contains A and A_paths
30
+ A(tensor) - - an image in one domain
31
+ A_paths(str) - - the path of the image
32
+ """
33
+ A_path = self.A_paths[index]
34
+ A_img = Image.open(A_path).convert('RGB')
35
+ A = self.transform(A_img)
36
+ return {'A': A, 'A_paths': A_path}
37
+
38
+ def __len__(self):
39
+ """Return the total number of images in the dataset."""
40
+ return len(self.A_paths)
data/singleimage_dataset.py ADDED
@@ -0,0 +1,108 @@
1
+ import numpy as np
2
+ import os.path
3
+ from data.base_dataset import BaseDataset, get_transform
4
+ from data.image_folder import make_dataset
5
+ from PIL import Image
6
+ import random
7
+ import util.util as util
8
+
9
+
10
+ class SingleImageDataset(BaseDataset):
11
+ """
12
+ This dataset class can load unaligned/unpaired datasets.
13
+
14
+ It requires two directories to host training images from domain A '/path/to/data/trainA'
15
+ and from domain B '/path/to/data/trainB' respectively.
16
+ You can train the model with the dataset flag '--dataroot /path/to/data'.
17
+ Similarly, you need to prepare two directories:
18
+ '/path/to/data/testA' and '/path/to/data/testB' during test time.
19
+ """
20
+
21
+ def __init__(self, opt):
22
+ """Initialize this dataset class.
23
+
24
+ Parameters:
25
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
26
+ """
27
+ BaseDataset.__init__(self, opt)
28
+
29
+ self.dir_A = os.path.join(opt.dataroot, 'trainA') # create a path '/path/to/data/trainA'
30
+ self.dir_B = os.path.join(opt.dataroot, 'trainB') # create a path '/path/to/data/trainB'
31
+
32
+ if os.path.exists(self.dir_A) and os.path.exists(self.dir_B):
33
+ self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
34
+ self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
35
+ self.A_size = len(self.A_paths) # get the size of dataset A
36
+ self.B_size = len(self.B_paths) # get the size of dataset B
37
+
38
+ assert len(self.A_paths) == 1 and len(self.B_paths) == 1,\
39
+ "SingleImageDataset class should be used with one image in each domain"
40
+ A_img = Image.open(self.A_paths[0]).convert('RGB')
41
+ B_img = Image.open(self.B_paths[0]).convert('RGB')
42
+ print("Image sizes %s and %s" % (str(A_img.size), str(B_img.size)))
43
+
44
+ self.A_img = A_img
45
+ self.B_img = B_img
46
+
47
+ # In single-image translation, we augment the data loader by applying
48
+ # random scaling. Still, we design the data loader such that the
49
+ # amount of scaling is the same within a minibatch. To do this,
50
+ # we precompute the random scaling values, and repeat them by |batch_size|.
51
+ A_zoom = 1 / self.opt.random_scale_max
52
+ zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
53
+ self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [-1, 2])
54
+
55
+ B_zoom = 1 / self.opt.random_scale_max
56
+ zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
57
+ self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [-1, 2])
58
+
59
+ # While the crop locations are randomized, the negative samples should
60
+ # not come from the same location. To do this, we precompute the
61
+ # crop locations with no repetition.
62
+ self.patch_indices_A = list(range(len(self)))
63
+ random.shuffle(self.patch_indices_A)
64
+ self.patch_indices_B = list(range(len(self)))
65
+ random.shuffle(self.patch_indices_B)
66
+
67
+ def __getitem__(self, index):
68
+ """Return a data point and its metadata information.
69
+
70
+ Parameters:
71
+ index (int) -- a random integer for data indexing
72
+
73
+ Returns a dictionary that contains A, B, A_paths and B_paths
74
+ A (tensor) -- an image in the input domain
75
+ B (tensor) -- its corresponding image in the target domain
76
+ A_paths (str) -- image paths
77
+ B_paths (str) -- image paths
78
+ """
79
+ A_path = self.A_paths[0]
80
+ B_path = self.B_paths[0]
81
+ A_img = self.A_img
82
+ B_img = self.B_img
83
+
84
+ # apply image transformation
85
+ if self.opt.phase == "train":
86
+ param = {'scale_factor': self.zoom_levels_A[index],
87
+ 'patch_index': self.patch_indices_A[index],
88
+ 'flip': random.random() > 0.5}
89
+
90
+ transform_A = get_transform(self.opt, params=param, method=Image.BILINEAR)
91
+ A = transform_A(A_img)
92
+
93
+ param = {'scale_factor': self.zoom_levels_B[index],
94
+ 'patch_index': self.patch_indices_B[index],
95
+ 'flip': random.random() > 0.5}
96
+ transform_B = get_transform(self.opt, params=param, method=Image.BILINEAR)
97
+ B = transform_B(B_img)
98
+ else:
99
+ transform = get_transform(self.opt, method=Image.BILINEAR)
100
+ A = transform(A_img)
101
+ B = transform(B_img)
102
+
103
+ return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
104
+
105
+ def __len__(self):
106
+ """ Let's pretend the single image contains 100,000 crops for convenience.
107
+ """
108
+ return 100000
data/template_dataset.py ADDED
@@ -0,0 +1,75 @@
1
+ """Dataset class template
2
+
3
+ This module provides a template for users to implement custom datasets.
4
+ You can specify '--dataset_mode template' to use this dataset.
5
+ The class name should be consistent with both the filename and its dataset_mode option.
6
+ The filename should be <dataset_mode>_dataset.py
7
+ The class name should be <Dataset_mode>Dataset
8
+ You need to implement the following functions:
9
+ -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
10
+ -- <__init__>: Initialize this dataset class.
11
+ -- <__getitem__>: Return a data point and its metadata information.
12
+ -- <__len__>: Return the number of images.
13
+ """
14
+ from data.base_dataset import BaseDataset, get_transform
15
+ # from data.image_folder import make_dataset
16
+ # from PIL import Image
17
+
18
+
19
+ class TemplateDataset(BaseDataset):
20
+ """A template dataset class for you to implement custom datasets."""
21
+ @staticmethod
22
+ def modify_commandline_options(parser, is_train):
23
+ """Add new dataset-specific options, and rewrite default values for existing options.
24
+
25
+ Parameters:
26
+ parser -- original option parser
27
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
28
+
29
+ Returns:
30
+ the modified parser.
31
+ """
32
+ parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
33
+ parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
34
+ return parser
35
+
36
+ def __init__(self, opt):
37
+ """Initialize this dataset class.
38
+
39
+ Parameters:
40
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
41
+
42
+ A few things can be done here.
43
+ - save the options (have been done in BaseDataset)
44
+ - get image paths and meta information of the dataset.
45
+ - define the image transformation.
46
+ """
47
+ # save the option and dataset root
48
+ BaseDataset.__init__(self, opt)
49
+ # get the image paths of your dataset;
50
+ self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
51
+ # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
52
+ self.transform = get_transform(opt)
53
+
54
+ def __getitem__(self, index):
55
+ """Return a data point and its metadata information.
56
+
57
+ Parameters:
58
+ index -- a random integer for data indexing
59
+
60
+ Returns:
61
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
62
+
63
+ Step 1: get a random image path: e.g., path = self.image_paths[index]
64
+ Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
65
+ Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
66
+ Step 4: return a data point as a dictionary.
67
+ """
68
+ path = 'temp' # needs to be a string
69
+ data_A = None # needs to be a tensor
70
+ data_B = None # needs to be a tensor
71
+ return {'data_A': data_A, 'data_B': data_B, 'path': path}
72
+
73
+ def __len__(self):
74
+ """Return the total number of images."""
75
+ return len(self.image_paths)
data/unaligned_dataset.py ADDED
@@ -0,0 +1,78 @@
1
+ import os.path
2
+ from data.base_dataset import BaseDataset, get_transform
3
+ from data.image_folder import make_dataset
4
+ from PIL import Image
5
+ import random
6
+ import util.util as util
7
+
8
+
9
+ class UnalignedDataset(BaseDataset):
10
+ """
11
+ This dataset class can load unaligned/unpaired datasets.
12
+
13
+ It requires two directories to host training images from domain A '/path/to/data/trainA'
14
+ and from domain B '/path/to/data/trainB' respectively.
15
+ You can train the model with the dataset flag '--dataroot /path/to/data'.
16
+ Similarly, you need to prepare two directories:
17
+ '/path/to/data/testA' and '/path/to/data/testB' during test time.
18
+ """
19
+
20
+ def __init__(self, opt):
21
+ """Initialize this dataset class.
22
+
23
+ Parameters:
24
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
25
+ """
26
+ BaseDataset.__init__(self, opt)
27
+ self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
28
+ self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
29
+
30
+ if opt.phase == "test" and not os.path.exists(self.dir_A) \
31
+ and os.path.exists(os.path.join(opt.dataroot, "valA")):
32
+ self.dir_A = os.path.join(opt.dataroot, "valA")
33
+ self.dir_B = os.path.join(opt.dataroot, "valB")
34
+
35
+ self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
36
+ self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
37
+ self.A_size = len(self.A_paths) # get the size of dataset A
38
+ self.B_size = len(self.B_paths) # get the size of dataset B
39
+
40
+ def __getitem__(self, index):
41
+ """Return a data point and its metadata information.
42
+
43
+ Parameters:
44
+ index (int) -- a random integer for data indexing
45
+
46
+ Returns a dictionary that contains A, B, A_paths and B_paths
47
+ A (tensor) -- an image in the input domain
48
+ B (tensor) -- its corresponding image in the target domain
49
+ A_paths (str) -- image paths
50
+ B_paths (str) -- image paths
51
+ """
52
+         A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
53
+         if self.opt.serial_batches:   # keep A and B in the same (serial) order
54
+ index_B = index % self.B_size
55
+ else: # randomize the index for domain B to avoid fixed pairs.
56
+ index_B = random.randint(0, self.B_size - 1)
57
+ B_path = self.B_paths[index_B]
58
+ A_img = Image.open(A_path).convert('RGB')
59
+ B_img = Image.open(B_path).convert('RGB')
60
+
61
+ # Apply image transformation
62
+ # For CUT/FastCUT mode, if in finetuning phase (learning rate is decaying),
63
+ # do not perform resize-crop data augmentation of CycleGAN.
64
+ is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs
65
+ modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)
66
+ transform = get_transform(modified_opt)
67
+ A = transform(A_img)
68
+ B = transform(B_img)
69
+
70
+ return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
71
+
72
+ def __len__(self):
73
+ """Return the total number of images in the dataset.
74
+
75
+         As we have two datasets with potentially different numbers of images,
76
+         we take the maximum of the two.
77
+ """
78
+ return max(self.A_size, self.B_size)
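As a rough usage sketch (not part of this file), the dataset above plugs into a standard PyTorch `DataLoader`. In practice the repository constructs it through `create_dataset` in `data/__init__.py`, but direct construction looks roughly like this, assuming `opt` is an already-parsed options object:

```python
# Sketch only: consume UnalignedDataset with a plain PyTorch DataLoader.
# `opt` is assumed to be a parsed options object (dataroot, phase, serial_batches, ...).
import torch
from data.unaligned_dataset import UnalignedDataset

dataset = UnalignedDataset(opt)
loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)

for batch in loader:
    real_A, real_B = batch['A'], batch['B']              # (N, C, H, W) image tensors
    paths_A, paths_B = batch['A_paths'], batch['B_paths']
    break
```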
docs/datasets.md ADDED
@@ -0,0 +1,45 @@
1
+ ### CUT and CycleGAN Datasets
2
+ Download the CUT and CycleGAN datasets using the following script. Some of the datasets are collected by other researchers and papers. Please cite the original papers if you use the data.
3
+ ```bash
4
+ bash ./datasets/download_cut_dataset.sh dataset_name
5
+ ```
6
+ - `grumpifycat`: 88 Russian Blue cats from The Oxford-IIIT Pet [Dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/) and 214 Grumpy cats. We use an OpenCV detector `./datasets/detect_cat_face.py` to detect cat faces.
7
+ - `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
8
+ - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issue, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`.
9
+
10
+ Please cite the CycleGAN paper if you use the following datasets. [[Citation](../datasets/bibtex/cyclegan.tex)]
11
+ - `maps`: 1096 training images scraped from Google Maps.
12
+ - `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra`
13
+ - `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`.
14
+ - `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper.
15
+ - `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853.
16
+ - `iphone2dslr_flower`: both classes of images were downloaded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper.
17
+
18
+ To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data.
19
+
20
+ You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails.
21
+
22
+ ### pix2pix datasets
23
+ Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
24
+ ```bash
25
+ bash ./datasets/download_pix2pix_dataset.sh dataset_name
26
+ ```
27
+ - `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
28
+ - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]
29
+ - `maps`: 1096 training images scraped from Google Maps
30
+ - `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/shoes.tex)]
31
+ - `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/handbags.tex)]
32
+ - `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`.
33
+
34
+ We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
35
+
36
+ Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat the same for the other data splits (`val`, `test`, etc.).
37
+
38
+ Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
39
+
40
+ Once the data is formatted this way, call:
41
+ ```bash
42
+ python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
43
+ ```
44
+
45
+ This will combine each pair of images (A,B) into a single image file, ready for training.
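Before running `combine_A_and_B.py`, a quick sanity check along these lines (not part of the repository) can catch mismatched filenames between the `A` and `B` folders:

```python
# Sketch: report files that exist on only one side of an A/B pair.
import os

fold_A, fold_B = '/path/to/data/A', '/path/to/data/B'
for split in ('train', 'val', 'test'):
    dir_A, dir_B = os.path.join(fold_A, split), os.path.join(fold_B, split)
    if not (os.path.isdir(dir_A) and os.path.isdir(dir_B)):
        continue
    unmatched = set(os.listdir(dir_A)) ^ set(os.listdir(dir_B))  # symmetric difference
    print('%s: %d unmatched file(s)' % (split, len(unmatched)))
```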
environment.yml ADDED
@@ -0,0 +1,16 @@
1
+ name: contrastive-unpaired-translation
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - python=3.6
7
+ - pytorch=1.4.0
8
+ - scipy
9
+ - pip:
10
+ - dominate==2.4.0
11
+ - torchvision==0.5.0
12
+ - Pillow==6.1.0
13
+ - numpy==1.16.4
14
+ - visdom==0.1.8
15
+ - packaging
16
+ - GPUtil==1.4.0
experiments/__init__.py ADDED
@@ -0,0 +1,54 @@
1
+ import os
2
+ import importlib
3
+
4
+
5
+ def find_launcher_using_name(launcher_name):
6
+ # cur_dir = os.path.dirname(os.path.abspath(__file__))
7
+ # pythonfiles = glob.glob(cur_dir + '/**/*.py')
8
+ launcher_filename = "experiments.{}_launcher".format(launcher_name)
9
+ launcherlib = importlib.import_module(launcher_filename)
10
+
11
+ # In the file, the class called LauncherNameLauncher() will
12
+ # be instantiated. It has to be a subclass of BaseLauncher,
13
+ # and it is case-insensitive.
14
+ launcher = None
15
+ target_launcher_name = launcher_name.replace('_', '') + 'launcher'
16
+ for name, cls in launcherlib.__dict__.items():
17
+ if name.lower() == target_launcher_name.lower():
18
+ launcher = cls
19
+
20
+ if launcher is None:
21
+ raise ValueError("In %s.py, there should be a subclass of BaseLauncher "
22
+ "with class name that matches %s in lowercase." %
23
+ (launcher_filename, target_launcher_name))
24
+
25
+ return launcher
26
+
27
+
28
+ if __name__ == "__main__":
29
+ import sys
30
+ import pickle
31
+
32
+ assert len(sys.argv) >= 3
33
+
34
+ name = sys.argv[1]
35
+ Launcher = find_launcher_using_name(name)
36
+
37
+ cache = "/tmp/tmux_launcher/{}".format(name)
38
+ if os.path.isfile(cache):
39
+         instance = pickle.load(open(cache, 'rb'))
40
+ else:
41
+ instance = Launcher()
42
+
43
+ cmd = sys.argv[2]
44
+ if cmd == "launch":
45
+ instance.launch()
46
+ elif cmd == "stop":
47
+ instance.stop()
48
+ elif cmd == "send":
49
+ expid = int(sys.argv[3])
50
+         cmd = sys.argv[4]
51
+ instance.send_command(expid, cmd)
52
+
53
+ os.makedirs("/tmp/tmux_launcher/", exist_ok=True)
54
+     pickle.dump(instance, open(cache, 'wb'))
experiments/__main__.py ADDED
@@ -0,0 +1,87 @@
1
+ import os
2
+ import importlib
3
+
4
+
5
+ def find_launcher_using_name(launcher_name):
6
+ # cur_dir = os.path.dirname(os.path.abspath(__file__))
7
+ # pythonfiles = glob.glob(cur_dir + '/**/*.py')
8
+ launcher_filename = "experiments.{}_launcher".format(launcher_name)
9
+ launcherlib = importlib.import_module(launcher_filename)
10
+
11
+ # In the file, the class called LauncherNameLauncher() will
12
+ # be instantiated. It has to be a subclass of BaseLauncher,
13
+ # and it is case-insensitive.
14
+ launcher = None
15
+ # target_launcher_name = launcher_name.replace('_', '') + 'launcher'
16
+ for name, cls in launcherlib.__dict__.items():
17
+ if name.lower() == "launcher":
18
+ launcher = cls
19
+
20
+ if launcher is None:
21
+         raise ValueError("In %s.py, there should be a class named Launcher" % launcher_filename)
22
+
23
+ return launcher
24
+
25
+
26
+ if __name__ == "__main__":
27
+ import argparse
28
+
29
+ parser = argparse.ArgumentParser()
30
+ parser.add_argument('name')
31
+ parser.add_argument('cmd')
32
+ parser.add_argument('id', nargs='+', type=str)
33
+ parser.add_argument('--mode', default=None)
34
+ parser.add_argument('--which_epoch', default=None)
35
+ parser.add_argument('--continue_train', action='store_true')
36
+ parser.add_argument('--subdir', default='')
37
+ parser.add_argument('--title', default='')
38
+ parser.add_argument('--gpu_id', default=None, type=int)
39
+ parser.add_argument('--phase', default='test')
40
+
41
+ opt = parser.parse_args()
42
+
43
+ name = opt.name
44
+ Launcher = find_launcher_using_name(name)
45
+
46
+ instance = Launcher()
47
+
48
+ cmd = opt.cmd
49
+ ids = 'all' if 'all' in opt.id else [int(i) for i in opt.id]
50
+ if cmd == "launch":
51
+ instance.launch(ids, continue_train=opt.continue_train)
52
+ elif cmd == "stop":
53
+ instance.stop()
54
+ elif cmd == "send":
55
+ assert False
56
+ elif cmd == "close":
57
+ instance.close()
58
+ elif cmd == "dry":
59
+ instance.dry()
60
+ elif cmd == "relaunch":
61
+ instance.close()
62
+ instance.launch(ids, continue_train=opt.continue_train)
63
+ elif cmd == "run" or cmd == "train":
64
+ assert len(ids) == 1, '%s is invalid for run command' % (' '.join(opt.id))
65
+ expid = ids[0]
66
+ instance.run_command(instance.commands(), expid,
67
+ continue_train=opt.continue_train,
68
+ gpu_id=opt.gpu_id)
69
+ elif cmd == 'launch_test':
70
+ instance.launch(ids, test=True)
71
+ elif cmd == "run_test" or cmd == "test":
72
+ test_commands = instance.test_commands()
73
+ if ids == "all":
74
+ ids = list(range(len(test_commands)))
75
+ for expid in ids:
76
+ instance.run_command(test_commands, expid, opt.which_epoch,
77
+ gpu_id=opt.gpu_id)
78
+ if expid < len(ids) - 1:
79
+ os.system("sleep 5s")
80
+ elif cmd == "print_names":
81
+ instance.print_names(ids, test=False)
82
+ elif cmd == "print_test_names":
83
+ instance.print_names(ids, test=True)
84
+ elif cmd == "create_comparison_html":
85
+ instance.create_comparison_html(name, ids, opt.subdir, opt.title, opt.phase)
86
+ else:
87
+ raise ValueError("Command not recognized")
experiments/grumpifycat_launcher.py ADDED
@@ -0,0 +1,28 @@
1
+ from .tmux_launcher import Options, TmuxLauncher
2
+
3
+
4
+ class Launcher(TmuxLauncher):
5
+ def common_options(self):
6
+ return [
7
+ # Command 0
8
+ Options(
9
+ dataroot="./datasets/grumpifycat",
10
+ name="grumpifycat_CUT",
11
+ CUT_mode="CUT"
12
+ ),
13
+
14
+ # Command 1
15
+ Options(
16
+ dataroot="./datasets/grumpifycat",
17
+ name="grumpifycat_FastCUT",
18
+ CUT_mode="FastCUT",
19
+ )
20
+ ]
21
+
22
+ def commands(self):
23
+ return ["python train.py " + str(opt) for opt in self.common_options()]
24
+
25
+ def test_commands(self):
26
+ # RussianBlue -> Grumpy Cats dataset does not have test split.
27
+ # Therefore, let's set the test split to be the "train" set.
28
+ return ["python test.py " + str(opt.set(phase='train')) for opt in self.common_options()]
experiments/placeholder_launcher.py ADDED
@@ -0,0 +1,81 @@
1
+ from .tmux_launcher import Options, TmuxLauncher
2
+
3
+
4
+ class Launcher(TmuxLauncher):
5
+
6
+ # List of training commands
7
+ def commands(self):
8
+ opt = Options()
9
+
10
+ # common options for all training sessions defined in this launcher
11
+ opt.set(dataroot="~/datasets/cityscapes/", # specify --dataroot option here
12
+ model="contrastive_cycle_gan",
13
+ pool_size=0,
14
+ no_dropout="",
15
+ init_type="xavier",
16
+ batch_size=1,
17
+ display_freq=400,
18
+ evaluation_metrics="fid,cityscapes",
19
+ evaluation_freq=10000,
20
+ direction="BtoA",
21
+ use_recommended_options="",
22
+ nce_idt_freq=0.1,
23
+ )
24
+
25
+ # Specify individual options here
26
+ commands = [
27
+
28
+ # first command.
29
+ # This command can be run using python -m experiments placeholder run 0
30
+ # It will output python train.py [OPTIONS], where OPTIONS are everything defined in the variable opt
31
+ "python train.py " + str(opt.clone().set(
32
+ name="cityscapes_placeholder_noidt", # name of experiments
33
+ nce_idt=False,
34
+ )),
35
+
36
+ # second command.
37
+ # This command can be run using python -m experiments placeholder run 1
38
+ # It removes the option --nce_idt_freq 0.1 that was defined by our common options
39
+ "python train.py " + str(opt.clone().set(
40
+ name="cityscapes_placeholder_singlelayer",
41
+ nce_layers="16",
42
+ ).remove("nce_idt_freq")),
43
+
44
+
45
+ # third command that performs multigpu training
46
+ # This command can be run using python -m experiments placeholder run 2
47
+ "python train.py " + str(opt.clone().set(
48
+ name="cityscapes_placeholder_multigpu",
49
+ nce_layers="16",
50
+ batch_size=4,
51
+ gpu_ids="0,1",
52
+ )),
53
+
54
+ ]
55
+
56
+ return commands
57
+
58
+ # This is the command used for testing.
59
+ # They can be run using python -m experiments placeholder run_test $i
60
+ def test_commands(self):
61
+ opt = Options()
62
+ opt.set(dataroot="~/datasets/cityscapes_unaligned/cityscapes",
63
+ model="contrastive_cycle_gan",
64
+ no_dropout="",
65
+ init_type="xavier",
66
+ batch_size=1,
67
+ direction="BtoA",
68
+ epoch=40,
69
+ phase='train',
70
+ evaluation_metrics="fid",
71
+ )
72
+
73
+ commands = [
74
+ "python test.py " + str(opt.clone().set(
75
+ name="cityscapes_nce",
76
+ nce_layers="0,8,16",
77
+ direction="BtoA",
78
+ )),
79
+ ]
80
+
81
+ return commands
experiments/pretrained_launcher.py ADDED
@@ -0,0 +1,61 @@
1
+ from .tmux_launcher import Options, TmuxLauncher
2
+
3
+
4
+ class Launcher(TmuxLauncher):
5
+ def common_options(self):
6
+ return [
7
+ # Command 0
8
+ Options(
9
+ # NOTE: download the resized (and compressed) val set from
10
+ # http://efrosgans.eecs.berkeley.edu/CUT/datasets/cityscapes_val_for_CUT.tar
11
+ dataroot="datasets/cityscapes/cityscapes_val/",
12
+ direction="BtoA",
13
+ phase="val",
14
+ name="cityscapes_cut_pretrained",
15
+ CUT_mode="CUT",
16
+ ),
17
+
18
+ # Command 1
19
+ Options(
20
+ dataroot="./datasets/cityscapes_unaligned/cityscapes/",
21
+ direction="BtoA",
22
+ name="cityscapes_fastcut_pretrained",
23
+ CUT_mode="FastCUT",
24
+ ),
25
+
26
+ # Command 2
27
+ Options(
28
+ dataroot="./datasets/horse2zebra/",
29
+ name="horse2zebra_cut_pretrained",
30
+ CUT_mode="CUT"
31
+ ),
32
+
33
+ # Command 3
34
+ Options(
35
+ dataroot="./datasets/horse2zebra/",
36
+ name="horse2zebra_fastcut_pretrained",
37
+ CUT_mode="FastCUT",
38
+ ),
39
+
40
+ # Command 4
41
+ Options(
42
+ dataroot="./datasets/afhq/cat2dog/",
43
+ name="cat2dog_cut_pretrained",
44
+ CUT_mode="CUT"
45
+ ),
46
+
47
+ # Command 5
48
+ Options(
49
+ dataroot="./datasets/afhq/cat2dog/",
50
+ name="cat2dog_fastcut_pretrained",
51
+ CUT_mode="FastCUT",
52
+ ),
53
+
54
+
55
+ ]
56
+
57
+ def commands(self):
58
+ return ["python train.py " + str(opt) for opt in self.common_options()]
59
+
60
+ def test_commands(self):
61
+ return ["python test.py " + str(opt.set(num_test=500)) for opt in self.common_options()]
experiments/singleimage_launcher.py ADDED
@@ -0,0 +1,18 @@
1
+ from .tmux_launcher import Options, TmuxLauncher
2
+
3
+
4
+ class Launcher(TmuxLauncher):
5
+ def common_options(self):
6
+ return [
7
+ Options(
8
+ name="singleimage_monet_etretat",
9
+ dataroot="./datasets/single_image_monet_etretat",
10
+ model="sincut"
11
+ )
12
+ ]
13
+
14
+ def commands(self):
15
+ return ["python train.py " + str(opt) for opt in self.common_options()]
16
+
17
+ def test_commands(self):
18
+ return ["python test.py " + str(opt) for opt in self.common_options()]
experiments/tmux_launcher.py ADDED
@@ -0,0 +1,215 @@
1
+ """
2
+ experiment launcher using tmux panes
3
+ """
4
+ import os
5
+ import math
6
+ import GPUtil
7
+ import re
8
+
9
+ available_gpu_devices = None
10
+
11
+
12
+ class Options():
13
+ def __init__(self, *args, **kwargs):
14
+ self.args = []
15
+ self.kvs = {"gpu_ids": "0"}
16
+ self.set(*args, **kwargs)
17
+
18
+ def set(self, *args, **kwargs):
19
+ for a in args:
20
+ self.args.append(a)
21
+ for k, v in kwargs.items():
22
+ self.kvs[k] = v
23
+
24
+ return self
25
+
26
+ def remove(self, *args):
27
+ for a in args:
28
+ if a in self.args:
29
+ self.args.remove(a)
30
+ if a in self.kvs:
31
+ del self.kvs[a]
32
+
33
+ return self
34
+
35
+ def update(self, opt):
36
+ self.args += opt.args
37
+ self.kvs.update(opt.kvs)
38
+ return self
39
+
40
+ def __str__(self):
41
+ final = " ".join(self.args)
42
+ for k, v in self.kvs.items():
43
+ final += " --{} {}".format(k, v)
44
+
45
+ return final
46
+
47
+ def clone(self):
48
+ opt = Options()
49
+ opt.args = self.args.copy()
50
+ opt.kvs = self.kvs.copy()
51
+ return opt
52
+
53
+
54
+ def grab_pattern(pattern, text):
55
+ found = re.search(pattern, text)
56
+ if found is not None:
57
+ return found[1]
58
+ else:
59
+         return None
60
+
61
+
62
+ # http://code.activestate.com/recipes/252177-find-the-common-beginning-in-a-list-of-strings/
63
+ def findcommonstart(strlist):
64
+ prefix_len = ([min([x[0] == elem for elem in x])
65
+ for x in zip(*strlist)] + [0]).index(0)
66
+ prefix_len = max(1, prefix_len - 4)
67
+ return strlist[0][:prefix_len]
68
+
69
+
70
+ class TmuxLauncher():
71
+ def __init__(self):
72
+ super().__init__()
73
+ self.tmux_prepared = False
74
+
75
+ def prepare_tmux_panes(self, num_experiments, dry=False):
76
+ self.pane_per_window = 1
77
+ self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
78
+ print('preparing {} tmux panes'.format(num_experiments))
79
+ for w in range(self.n_windows):
80
+ if dry:
81
+ continue
82
+ window_name = "experiments_{}".format(w)
83
+ os.system("tmux new-window -n {}".format(window_name))
84
+ self.tmux_prepared = True
85
+
86
+ def refine_command(self, command, which_epoch, continue_train, gpu_id=None):
87
+ command = str(command)
88
+ if "--gpu_ids" in command:
89
+ gpu_ids = re.search(r'--gpu_ids ([\d,?]+)', command)[1]
90
+ else:
91
+ gpu_ids = "0"
92
+
93
+ gpu_ids = gpu_ids.split(",")
94
+ num_gpus = len(gpu_ids)
95
+ global available_gpu_devices
96
+ if available_gpu_devices is None and gpu_id is None:
97
+ available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.5)]
98
+ if gpu_id is not None:
99
+ available_gpu_devices = [i for i in str(gpu_id)]
100
+ if len(available_gpu_devices) < num_gpus:
101
+ raise ValueError("{} GPU(s) required for the command {} is not available".format(num_gpus, command))
102
+ active_devices = ",".join(available_gpu_devices[:num_gpus])
103
+ if which_epoch is not None:
104
+ which_epoch = " --epoch %s " % which_epoch
105
+ else:
106
+ which_epoch = ""
107
+ command = "CUDA_VISIBLE_DEVICES={} {} {}".format(active_devices, command, which_epoch)
108
+ if continue_train:
109
+ command += " --continue_train "
110
+
111
+ # available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.8)]
112
+ available_gpu_devices = available_gpu_devices[num_gpus:]
113
+
114
+ return command
115
+
116
+ def send_command(self, exp_id, command, dry=False, continue_train=False):
117
+ command = self.refine_command(command, None, continue_train=continue_train)
118
+ pane_name = "experiments_{windowid}.{paneid}".format(windowid=exp_id // self.pane_per_window,
119
+ paneid=exp_id % self.pane_per_window)
120
+ if dry is False:
121
+ os.system("tmux send-keys -t {} \"{}\" Enter".format(pane_name, command))
122
+
123
+ print("{}: {}".format(pane_name, command))
124
+ return pane_name
125
+
126
+ def run_command(self, command, ids, which_epoch=None, continue_train=False, gpu_id=None):
127
+ if type(command) is not list:
128
+ command = [command]
129
+ if ids is None:
130
+ ids = list(range(len(command)))
131
+ if type(ids) is not list:
132
+ ids = [ids]
133
+
134
+ for id in ids:
135
+ this_command = command[id]
136
+ refined_command = self.refine_command(this_command, which_epoch, continue_train=continue_train, gpu_id=gpu_id)
137
+ print(refined_command)
138
+ os.system(refined_command)
139
+
140
+ def commands(self):
141
+ return []
142
+
143
+ def launch(self, ids, test=False, dry=False, continue_train=False):
144
+ commands = self.test_commands() if test else self.commands()
145
+ if type(ids) is list:
146
+ commands = [commands[i] for i in ids]
147
+ if not self.tmux_prepared:
148
+ self.prepare_tmux_panes(len(commands), dry)
149
+ assert self.tmux_prepared
150
+
151
+ for i, command in enumerate(commands):
152
+ self.send_command(i, command, dry, continue_train=continue_train)
153
+
154
+ def dry(self):
155
+ self.launch(dry=True)
156
+
157
+ def stop(self):
158
+ num_experiments = len(self.commands())
159
+ self.pane_per_window = 4
160
+ self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
161
+ for w in range(self.n_windows):
162
+ window_name = "experiments_{}".format(w)
163
+ for i in range(self.pane_per_window):
164
+ os.system("tmux send-keys -t {window}.{pane} C-c".format(window=window_name, pane=i))
165
+
166
+ def close(self):
167
+ num_experiments = len(self.commands())
168
+ self.pane_per_window = 1
169
+ self.n_windows = int(math.ceil(num_experiments / self.pane_per_window))
170
+ for w in range(self.n_windows):
171
+ window_name = "experiments_{}".format(w)
172
+ os.system("tmux kill-window -t {}".format(window_name))
173
+
174
+ def print_names(self, ids, test=False):
175
+ if test:
176
+ cmds = self.test_commands()
177
+ else:
178
+ cmds = self.commands()
179
+ if type(ids) is list:
180
+ cmds = [cmds[i] for i in ids]
181
+
182
+ for cmdid, cmd in enumerate(cmds):
183
+ name = grab_pattern(r'--name ([^ ]+)', cmd)
184
+ print(name)
185
+
186
+ def create_comparison_html(self, expr_name, ids, subdir, title, phase):
187
+ cmds = self.test_commands()
188
+ if type(ids) is list:
189
+ cmds = [cmds[i] for i in ids]
190
+
191
+ no_easy_label = True
192
+ dirs = []
193
+ labels = []
194
+ for cmdid, cmd in enumerate(cmds):
195
+ name = grab_pattern(r'--name ([^ ]+)', cmd)
196
+ which_epoch = grab_pattern(r'--epoch ([^ ]+)', cmd)
197
+ if which_epoch is None:
198
+ which_epoch = "latest"
199
+ label = grab_pattern(r'--easy_label "([^"]+)"', cmd)
200
+ if label is None:
201
+ label = name
202
+ else:
203
+ no_easy_label = False
204
+ labels.append(label)
205
+ dir = "results/%s/%s_%s/%s/" % (name, phase, which_epoch, subdir)
206
+ dirs.append(dir)
207
+
208
+ commonprefix = findcommonstart(labels) if no_easy_label else ""
209
+ labels = ['"' + label[len(commonprefix):] + '"' for label in labels]
210
+ dirstr = ' '.join(dirs)
211
+ labelstr = ' '.join(labels)
212
+
213
+ command = "python ~/tools/html.py --web_dir_prefix results/comparison_ --name %s --dirs %s --labels %s --image_width 256" % (expr_name + '_' + title, dirstr, labelstr)
214
+ print(command)
215
+ os.system(command)
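To make the `Options` helper above concrete, here is a small sketch of how launchers compose training command lines. The dataset path and experiment name are made up, and the printed output is only approximate:

```python
# Sketch: Options collects flags and renders them as a command-line suffix.
from experiments.tmux_launcher import Options

opt = Options(dataroot="./datasets/horse2zebra", name="h2z_CUT", CUT_mode="CUT")
print("python train.py" + str(opt))
# prints something like:
#   python train.py --gpu_ids 0 --dataroot ./datasets/horse2zebra --name h2z_CUT --CUT_mode CUT

# clone() keeps the original intact; set()/remove() adjust individual flags.
variant = opt.clone().set(name="h2z_FastCUT", CUT_mode="FastCUT").remove("gpu_ids")
print("python train.py" + str(variant))
```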
imgs/gif_cut.gif ADDED

Git LFS Details

  • SHA256: 1855628146188891cfad08ed48c33e42c4495611c5918d8ea827fadd38825aa8
  • Pointer size: 132 Bytes
  • Size of remote file: 4.48 MB
imgs/grumpycat.jpg ADDED
imgs/horse2zebra_comparison.jpg ADDED

Git LFS Details

  • SHA256: 2485875664fbd61cd0199e6422e32c00157782745e6417997eb1b90add2a8d46
  • Pointer size: 132 Bytes
  • Size of remote file: 3.13 MB
imgs/paris.jpg ADDED
imgs/patchnce.gif ADDED

Git LFS Details

  • SHA256: a43d2053c824d66702edd87607f84dcc8febfe4d509c751cb6c8c2bbd6b6de7c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.06 MB
imgs/results.gif ADDED

Git LFS Details

  • SHA256: 600346609917eadb196046ed151838b7333f0e8f4d87388ebb9fbe74a9e8de21
  • Pointer size: 132 Bytes
  • Size of remote file: 4.95 MB
imgs/singleimage.gif ADDED

Git LFS Details

  • SHA256: 39ebbbeaf3229f4a35d7d33eadde52dc2e0e867ebbbb961a0993e5220a2cf0fc
  • Pointer size: 132 Bytes
  • Size of remote file: 2.27 MB
models/__init__.py ADDED
@@ -0,0 +1,67 @@
1
+ """This package contains modules related to objective functions, optimizations, and network architectures.
2
+
3
+ To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
4
+ You need to implement the following five functions:
5
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
6
+ -- <set_input>: unpack data from dataset and apply preprocessing.
7
+ -- <forward>: produce intermediate results.
8
+ -- <optimize_parameters>: calculate loss, gradients, and update network weights.
9
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
10
+
11
+ In the function <__init__>, you need to define four lists:
12
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
13
+ -- self.model_names (str list): define networks used in our training.
14
+ -- self.visual_names (str list): specify the images that you want to display and save.
15
+     -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
16
+
17
+ Now you can use the model class by specifying flag '--model dummy'.
18
+ See our template model class 'template_model.py' for more details.
19
+ """
20
+
21
+ import importlib
22
+ from models.base_model import BaseModel
23
+
24
+
25
+ def find_model_using_name(model_name):
26
+ """Import the module "models/[model_name]_model.py".
27
+
28
+ In the file, the class called DatasetNameModel() will
29
+ be instantiated. It has to be a subclass of BaseModel,
30
+ and it is case-insensitive.
31
+ """
32
+ model_filename = "models." + model_name + "_model"
33
+ modellib = importlib.import_module(model_filename)
34
+ model = None
35
+ target_model_name = model_name.replace('_', '') + 'model'
36
+ for name, cls in modellib.__dict__.items():
37
+ if name.lower() == target_model_name.lower() \
38
+ and issubclass(cls, BaseModel):
39
+ model = cls
40
+
41
+ if model is None:
42
+ print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
43
+ exit(0)
44
+
45
+ return model
46
+
47
+
48
+ def get_option_setter(model_name):
49
+ """Return the static method <modify_commandline_options> of the model class."""
50
+ model_class = find_model_using_name(model_name)
51
+ return model_class.modify_commandline_options
52
+
53
+
54
+ def create_model(opt):
55
+ """Create a model given the option.
56
+
57
+     This function wraps the model class specified by opt.model.
58
+ This is the main interface between this package and 'train.py'/'test.py'
59
+
60
+ Example:
61
+ >>> from models import create_model
62
+ >>> model = create_model(opt)
63
+ """
64
+ model = find_model_using_name(opt.model)
65
+ instance = model(opt)
66
+ print("model [%s] was created" % type(instance).__name__)
67
+ return instance
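As a quick illustration of the lookup above (only the `cut` model name is taken from this repository; everything else is a sketch):

```python
# Sketch: '--model cut' resolves to models/cut_model.py -> class CUTModel.
from models import find_model_using_name

model_cls = find_model_using_name('cut')   # imports models.cut_model, matches 'cutmodel'
print(model_cls.__name__)                  # CUTModel
# create_model(opt) performs the same lookup on opt.model and then calls model_cls(opt).
```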
models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.23 kB).
 
models/__pycache__/base_model.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
models/__pycache__/cut_model.cpython-310.pyc ADDED
Binary file (8.13 kB).
 
models/__pycache__/cycle_gan_model.cpython-310.pyc ADDED
Binary file (8.4 kB).
 
models/__pycache__/networks.cpython-310.pyc ADDED
Binary file (47.9 kB).
 
models/__pycache__/patchnce.cpython-310.pyc ADDED
Binary file (1.56 kB).
 
models/__pycache__/stylegan_networks.cpython-310.pyc ADDED
Binary file (22.3 kB).
 
models/base_model.py ADDED
@@ -0,0 +1,258 @@
1
+ import os
2
+ import torch
3
+ from collections import OrderedDict
4
+ from abc import ABC, abstractmethod
5
+ from . import networks
6
+
7
+
8
+ class BaseModel(ABC):
9
+ """This class is an abstract base class (ABC) for models.
10
+ To create a subclass, you need to implement the following five functions:
11
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
12
+ -- <set_input>: unpack data from dataset and apply preprocessing.
13
+ -- <forward>: produce intermediate results.
14
+ -- <optimize_parameters>: calculate losses, gradients, and update network weights.
15
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
16
+ """
17
+
18
+ def __init__(self, opt):
19
+ """Initialize the BaseModel class.
20
+
21
+ Parameters:
22
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
23
+
24
+ When creating your custom class, you need to implement your own initialization.
25
+         In this function, you should first call <BaseModel.__init__(self, opt)>
26
+ Then, you need to define four lists:
27
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
28
+             -- self.model_names (str list): define networks used in our training.
29
+             -- self.visual_names (str list): specify the images that you want to display and save.
30
+ -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
31
+ """
32
+ self.opt = opt
33
+ self.gpu_ids = opt.gpu_ids
34
+ self.isTrain = opt.isTrain
35
+ self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
36
+ self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
37
+ if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
38
+ torch.backends.cudnn.benchmark = True
39
+ self.loss_names = []
40
+ self.model_names = []
41
+ self.visual_names = []
42
+ self.optimizers = []
43
+ self.image_paths = []
44
+ self.metric = 0 # used for learning rate policy 'plateau'
45
+
46
+ @staticmethod
47
+ def dict_grad_hook_factory(add_func=lambda x: x):
48
+ saved_dict = dict()
49
+
50
+ def hook_gen(name):
51
+ def grad_hook(grad):
52
+ saved_vals = add_func(grad)
53
+ saved_dict[name] = saved_vals
54
+ return grad_hook
55
+ return hook_gen, saved_dict
56
+
57
+ @staticmethod
58
+ def modify_commandline_options(parser, is_train):
59
+ """Add new model-specific options, and rewrite default values for existing options.
60
+
61
+ Parameters:
62
+ parser -- original option parser
63
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
64
+
65
+ Returns:
66
+ the modified parser.
67
+ """
68
+ return parser
69
+
70
+ @abstractmethod
71
+ def set_input(self, input):
72
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
73
+
74
+ Parameters:
75
+ input (dict): includes the data itself and its metadata information.
76
+ """
77
+ pass
78
+
79
+ @abstractmethod
80
+ def forward(self):
81
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
82
+ pass
83
+
84
+ @abstractmethod
85
+ def optimize_parameters(self):
86
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
87
+ pass
88
+
89
+ def setup(self, opt):
90
+ """Load and print networks; create schedulers
91
+
92
+ Parameters:
93
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
94
+ """
95
+ if self.isTrain:
96
+ self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
97
+ if not self.isTrain or opt.continue_train:
98
+ load_suffix = opt.epoch
99
+ self.load_networks(load_suffix)
100
+
101
+ self.print_networks(opt.verbose)
102
+
103
+ def parallelize(self):
104
+ for name in self.model_names:
105
+ if isinstance(name, str):
106
+ net = getattr(self, 'net' + name)
107
+ setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))
108
+
109
+ def data_dependent_initialize(self, data):
110
+ pass
111
+
112
+ def eval(self):
113
+ """Make models eval mode during test time"""
114
+ for name in self.model_names:
115
+ if isinstance(name, str):
116
+ net = getattr(self, 'net' + name)
117
+ net.eval()
118
+
119
+ def test(self):
120
+ """Forward function used in test time.
121
+
122
+ This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
123
+ It also calls <compute_visuals> to produce additional visualization results
124
+ """
125
+ with torch.no_grad():
126
+ self.forward()
127
+ self.compute_visuals()
128
+
129
+ def compute_visuals(self):
130
+ """Calculate additional output images for visdom and HTML visualization"""
131
+ pass
132
+
133
+ def get_image_paths(self):
134
+ """ Return image paths that are used to load current data"""
135
+ return self.image_paths
136
+
137
+ def update_learning_rate(self):
138
+ """Update learning rates for all the networks; called at the end of every epoch"""
139
+ for scheduler in self.schedulers:
140
+ if self.opt.lr_policy == 'plateau':
141
+ scheduler.step(self.metric)
142
+ else:
143
+ scheduler.step()
144
+
145
+ lr = self.optimizers[0].param_groups[0]['lr']
146
+ print('learning rate = %.7f' % lr)
147
+
148
+ def get_current_visuals(self):
149
+ """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
150
+ visual_ret = OrderedDict()
151
+ for name in self.visual_names:
152
+ if isinstance(name, str):
153
+ visual_ret[name] = getattr(self, name)
154
+ return visual_ret
155
+
156
+ def get_current_losses(self):
157
+ """Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
158
+ errors_ret = OrderedDict()
159
+ for name in self.loss_names:
160
+ if isinstance(name, str):
161
+ errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
162
+ return errors_ret
163
+
164
+ def save_networks(self, epoch):
165
+ """Save all the networks to the disk.
166
+
167
+ Parameters:
168
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
169
+ """
170
+ for name in self.model_names:
171
+ if isinstance(name, str):
172
+ save_filename = '%s_net_%s.pth' % (epoch, name)
173
+ save_path = os.path.join(self.save_dir, save_filename)
174
+ net = getattr(self, 'net' + name)
175
+
176
+ if len(self.gpu_ids) > 0 and torch.cuda.is_available():
177
+ torch.save(net.module.cpu().state_dict(), save_path)
178
+ net.cuda(self.gpu_ids[0])
179
+ else:
180
+ torch.save(net.cpu().state_dict(), save_path)
181
+
182
+ def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
183
+ """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
184
+ key = keys[i]
185
+ if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
186
+ if module.__class__.__name__.startswith('InstanceNorm') and \
187
+ (key == 'running_mean' or key == 'running_var'):
188
+ if getattr(module, key) is None:
189
+ state_dict.pop('.'.join(keys))
190
+ if module.__class__.__name__.startswith('InstanceNorm') and \
191
+ (key == 'num_batches_tracked'):
192
+ state_dict.pop('.'.join(keys))
193
+ else:
194
+ self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
195
+
196
+ def load_networks(self, epoch):
197
+ """Load all the networks from the disk.
198
+
199
+ Parameters:
200
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
201
+ """
202
+ for name in self.model_names:
203
+ if isinstance(name, str):
204
+ load_filename = '%s_net_%s.pth' % (epoch, name)
205
+ if self.opt.isTrain and self.opt.pretrained_name is not None:
206
+ load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
207
+ else:
208
+ load_dir = self.save_dir
209
+
210
+ load_path = os.path.join(load_dir, load_filename)
211
+ net = getattr(self, 'net' + name)
212
+ if isinstance(net, torch.nn.DataParallel):
213
+ net = net.module
214
+ print('loading the model from %s' % load_path)
215
+ # if you are using PyTorch newer than 0.4 (e.g., built from
216
+ # GitHub source), you can remove str() on self.device
217
+ state_dict = torch.load(load_path, map_location=str(self.device))
218
+ if hasattr(state_dict, '_metadata'):
219
+ del state_dict._metadata
220
+
221
+ # patch InstanceNorm checkpoints prior to 0.4
222
+ # for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
223
+ # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
224
+ net.load_state_dict(state_dict)
225
+
226
+ def print_networks(self, verbose):
227
+ """Print the total number of parameters in the network and (if verbose) network architecture
228
+
229
+ Parameters:
230
+ verbose (bool) -- if verbose: print the network architecture
231
+ """
232
+ print('---------- Networks initialized -------------')
233
+ for name in self.model_names:
234
+ if isinstance(name, str):
235
+ net = getattr(self, 'net' + name)
236
+ num_params = 0
237
+ for param in net.parameters():
238
+ num_params += param.numel()
239
+ if verbose:
240
+ print(net)
241
+ print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
242
+ print('-----------------------------------------------')
243
+
244
+ def set_requires_grad(self, nets, requires_grad=False):
245
+ """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
246
+ Parameters:
247
+ nets (network list) -- a list of networks
248
+ requires_grad (bool) -- whether the networks require gradients or not
249
+ """
250
+ if not isinstance(nets, list):
251
+ nets = [nets]
252
+ for net in nets:
253
+ if net is not None:
254
+ for param in net.parameters():
255
+ param.requires_grad = requires_grad
256
+
257
+ def generate_visuals_for_evaluation(self, data, mode):
258
+ return {}
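The `dict_grad_hook_factory` helper above is easiest to understand with a small sketch (illustrative only): it returns a hook generator plus a shared dict, so gradient statistics can be collected by name during `backward()`.

```python
# Sketch: record gradient norms into a shared dict via tensor hooks.
import torch
from models.base_model import BaseModel

hook_gen, grad_stats = BaseModel.dict_grad_hook_factory(add_func=lambda g: g.norm().item())

x = torch.randn(4, 3, requires_grad=True)
x.register_hook(hook_gen('x'))     # the hook stores add_func(grad) under the key 'x'
loss = (x * 2.0).sum()
loss.backward()
print(grad_stats)                  # {'x': <L2 norm of d(loss)/dx>}
```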
models/cut_model.py ADDED
@@ -0,0 +1,214 @@
1
+ import numpy as np
2
+ import torch
3
+ from .base_model import BaseModel
4
+ from . import networks
5
+ from .patchnce import PatchNCELoss
6
+ import util.util as util
7
+
8
+
9
+ class CUTModel(BaseModel):
10
+ """ This class implements CUT and FastCUT model, described in the paper
11
+ Contrastive Learning for Unpaired Image-to-Image Translation
12
+ Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
13
+ ECCV, 2020
14
+
15
+ The code borrows heavily from the PyTorch implementation of CycleGAN
16
+ https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
17
+ """
18
+ @staticmethod
19
+ def modify_commandline_options(parser, is_train=True):
20
+ """ Configures options specific for CUT model
21
+ """
22
+ parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')
23
+
24
+ parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
25
+ parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
26
+ parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
27
+ parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
28
+ parser.add_argument('--nce_includes_all_negatives_from_minibatch',
29
+ type=util.str2bool, nargs='?', const=True, default=False,
30
+ help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.')
31
+ parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
32
+ parser.add_argument('--netF_nc', type=int, default=256)
33
+ parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
34
+ parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
35
+ parser.add_argument('--flip_equivariance',
36
+ type=util.str2bool, nargs='?', const=True, default=False,
37
+ help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
38
+
39
+ parser.set_defaults(pool_size=0) # no image pooling
40
+
41
+ opt, _ = parser.parse_known_args()
42
+
43
+ # Set default parameters for CUT and FastCUT
44
+ if opt.CUT_mode.lower() == "cut":
45
+ parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
46
+ elif opt.CUT_mode.lower() == "fastcut":
47
+ parser.set_defaults(
48
+ nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
49
+ n_epochs=150, n_epochs_decay=50
50
+ )
51
+ else:
52
+ raise ValueError(opt.CUT_mode)
53
+
54
+ return parser
55
+
56
+ def __init__(self, opt):
57
+ BaseModel.__init__(self, opt)
58
+
59
+ # specify the training losses you want to print out.
60
+ # The training/test scripts will call <BaseModel.get_current_losses>
61
+ self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
62
+ self.visual_names = ['real_A', 'fake_B', 'real_B']
63
+ self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
64
+
65
+ if opt.nce_idt and self.isTrain:
66
+ self.loss_names += ['NCE_Y']
67
+ self.visual_names += ['idt_B']
68
+
69
+ if self.isTrain:
70
+ self.model_names = ['G', 'F', 'D']
71
+ else: # during test time, only load G
72
+ self.model_names = ['G']
73
+
74
+ # define networks (both generator and discriminator)
75
+ self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
76
+ self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
77
+
78
+ if self.isTrain:
79
+ self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
80
+
81
+ # define loss functions
82
+ self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
83
+ self.criterionNCE = []
84
+
85
+ for nce_layer in self.nce_layers:
86
+ self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
87
+
88
+ self.criterionIdt = torch.nn.L1Loss().to(self.device)
89
+ self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
90
+ self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
91
+ self.optimizers.append(self.optimizer_G)
92
+ self.optimizers.append(self.optimizer_D)
93
+
94
+ def data_dependent_initialize(self, data):
95
+ """
96
+ The feature network netF is defined in terms of the shape of the intermediate, extracted
97
+ features of the encoder portion of netG. Because of this, the weights of netF are
98
+ initialized at the first feedforward pass with some input images.
99
+ Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
100
+ """
101
+ bs_per_gpu = data["A"].size(0) // max(len(self.opt.gpu_ids), 1)
102
+ self.set_input(data)
103
+ self.real_A = self.real_A[:bs_per_gpu]
104
+ self.real_B = self.real_B[:bs_per_gpu]
105
+ self.forward() # compute fake images: G(A)
106
+ if self.opt.isTrain:
107
+ self.compute_D_loss().backward() # calculate gradients for D
108
+             self.compute_G_loss().backward()                   # calculate gradients for G
109
+ if self.opt.lambda_NCE > 0.0:
110
+ self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
111
+ self.optimizers.append(self.optimizer_F)
112
+
113
+ def optimize_parameters(self):
114
+ # forward
115
+ self.forward()
116
+
117
+ # update D
118
+ self.set_requires_grad(self.netD, True)
119
+ self.optimizer_D.zero_grad()
120
+ self.loss_D = self.compute_D_loss()
121
+ self.loss_D.backward()
122
+ self.optimizer_D.step()
123
+
124
+ # update G
125
+ self.set_requires_grad(self.netD, False)
126
+ self.optimizer_G.zero_grad()
127
+ if self.opt.netF == 'mlp_sample':
128
+ self.optimizer_F.zero_grad()
129
+ self.loss_G = self.compute_G_loss()
130
+ self.loss_G.backward()
131
+ self.optimizer_G.step()
132
+ if self.opt.netF == 'mlp_sample':
133
+ self.optimizer_F.step()
134
+
135
+ def set_input(self, input):
136
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
137
+ Parameters:
138
+ input (dict): include the data itself and its metadata information.
139
+ The option 'direction' can be used to swap domain A and domain B.
140
+ """
141
+ AtoB = self.opt.direction == 'AtoB'
142
+ self.real_A = input['A' if AtoB else 'B'].to(self.device)
143
+ self.real_B = input['B' if AtoB else 'A'].to(self.device)
144
+ self.image_paths = input['A_paths' if AtoB else 'B_paths']
145
+
146
+ def forward(self):
147
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
148
+ self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt and self.opt.isTrain else self.real_A
149
+ if self.opt.flip_equivariance:
150
+ self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
151
+ if self.flipped_for_equivariance:
152
+ self.real = torch.flip(self.real, [3])
153
+
154
+ self.fake = self.netG(self.real)
155
+ self.fake_B = self.fake[:self.real_A.size(0)]
156
+ if self.opt.nce_idt:
157
+ self.idt_B = self.fake[self.real_A.size(0):]
158
+
159
+ def compute_D_loss(self):
160
+ """Calculate GAN loss for the discriminator"""
161
+ fake = self.fake_B.detach()
162
+ # Fake; stop backprop to the generator by detaching fake_B
163
+ pred_fake = self.netD(fake)
164
+ self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
165
+ # Real
166
+ self.pred_real = self.netD(self.real_B)
167
+ loss_D_real = self.criterionGAN(self.pred_real, True)
168
+ self.loss_D_real = loss_D_real.mean()
169
+
170
+ # combine loss and calculate gradients
171
+ self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
172
+ return self.loss_D
173
+
174
+ def compute_G_loss(self):
175
+ """Calculate GAN and NCE loss for the generator"""
176
+ fake = self.fake_B
177
+ # First, G(A) should fake the discriminator
178
+ if self.opt.lambda_GAN > 0.0:
179
+ pred_fake = self.netD(fake)
180
+ self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
181
+ else:
182
+ self.loss_G_GAN = 0.0
183
+
184
+ if self.opt.lambda_NCE > 0.0:
185
+ self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
186
+ else:
187
+ self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0
188
+
189
+ if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
190
+ self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
191
+ loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
192
+ else:
193
+ loss_NCE_both = self.loss_NCE
194
+
195
+ self.loss_G = self.loss_G_GAN + loss_NCE_both
196
+ return self.loss_G
197
+
198
+ def calculate_NCE_loss(self, src, tgt):
199
+ n_layers = len(self.nce_layers)
200
+ feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
201
+
202
+ if self.opt.flip_equivariance and self.flipped_for_equivariance:
203
+ feat_q = [torch.flip(fq, [3]) for fq in feat_q]
204
+
205
+ feat_k = self.netG(src, self.nce_layers, encode_only=True)
206
+ feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
207
+ feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)
208
+
209
+ total_nce_loss = 0.0
210
+ for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
211
+ loss = crit(f_q, f_k) * self.opt.lambda_NCE
212
+ total_nce_loss += loss.mean()
213
+
214
+ return total_nce_loss / n_layers
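For intuition, the PatchNCE criterion used in `calculate_NCE_loss` is at heart a cross-entropy over patch similarities: each query patch from the translated image should match the patch at the same spatial location in the source and mismatch the other sampled patches. A stripped-down sketch of that idea (illustrative only, not the repository's `models/patchnce.py`):

```python
# Sketch: InfoNCE over patch features; the positive for each query is the same-index key.
import torch
import torch.nn.functional as F

def patch_nce_sketch(feat_q, feat_k, tau=0.07):
    # feat_q, feat_k: (num_patches, dim) features of G(X) and X, assumed L2-normalized
    logits = feat_q @ feat_k.t() / tau          # similarity of every query to every key
    targets = torch.arange(feat_q.size(0))      # positive key = same patch index
    return F.cross_entropy(logits, targets)
```

The real loss is additionally weighted by `--lambda_NCE` per layer and can draw negatives from other minibatch samples via the `--nce_includes_all_negatives_from_minibatch` flag defined above.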
models/cycle_gan_model.py ADDED
@@ -0,0 +1,325 @@
1
+ import torch
2
+ import itertools
3
+ from util.image_pool import ImagePool
4
+ from .base_model import BaseModel
5
+ from . import networks
6
+
7
+ try:
8
+ from apex import amp
9
+ except ImportError as error:
10
+ print(error)
11
+
12
+
13
+ class CycleGANModel(BaseModel):
14
+ """
15
+ This class implements the CycleGAN model, for learning image-to-image translation without paired data.
16
+
17
+ The model training requires '--dataset_mode unaligned' dataset.
18
+ By default, it uses a '--netG resnet_9blocks' ResNet generator,
19
+ a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
20
+ and a least-square GANs objective ('--gan_mode lsgan').
21
+
22
+ CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
23
+ """
24
+
25
+ @staticmethod
26
+ def modify_commandline_options(parser, is_train=True):
27
+ """Add new dataset-specific options, and rewrite default values for existing options.
28
+
29
+ Parameters:
30
+ parser -- original option parser
31
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
32
+
33
+ Returns:
34
+ the modified parser.
35
+
36
+ For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
37
+ A (source domain), B (target domain).
38
+ Generators: G_A: A -> B; G_B: B -> A.
39
+ Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
40
+ Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
41
+ Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
42
+ Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
43
+ Dropout is not used in the original CycleGAN paper.
44
+ """
45
+ # parser.set_defaults(no_dropout=True, no_antialias=True, no_antialias_up=True) # default CycleGAN did not use dropout
46
+ # parser.set_defaults(no_dropout=True)
47
+ if is_train:
48
+ parser.add_argument(
49
+ "--lambda_A",
50
+ type=float,
51
+ default=10.0,
52
+ help="weight for cycle loss (A -> B -> A)",
53
+ )
54
+ parser.add_argument(
55
+ "--lambda_B",
56
+ type=float,
57
+ default=10.0,
58
+ help="weight for cycle loss (B -> A -> B)",
59
+ )
60
+ parser.add_argument(
61
+ "--lambda_identity",
62
+ type=float,
63
+ default=0.5,
64
+ help="use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1",
65
+ )
66
+
67
+ return parser
68
+
69
+ def __init__(self, opt):
70
+ """Initialize the CycleGAN class.
71
+
72
+ Parameters:
73
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
74
+ """
75
+ BaseModel.__init__(self, opt)
76
+ # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
77
+ self.loss_names = [
78
+ "D_A",
79
+ "G_A",
80
+ "cycle_A",
81
+ "idt_A",
82
+ "D_B",
83
+ "G_B",
84
+ "cycle_B",
85
+ "idt_B",
86
+ ]
87
+ # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
88
+ visual_names_A = ["real_A", "fake_B", "rec_A"]
89
+ visual_names_B = ["real_B", "fake_A", "rec_B"]
90
+ if (
91
+ self.isTrain and self.opt.lambda_identity > 0.0
92
+ ): # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
93
+ visual_names_A.append("idt_B")
94
+ visual_names_B.append("idt_A")
95
+
96
+ self.visual_names = (
97
+ visual_names_A + visual_names_B
98
+ ) # combine visualizations for A and B
99
+ # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
100
+ if self.isTrain:
101
+ self.model_names = ["G_A", "G_B", "D_A", "D_B"]
102
+ else: # during test time, only load Gs
103
+ self.model_names = ["G_A", "G_B"]
104
+
105
+ # define networks (both Generators and discriminators)
106
+ # The naming is different from those used in the paper.
107
+ # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
108
+ self.netG_A = networks.define_G(
109
+ opt.input_nc,
110
+ opt.output_nc,
111
+ opt.ngf,
112
+ opt.netG,
113
+ opt.normG,
114
+ not opt.no_dropout,
115
+ opt.init_type,
116
+ opt.init_gain,
117
+ opt.no_antialias,
118
+ opt.no_antialias_up,
119
+ self.gpu_ids,
120
+ opt=opt,
121
+ )
122
+ self.netG_B = networks.define_G(
123
+ opt.output_nc,
124
+ opt.input_nc,
125
+ opt.ngf,
126
+ opt.netG,
127
+ opt.normG,
128
+ not opt.no_dropout,
129
+ opt.init_type,
130
+ opt.init_gain,
131
+ opt.no_antialias,
132
+ opt.no_antialias_up,
133
+ self.gpu_ids,
134
+ opt=opt,
135
+ )
136
+
137
+ if self.isTrain: # define discriminators
138
+ self.netD_A = networks.define_D(
139
+ opt.output_nc,
140
+ opt.ndf,
141
+ opt.netD,
142
+ opt.n_layers_D,
143
+ opt.normD,
144
+ opt.init_type,
145
+ opt.init_gain,
146
+ opt.no_antialias,
147
+ self.gpu_ids,
148
+ opt=opt,
149
+ )
150
+ self.netD_B = networks.define_D(
151
+ opt.input_nc,
152
+ opt.ndf,
153
+ opt.netD,
154
+ opt.n_layers_D,
155
+ opt.normD,
156
+ opt.init_type,
157
+ opt.init_gain,
158
+ opt.no_antialias,
159
+ self.gpu_ids,
160
+ opt=opt,
161
+ )
162
+
163
+ if self.isTrain:
164
+ if (
165
+ opt.lambda_identity > 0.0
166
+ ): # only works when input and output images have the same number of channels
167
+ assert opt.input_nc == opt.output_nc
168
+ self.fake_A_pool = ImagePool(
169
+ opt.pool_size
170
+ ) # create image buffer to store previously generated images
171
+ self.fake_B_pool = ImagePool(
172
+ opt.pool_size
173
+ ) # create image buffer to store previously generated images
174
+ # define loss functions
175
+ self.criterionGAN = networks.GANLoss(opt.gan_mode).to(
176
+ self.device
177
+ ) # define GAN loss.
178
+ self.criterionCycle = torch.nn.L1Loss()
179
+ self.criterionIdt = torch.nn.L1Loss()
180
+ # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
181
+ self.optimizer_G = torch.optim.Adam(
182
+ itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
183
+ lr=opt.lr,
184
+ betas=(opt.beta1, 0.999),
185
+ )
186
+ self.optimizer_D = torch.optim.Adam(
187
+ itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
188
+ lr=opt.lr,
189
+ betas=(opt.beta1, 0.999),
190
+ )
191
+ self.optimizers.append(self.optimizer_G)
192
+ self.optimizers.append(self.optimizer_D)
193
+
194
+ def set_input(self, input):
195
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
196
+
197
+ Parameters:
198
+ input (dict): include the data itself and its metadata information.
199
+
200
+ The option 'direction' can be used to swap domain A and domain B.
201
+ """
202
+ AtoB = self.opt.direction == "AtoB"
203
+ self.real_A = input["A" if AtoB else "B"].to(self.device)
204
+ self.real_B = input["B" if AtoB else "A"].to(self.device)
205
+ self.image_paths = input["A_paths" if AtoB else "B_paths"]
206
+
207
+ def forward(self):
208
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
209
+ self.fake_B = self.netG_A(self.real_A) # G_A(A)
210
+ self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
211
+ self.fake_A = self.netG_B(self.real_B) # G_B(B)
212
+ self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
213
+
214
+ def backward_D_basic(self, netD, real, fake):
215
+ """Calculate GAN loss for the discriminator
216
+
217
+ Parameters:
218
+ netD (network) -- the discriminator D
219
+ real (tensor array) -- real images
220
+ fake (tensor array) -- images generated by a generator
221
+
222
+ Return the discriminator loss.
223
+ We also call loss_D.backward() to calculate the gradients.
224
+ """
225
+ # Real
226
+ pred_real = netD(real)
227
+ loss_D_real = self.criterionGAN(pred_real, True)
228
+ # Fake
229
+ pred_fake = netD(fake.detach())
230
+ loss_D_fake = self.criterionGAN(pred_fake, False)
231
+ # Combined loss and calculate gradients
232
+ loss_D = (loss_D_real + loss_D_fake) * 0.5
233
+ # if self.opt.amp:
234
+ # with amp.scale_loss(loss_D, self.optimizer_D) as scaled_loss:
235
+ # scaled_loss.backward()
236
+ # else:
237
+ loss_D.backward()
238
+ return loss_D
239
+
240
+ def backward_D_A(self):
241
+ """Calculate GAN loss for discriminator D_A"""
242
+ fake_B = self.fake_B_pool.query(self.fake_B)
243
+ self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
244
+
245
+ def backward_D_B(self):
246
+ """Calculate GAN loss for discriminator D_B"""
247
+ fake_A = self.fake_A_pool.query(self.fake_A)
248
+ self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
249
+
250
+ def backward_G(self):
251
+ """Calculate the loss for generators G_A and G_B"""
252
+ lambda_idt = self.opt.lambda_identity
253
+ lambda_A = self.opt.lambda_A
254
+ lambda_B = self.opt.lambda_B
255
+ # Identity loss
256
+ if lambda_idt > 0:
257
+ # G_A should be identity if real_B is fed: ||G_A(B) - B||
258
+ self.idt_A = self.netG_A(self.real_B)
259
+ self.loss_idt_A = (
260
+ self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
261
+ )
262
+ # G_B should be identity if real_A is fed: ||G_B(A) - A||
263
+ self.idt_B = self.netG_B(self.real_A)
264
+ self.loss_idt_B = (
265
+ self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
266
+ )
267
+ else:
268
+ self.loss_idt_A = 0
269
+ self.loss_idt_B = 0
270
+
271
+ # GAN loss D_A(G_A(A))
272
+ self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
273
+ # GAN loss D_B(G_B(B))
274
+ self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
275
+ # Forward cycle loss || G_B(G_A(A)) - A||
276
+ self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
277
+ # Backward cycle loss || G_A(G_B(B)) - B||
278
+ self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
279
+ # combined loss and calculate gradients
280
+ self.loss_G = (
281
+ self.loss_G_A
282
+ + self.loss_G_B
283
+ + self.loss_cycle_A
284
+ + self.loss_cycle_B
285
+ + self.loss_idt_A
286
+ + self.loss_idt_B
287
+ )
288
+ # if self.opt.amp:
289
+ # with amp.scale_loss(self.loss_G, self.optimizer_G) as scaled_loss:
290
+ # scaled_loss.backward()
291
+ # else:
292
+ self.loss_G.backward()
293
+
294
+ def data_dependent_initialize(self, *args, **kwargs):
295
+ return
296
+
297
+ def generate_visuals_for_evaluation(self, data, mode):
298
+ with torch.no_grad():
299
+ visuals = {}
300
+ AtoB = self.opt.direction == "AtoB"
301
+ G = self.netG_A
302
+ source = data["A" if AtoB else "B"].to(self.device)
303
+ if mode == "forward":
304
+ visuals["fake_B"] = G(source)
305
+ else:
306
+ raise ValueError("mode %s is not recognized" % mode)
307
+ return visuals
308
+
309
+ def optimize_parameters(self):
310
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
311
+ # forward
312
+ self.forward() # compute fake images and reconstruction images.
313
+ # G_A and G_B
314
+ self.set_requires_grad(
315
+ [self.netD_A, self.netD_B], False
316
+ ) # Ds require no gradients when optimizing Gs
317
+ self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
318
+ self.backward_G() # calculate gradients for G_A and G_B
319
+ self.optimizer_G.step() # update G_A and G_B's weights
320
+ # D_A and D_B
321
+ self.set_requires_grad([self.netD_A, self.netD_B], True)
322
+ self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
323
+ self.backward_D_A() # calculate gradients for D_A
324
+ self.backward_D_B() # calculate gradients for D_B
325
+ self.optimizer_D.step() # update D_A and D_B's weights
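optimize_parameters above is the whole per-iteration recipe: one forward pass, a generator step with both discriminators frozen, then a discriminator step fed from the image pools. A sketch of how a training script would drive it, assuming `model` is an already-constructed CycleGANModel and `dataset` yields dicts in the unaligned-dataset format (keys "A", "B", "A_paths", "B_paths"):

for i, data in enumerate(dataset):
    model.set_input(data)           # move real_A / real_B to the model's device
    model.optimize_parameters()     # forward, then G_A/G_B update, then D_A/D_B update
    if i % 100 == 0:
        print(model.get_current_losses())   # dict keyed by the loss_names listed above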
models/networks.py ADDED
@@ -0,0 +1,1403 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch.nn import init
5
+ import functools
6
+ from torch.optim import lr_scheduler
7
+ import numpy as np
8
+ from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator
9
+
10
+ ###############################################################################
11
+ # Helper Functions
12
+ ###############################################################################
13
+
14
+
15
+ def get_filter(filt_size=3):
16
+ if(filt_size == 1):
17
+ a = np.array([1., ])
18
+ elif(filt_size == 2):
19
+ a = np.array([1., 1.])
20
+ elif(filt_size == 3):
21
+ a = np.array([1., 2., 1.])
22
+ elif(filt_size == 4):
23
+ a = np.array([1., 3., 3., 1.])
24
+ elif(filt_size == 5):
25
+ a = np.array([1., 4., 6., 4., 1.])
26
+ elif(filt_size == 6):
27
+ a = np.array([1., 5., 10., 10., 5., 1.])
28
+ elif(filt_size == 7):
29
+ a = np.array([1., 6., 15., 20., 15., 6., 1.])
30
+
31
+ filt = torch.Tensor(a[:, None] * a[None, :])
32
+ filt = filt / torch.sum(filt)
33
+
34
+ return filt
35
+
36
+
37
+ class Downsample(nn.Module):
38
+ def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
39
+ super(Downsample, self).__init__()
40
+ self.filt_size = filt_size
41
+ self.pad_off = pad_off
42
+ self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
43
+ self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
44
+ self.stride = stride
45
+ self.off = int((self.stride - 1) / 2.)
46
+ self.channels = channels
47
+
48
+ filt = get_filter(filt_size=self.filt_size)
49
+ self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
50
+
51
+ self.pad = get_pad_layer(pad_type)(self.pad_sizes)
52
+
53
+ def forward(self, inp):
54
+ if(self.filt_size == 1):
55
+ if(self.pad_off == 0):
56
+ return inp[:, :, ::self.stride, ::self.stride]
57
+ else:
58
+ return self.pad(inp)[:, :, ::self.stride, ::self.stride]
59
+ else:
60
+ return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
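Illustrative usage of Downsample (a sketch, not part of the uploaded file): the blur-then-stride operation halves the spatial resolution while suppressing aliasing.

down = Downsample(channels=64)               # 3-tap binomial filter, stride 2
out = down(torch.randn(1, 64, 128, 128))
print(out.shape)                              # torch.Size([1, 64, 64, 64])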
61
+
62
+
63
+ class Upsample2(nn.Module):
64
+ def __init__(self, scale_factor, mode='nearest'):
65
+ super().__init__()
66
+ self.factor = scale_factor
67
+ self.mode = mode
68
+
69
+ def forward(self, x):
70
+ return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
71
+
72
+
73
+ class Upsample(nn.Module):
74
+ def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
75
+ super(Upsample, self).__init__()
76
+ self.filt_size = filt_size
77
+ self.filt_odd = np.mod(filt_size, 2) == 1
78
+ self.pad_size = int((filt_size - 1) / 2)
79
+ self.stride = stride
80
+ self.off = int((self.stride - 1) / 2.)
81
+ self.channels = channels
82
+
83
+ filt = get_filter(filt_size=self.filt_size) * (stride**2)
84
+ self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
85
+
86
+ self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
87
+
88
+ def forward(self, inp):
89
+ ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
90
+ if(self.filt_odd):
91
+ return ret_val
92
+ else:
93
+ return ret_val[:, :, :-1, :-1]
94
+
95
+
96
+ def get_pad_layer(pad_type):
97
+ if(pad_type in ['refl', 'reflect']):
98
+ PadLayer = nn.ReflectionPad2d
99
+ elif(pad_type in ['repl', 'replicate']):
100
+ PadLayer = nn.ReplicationPad2d
101
+ elif(pad_type == 'zero'):
102
+ PadLayer = nn.ZeroPad2d
103
+ else:
104
+ print('Pad type [%s] not recognized' % pad_type)
105
+ return PadLayer
106
+
107
+
108
+ class Identity(nn.Module):
109
+ def forward(self, x):
110
+ return x
111
+
112
+
113
+ def get_norm_layer(norm_type='instance'):
114
+ """Return a normalization layer
115
+
116
+ Parameters:
117
+ norm_type (str) -- the name of the normalization layer: batch | instance | none
118
+
119
+ For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
120
+ For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
121
+ """
122
+ if norm_type == 'batch':
123
+ norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
124
+ elif norm_type == 'instance':
125
+ norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
126
+ elif norm_type == 'none':
127
+ def norm_layer(x):
128
+ return Identity()
129
+ else:
130
+ raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
131
+ return norm_layer
132
+
133
+
134
+ def get_scheduler(optimizer, opt):
135
+ """Return a learning rate scheduler
136
+
137
+ Parameters:
138
+ optimizer -- the optimizer of the network
139
+ opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
140
+ opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
141
+
142
+ For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
143
+ and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
144
+ For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
145
+ See https://pytorch.org/docs/stable/optim.html for more details.
146
+ """
147
+ if opt.lr_policy == 'linear':
148
+ def lambda_rule(epoch):
149
+ lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
150
+ return lr_l
151
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
152
+ elif opt.lr_policy == 'step':
153
+ scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
154
+ elif opt.lr_policy == 'plateau':
155
+ scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
156
+ elif opt.lr_policy == 'cosine':
157
+ scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
158
+ else:
159
+ raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
160
+ return scheduler
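A quick numeric check of the 'linear' policy described in the docstring, using illustrative values epoch_count=1, n_epochs=100, n_epochs_decay=100: the multiplier stays at 1.0 for the first 100 epochs and then falls to 0.0 at epoch 200.

def linear_lambda(epoch, epoch_count=1, n_epochs=100, n_epochs_decay=100):
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)

print(linear_lambda(50), linear_lambda(150), linear_lambda(200))   # 1.0, ~0.495, 0.0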
161
+
162
+
163
+ def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
164
+ """Initialize network weights.
165
+
166
+ Parameters:
167
+ net (network) -- network to be initialized
168
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
169
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
170
+
171
+ We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
172
+ work better for some applications. Feel free to try yourself.
173
+ """
174
+ def init_func(m): # define the initialization function
175
+ classname = m.__class__.__name__
176
+ if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
177
+ if debug:
178
+ print(classname)
179
+ if init_type == 'normal':
180
+ init.normal_(m.weight.data, 0.0, init_gain)
181
+ elif init_type == 'xavier':
182
+ init.xavier_normal_(m.weight.data, gain=init_gain)
183
+ elif init_type == 'kaiming':
184
+ init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
185
+ elif init_type == 'orthogonal':
186
+ init.orthogonal_(m.weight.data, gain=init_gain)
187
+ else:
188
+ raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
189
+ if hasattr(m, 'bias') and m.bias is not None:
190
+ init.constant_(m.bias.data, 0.0)
191
+ elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
192
+ init.normal_(m.weight.data, 1.0, init_gain)
193
+ init.constant_(m.bias.data, 0.0)
194
+
195
+ net.apply(init_func) # apply the initialization function <init_func>
196
+
197
+
198
+ def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
199
+ """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
200
+ Parameters:
201
+ net (network) -- the network to be initialized
202
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
203
+ gain (float) -- scaling factor for normal, xavier and orthogonal.
204
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
205
+
206
+ Return an initialized network.
207
+ """
208
+ if len(gpu_ids) > 0:
209
+ assert(torch.cuda.is_available())
210
+ net.to(gpu_ids[0])
211
+ # if not amp:
212
+ # net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
213
+ if initialize_weights:
214
+ init_weights(net, init_type, init_gain=init_gain, debug=debug)
215
+ return net
216
+
217
+
218
+ def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
219
+ init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
220
+ """Create a generator
221
+
222
+ Parameters:
223
+ input_nc (int) -- the number of channels in input images
224
+ output_nc (int) -- the number of channels in output images
225
+ ngf (int) -- the number of filters in the last conv layer
226
+ netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
227
+ norm (str) -- the name of normalization layers used in the network: batch | instance | none
228
+ use_dropout (bool) -- if use dropout layers.
229
+ init_type (str) -- the name of our initialization method.
230
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
231
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
232
+
233
+ Returns a generator
234
+
235
+ Our current implementation provides two types of generators:
236
+ U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
237
+ The original U-Net paper: https://arxiv.org/abs/1505.04597
238
+
239
+ Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
240
+ Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
241
+ We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
242
+
243
+
244
+ The generator has been initialized by <init_net>. It uses RELU for non-linearity.
245
+ """
246
+ net = None
247
+ norm_layer = get_norm_layer(norm_type=norm)
248
+
249
+ if netG == 'resnet_9blocks':
250
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
251
+ elif netG == 'resnet_6blocks':
252
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
253
+ elif netG == 'resnet_4blocks':
254
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
255
+ elif netG == 'unet_128':
256
+ net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
257
+ elif netG == 'unet_256':
258
+ net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
259
+ elif netG == 'stylegan2':
260
+ net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt)
261
+ elif netG == 'smallstylegan2':
262
+ net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt)
263
+ elif netG == 'resnet_cat':
264
+ n_blocks = 8
265
+ net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
266
+ else:
267
+ raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
268
+ return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
269
+
270
+
271
+ def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
272
+ if netF == 'global_pool':
273
+ net = PoolingF()
274
+ elif netF == 'reshape':
275
+ net = ReshapeF()
276
+ elif netF == 'sample':
277
+ net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
278
+ elif netF == 'mlp_sample':
279
+ net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
280
+ elif netF == 'strided_conv':
281
+ net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
282
+ else:
283
+ raise NotImplementedError('projection model name [%s] is not recognized' % netF)
284
+ return init_net(net, init_type, init_gain, gpu_ids)
285
+
286
+
287
+ def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
288
+ """Create a discriminator
289
+
290
+ Parameters:
291
+ input_nc (int) -- the number of channels in input images
292
+ ndf (int) -- the number of filters in the first conv layer
293
+ netD (str) -- the architecture's name: basic | n_layers | pixel
294
+ n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
295
+ norm (str) -- the type of normalization layers used in the network.
296
+ init_type (str) -- the name of the initialization method.
297
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
298
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
299
+
300
+ Returns a discriminator
301
+
302
+ Our current implementation provides three types of discriminators:
303
+ [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
304
+ It can classify whether 70×70 overlapping patches are real or fake.
305
+ Such a patch-level discriminator architecture has fewer parameters
306
+ than a full-image discriminator and can work on arbitrarily-sized images
307
+ in a fully convolutional fashion.
308
+
309
+ [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
310
+ with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
311
+
312
+ [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
313
+ It encourages greater color diversity but has no effect on spatial statistics.
314
+
315
+ The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
316
+ """
317
+ net = None
318
+ norm_layer = get_norm_layer(norm_type=norm)
319
+
320
+ if netD == 'basic': # default PatchGAN classifier
321
+ net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
322
+ elif netD == 'n_layers': # more options
323
+ net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
324
+ elif netD == 'pixel': # classify if each pixel is real or fake
325
+ net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
326
+ elif 'stylegan2' in netD:
327
+ net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
328
+ else:
329
+ raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
330
+ return init_net(net, init_type, init_gain, gpu_ids,
331
+ initialize_weights=('stylegan2' not in netD))
332
+
333
+
334
+ ##############################################################################
335
+ # Classes
336
+ ##############################################################################
337
+ class GANLoss(nn.Module):
338
+ """Define different GAN objectives.
339
+
340
+ The GANLoss class abstracts away the need to create the target label tensor
341
+ that has the same size as the input.
342
+ """
343
+
344
+ def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
345
+ """ Initialize the GANLoss class.
346
+
347
+ Parameters:
348
+ gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
349
+ target_real_label (float) - - label for a real image
350
+ target_fake_label (float) - - label of a fake image
351
+
352
+ Note: Do not use sigmoid as the last layer of Discriminator.
353
+ LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
354
+ """
355
+ super(GANLoss, self).__init__()
356
+ self.register_buffer('real_label', torch.tensor(target_real_label))
357
+ self.register_buffer('fake_label', torch.tensor(target_fake_label))
358
+ self.gan_mode = gan_mode
359
+ if gan_mode == 'lsgan':
360
+ self.loss = nn.MSELoss()
361
+ elif gan_mode == 'vanilla':
362
+ self.loss = nn.BCEWithLogitsLoss()
363
+ elif gan_mode in ['wgangp', 'nonsaturating']:
364
+ self.loss = None
365
+ else:
366
+ raise NotImplementedError('gan mode %s not implemented' % gan_mode)
367
+
368
+ def get_target_tensor(self, prediction, target_is_real):
369
+ """Create label tensors with the same size as the input.
370
+
371
+ Parameters:
372
+ prediction (tensor) - - typically the prediction from a discriminator
373
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
374
+
375
+ Returns:
376
+ A label tensor filled with ground truth label, and with the size of the input
377
+ """
378
+
379
+ if target_is_real:
380
+ target_tensor = self.real_label
381
+ else:
382
+ target_tensor = self.fake_label
383
+ return target_tensor.expand_as(prediction)
384
+
385
+ def __call__(self, prediction, target_is_real):
386
+ """Calculate loss given Discriminator's output and ground truth labels.
387
+
388
+ Parameters:
389
+ prediction (tensor) - - typically the prediction output from a discriminator
390
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
391
+
392
+ Returns:
393
+ the calculated loss.
394
+ """
395
+ bs = prediction.size(0)
396
+ if self.gan_mode in ['lsgan', 'vanilla']:
397
+ target_tensor = self.get_target_tensor(prediction, target_is_real)
398
+ loss = self.loss(prediction, target_tensor)
399
+ elif self.gan_mode == 'wgangp':
400
+ if target_is_real:
401
+ loss = -prediction.mean()
402
+ else:
403
+ loss = prediction.mean()
404
+ elif self.gan_mode == 'nonsaturating':
405
+ if target_is_real:
406
+ loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
407
+ else:
408
+ loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
409
+ return loss
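Illustrative usage of GANLoss (a sketch, not part of the uploaded file): in the default 'lsgan' mode the scalar label is expanded to the prediction's shape and MSE is applied to the raw PatchGAN logits, so the same call works for any output size.

criterion = GANLoss('lsgan')
pred = torch.randn(4, 1, 30, 30)       # PatchGAN-style discriminator output
loss_real = criterion(pred, True)      # MSE against an all-ones target map
loss_fake = criterion(pred, False)     # MSE against an all-zeros target map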
410
+
411
+
412
+ def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
413
+ """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
414
+
415
+ Arguments:
416
+ netD (network) -- discriminator network
417
+ real_data (tensor array) -- real images
418
+ fake_data (tensor array) -- generated images from the generator
419
+ device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
420
+ type (str) -- if we mix real and fake data or not [real | fake | mixed].
421
+ constant (float) -- the constant used in formula (||gradient||_2 - constant)^2
422
+ lambda_gp (float) -- weight for this loss
423
+
424
+ Returns the gradient penalty loss
425
+ """
426
+ if lambda_gp > 0.0:
427
+ if type == 'real': # either use real images, fake images, or a linear interpolation of two.
428
+ interpolatesv = real_data
429
+ elif type == 'fake':
430
+ interpolatesv = fake_data
431
+ elif type == 'mixed':
432
+ alpha = torch.rand(real_data.shape[0], 1, device=device)
433
+ alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
434
+ interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
435
+ else:
436
+ raise NotImplementedError('{} not implemented'.format(type))
437
+ interpolatesv.requires_grad_(True)
438
+ disc_interpolates = netD(interpolatesv)
439
+ gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
440
+ grad_outputs=torch.ones(disc_interpolates.size()).to(device),
441
+ create_graph=True, retain_graph=True, only_inputs=True)
442
+ gradients = gradients[0].view(real_data.size(0), -1) # flatten the data
443
+ gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
444
+ return gradient_penalty, gradients
445
+ else:
446
+ return 0.0, None
447
+
448
+
449
+ class Normalize(nn.Module):
450
+
451
+ def __init__(self, power=2):
452
+ super(Normalize, self).__init__()
453
+ self.power = power
454
+
455
+ def forward(self, x):
456
+ norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
457
+ out = x.div(norm + 1e-7)
458
+ return out
459
+
460
+
461
+ class PoolingF(nn.Module):
462
+ def __init__(self):
463
+ super(PoolingF, self).__init__()
464
+ model = [nn.AdaptiveMaxPool2d(1)]
465
+ self.model = nn.Sequential(*model)
466
+ self.l2norm = Normalize(2)
467
+
468
+ def forward(self, x):
469
+ return self.l2norm(self.model(x))
470
+
471
+
472
+ class ReshapeF(nn.Module):
473
+ def __init__(self):
474
+ super(ReshapeF, self).__init__()
475
+ model = [nn.AdaptiveAvgPool2d(4)]
476
+ self.model = nn.Sequential(*model)
477
+ self.l2norm = Normalize(2)
478
+
479
+ def forward(self, x):
480
+ x = self.model(x)
481
+ x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
482
+ return self.l2norm(x_reshape)
483
+
484
+
485
+ class StridedConvF(nn.Module):
486
+ def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
487
+ super().__init__()
488
+ # self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
489
+ # self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
490
+ self.l2_norm = Normalize(2)
491
+ self.mlps = {}
492
+ self.moving_averages = {}
493
+ self.init_type = init_type
494
+ self.init_gain = init_gain
495
+ self.gpu_ids = gpu_ids
496
+
497
+ def create_mlp(self, x):
498
+ C, H = x.shape[1], x.shape[2]
499
+ n_down = int(np.rint(np.log2(H / 32)))
500
+ mlp = []
501
+ for i in range(n_down):
502
+ mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
503
+ mlp.append(nn.ReLU())
504
+ C = max(C // 2, 64)
505
+ mlp.append(nn.Conv2d(C, 64, 3))
506
+ mlp = nn.Sequential(*mlp)
507
+ init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
508
+ return mlp
509
+
510
+ def update_moving_average(self, key, x):
511
+ if key not in self.moving_averages:
512
+ self.moving_averages[key] = x.detach()
513
+
514
+ self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
515
+
516
+ def forward(self, x, use_instance_norm=False):
517
+ C, H = x.shape[1], x.shape[2]
518
+ key = '%d_%d' % (C, H)
519
+ if key not in self.mlps:
520
+ self.mlps[key] = self.create_mlp(x)
521
+ self.add_module("child_%s" % key, self.mlps[key])
522
+ mlp = self.mlps[key]
523
+ x = mlp(x)
524
+ self.update_moving_average(key, x)
525
+ x = x - self.moving_averages[key]
526
+ if use_instance_norm:
527
+ x = F.instance_norm(x)
528
+ return self.l2_norm(x)
529
+
530
+
531
+ class PatchSampleF(nn.Module):
532
+ def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
533
+ # potential issues: currently, we use the same patch_ids for multiple images in the batch
534
+ super(PatchSampleF, self).__init__()
535
+ self.l2norm = Normalize(2)
536
+ self.use_mlp = use_mlp
537
+ self.nc = nc # hard-coded
538
+ self.mlp_init = False
539
+ self.init_type = init_type
540
+ self.init_gain = init_gain
541
+ self.gpu_ids = gpu_ids
542
+
543
+ def create_mlp(self, feats):
544
+ for mlp_id, feat in enumerate(feats):
545
+ input_nc = feat.shape[1]
546
+ mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
547
+ if len(self.gpu_ids) > 0:
548
+ mlp.cuda()
549
+ setattr(self, 'mlp_%d' % mlp_id, mlp)
550
+ init_net(self, self.init_type, self.init_gain, self.gpu_ids)
551
+ self.mlp_init = True
552
+
553
+ def forward(self, feats, num_patches=64, patch_ids=None):
554
+ return_ids = []
555
+ return_feats = []
556
+ if self.use_mlp and not self.mlp_init:
557
+ self.create_mlp(feats)
558
+ for feat_id, feat in enumerate(feats):
559
+ B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]
560
+ feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)
561
+ if num_patches > 0:
562
+ if patch_ids is not None:
563
+ patch_id = patch_ids[feat_id]
564
+ else:
565
+ # torch.randperm produces cudaErrorIllegalAddress for newer versions of PyTorch. https://github.com/taesungp/contrastive-unpaired-translation/issues/83
566
+ #patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)
567
+ patch_id = np.random.permutation(feat_reshape.shape[1])
568
+ patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
569
+ patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
570
+ x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
571
+ else:
572
+ x_sample = feat_reshape
573
+ patch_id = []
574
+ if self.use_mlp:
575
+ mlp = getattr(self, 'mlp_%d' % feat_id)
576
+ x_sample = mlp(x_sample)
577
+ return_ids.append(patch_id)
578
+ x_sample = self.l2norm(x_sample)
579
+
580
+ if num_patches == 0:
581
+ x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
582
+ return_feats.append(x_sample)
583
+ return return_feats, return_ids
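Illustrative usage of PatchSampleF (a sketch, not part of the uploaded file): the first call draws random patch ids and the second call reuses them, which is how query and key patches are kept spatially aligned.

netF = PatchSampleF(use_mlp=False)
feats_k = [torch.randn(1, 256, 64, 64)]
feats_q = [torch.randn(1, 256, 64, 64)]
pool_k, ids = netF(feats_k, num_patches=64, patch_ids=None)   # sample 64 random locations
pool_q, _ = netF(feats_q, num_patches=64, patch_ids=ids)      # re-sample the same locations
print(pool_k[0].shape)   # torch.Size([64, 256]), i.e. (batch * num_patches, channels)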
584
+
585
+
586
+ class G_Resnet(nn.Module):
587
+ def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
588
+ norm=None, nl_layer=None):
589
+ super(G_Resnet, self).__init__()
590
+ n_downsample = num_downs
591
+ pad_type = 'reflect'
592
+ self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
593
+ if nz == 0:
594
+ self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
595
+ else:
596
+ self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
597
+
598
+ def decode(self, content, style=None):
599
+ return self.dec(content, style)
600
+
601
+ def forward(self, image, style=None, nce_layers=[], encode_only=False):
602
+ content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
603
+ if encode_only:
604
+ return feats
605
+ else:
606
+ images_recon = self.decode(content, style)
607
+ if len(nce_layers) > 0:
608
+ return images_recon, feats
609
+ else:
610
+ return images_recon
611
+
612
+ ##################################################################################
613
+ # Encoder and Decoders
614
+ ##################################################################################
615
+
616
+
617
+ class E_adaIN(nn.Module):
618
+ def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
619
+ norm=None, nl_layer=None, vae=False):
620
+ # style encoder
621
+ super(E_adaIN, self).__init__()
622
+ self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
623
+
624
+ def forward(self, image):
625
+ style = self.enc_style(image)
626
+ return style
627
+
628
+
629
+ class StyleEncoder(nn.Module):
630
+ def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
631
+ super(StyleEncoder, self).__init__()
632
+ self.vae = vae
633
+ self.model = []
634
+ self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
635
+ for i in range(2):
636
+ self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
637
+ dim *= 2
638
+ for i in range(n_downsample - 2):
639
+ self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
640
+ self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
641
+ if self.vae:
642
+ self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
643
+ self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
644
+ else:
645
+ self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
646
+
647
+ self.model = nn.Sequential(*self.model)
648
+ self.output_dim = dim
649
+
650
+ def forward(self, x):
651
+ if self.vae:
652
+ output = self.model(x)
653
+ output = output.view(x.size(0), -1)
654
+ output_mean = self.fc_mean(output)
655
+ output_var = self.fc_var(output)
656
+ return output_mean, output_var
657
+ else:
658
+ return self.model(x).view(x.size(0), -1)
659
+
660
+
661
+ class ContentEncoder(nn.Module):
662
+ def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
663
+ super(ContentEncoder, self).__init__()
664
+ self.model = []
665
+ self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
666
+ # downsampling blocks
667
+ for i in range(n_downsample):
668
+ self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
669
+ dim *= 2
670
+ # residual blocks
671
+ self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
672
+ self.model = nn.Sequential(*self.model)
673
+ self.output_dim = dim
674
+
675
+ def forward(self, x, nce_layers=[], encode_only=False):
676
+ if len(nce_layers) > 0:
677
+ feat = x
678
+ feats = []
679
+ for layer_id, layer in enumerate(self.model):
680
+ feat = layer(feat)
681
+ if layer_id in nce_layers:
682
+ feats.append(feat)
683
+ if layer_id == nce_layers[-1] and encode_only:
684
+ return None, feats
685
+ return feat, feats
686
+ else:
687
+ return self.model(x), None
688
+
689
+ for layer_id, layer in enumerate(self.model):
690
+ print(layer_id, layer)
691
+
692
+
693
+ class Decoder_all(nn.Module):
694
+ def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
695
+ super(Decoder_all, self).__init__()
696
+ # AdaIN residual blocks
697
+ self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
698
+ self.n_blocks = 0
699
+ # upsampling blocks
700
+ for i in range(n_upsample):
701
+ block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
702
+ setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
703
+ self.n_blocks += 1
704
+ dim //= 2
705
+ # use reflection padding in the last conv layer
706
+ setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
707
+ self.n_blocks += 1
708
+
709
+ def forward(self, x, y=None):
710
+ if y is not None:
711
+ output = self.resnet_block(cat_feature(x, y))
712
+ for n in range(self.n_blocks):
713
+ block = getattr(self, 'block_{:d}'.format(n))
714
+ if n > 0:
715
+ output = block(cat_feature(output, y))
716
+ else:
717
+ output = block(output)
718
+ return output
719
+
720
+
721
+ class Decoder(nn.Module):
722
+ def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
723
+ super(Decoder, self).__init__()
724
+
725
+ self.model = []
726
+ # AdaIN residual blocks
727
+ self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
728
+ # upsampling blocks
729
+ for i in range(n_upsample):
730
+ if i == 0:
731
+ input_dim = dim + nz
732
+ else:
733
+ input_dim = dim
734
+ self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
735
+ dim //= 2
736
+ # use reflection padding in the last conv layer
737
+ self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
738
+ self.model = nn.Sequential(*self.model)
739
+
740
+ def forward(self, x, y=None):
741
+ if y is not None:
742
+ return self.model(cat_feature(x, y))
743
+ else:
744
+ return self.model(x)
745
+
746
+ ##################################################################################
747
+ # Sequential Models
748
+ ##################################################################################
749
+
750
+
751
+ class ResBlocks(nn.Module):
752
+ def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
753
+ super(ResBlocks, self).__init__()
754
+ self.model = []
755
+ for i in range(num_blocks):
756
+ self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
757
+ self.model = nn.Sequential(*self.model)
758
+
759
+ def forward(self, x):
760
+ return self.model(x)
761
+
762
+
763
+ ##################################################################################
764
+ # Basic Blocks
765
+ ##################################################################################
766
+ def cat_feature(x, y):
767
+ y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
768
+ y.size(0), y.size(1), x.size(2), x.size(3))
769
+ x_cat = torch.cat([x, y_expand], 1)
770
+ return x_cat
771
+
772
+
773
+ class ResBlock(nn.Module):
774
+ def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
775
+ super(ResBlock, self).__init__()
776
+
777
+ model = []
778
+ model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
779
+ model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
780
+ self.model = nn.Sequential(*model)
781
+
782
+ def forward(self, x):
783
+ residual = x
784
+ out = self.model(x)
785
+ out += residual
786
+ return out
787
+
788
+
789
+ class Conv2dBlock(nn.Module):
790
+ def __init__(self, input_dim, output_dim, kernel_size, stride,
791
+ padding=0, norm='none', activation='relu', pad_type='zero'):
792
+ super(Conv2dBlock, self).__init__()
793
+ self.use_bias = True
794
+ # initialize padding
795
+ if pad_type == 'reflect':
796
+ self.pad = nn.ReflectionPad2d(padding)
797
+ elif pad_type == 'zero':
798
+ self.pad = nn.ZeroPad2d(padding)
799
+ else:
800
+ assert 0, "Unsupported padding type: {}".format(pad_type)
801
+
802
+ # initialize normalization
803
+ norm_dim = output_dim
804
+ if norm == 'batch':
805
+ self.norm = nn.BatchNorm2d(norm_dim)
806
+ elif norm == 'inst':
807
+ self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
808
+ elif norm == 'ln':
809
+ self.norm = LayerNorm(norm_dim)
810
+ elif norm == 'none':
811
+ self.norm = None
812
+ else:
813
+ assert 0, "Unsupported normalization: {}".format(norm)
814
+
815
+ # initialize activation
816
+ if activation == 'relu':
817
+ self.activation = nn.ReLU(inplace=True)
818
+ elif activation == 'lrelu':
819
+ self.activation = nn.LeakyReLU(0.2, inplace=True)
820
+ elif activation == 'prelu':
821
+ self.activation = nn.PReLU()
822
+ elif activation == 'selu':
823
+ self.activation = nn.SELU(inplace=True)
824
+ elif activation == 'tanh':
825
+ self.activation = nn.Tanh()
826
+ elif activation == 'none':
827
+ self.activation = None
828
+ else:
829
+ assert 0, "Unsupported activation: {}".format(activation)
830
+
831
+ # initialize convolution
832
+ self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
833
+
834
+ def forward(self, x):
835
+ x = self.conv(self.pad(x))
836
+ if self.norm:
837
+ x = self.norm(x)
838
+ if self.activation:
839
+ x = self.activation(x)
840
+ return x
841
+
842
+
843
+ class LinearBlock(nn.Module):
844
+ def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
845
+ super(LinearBlock, self).__init__()
846
+ use_bias = True
847
+ # initialize fully connected layer
848
+ self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
849
+
850
+ # initialize normalization
851
+ norm_dim = output_dim
852
+ if norm == 'batch':
853
+ self.norm = nn.BatchNorm1d(norm_dim)
854
+ elif norm == 'inst':
855
+ self.norm = nn.InstanceNorm1d(norm_dim)
856
+ elif norm == 'ln':
857
+ self.norm = LayerNorm(norm_dim)
858
+ elif norm == 'none':
859
+ self.norm = None
860
+ else:
861
+ assert 0, "Unsupported normalization: {}".format(norm)
862
+
863
+ # initialize activation
864
+ if activation == 'relu':
865
+ self.activation = nn.ReLU(inplace=True)
866
+ elif activation == 'lrelu':
867
+ self.activation = nn.LeakyReLU(0.2, inplace=True)
868
+ elif activation == 'prelu':
869
+ self.activation = nn.PReLU()
870
+ elif activation == 'selu':
871
+ self.activation = nn.SELU(inplace=True)
872
+ elif activation == 'tanh':
873
+ self.activation = nn.Tanh()
874
+ elif activation == 'none':
875
+ self.activation = None
876
+ else:
877
+ assert 0, "Unsupported activation: {}".format(activation)
878
+
879
+ def forward(self, x):
880
+ out = self.fc(x)
881
+ if self.norm:
882
+ out = self.norm(out)
883
+ if self.activation:
884
+ out = self.activation(out)
885
+ return out
886
+
887
+ ##################################################################################
888
+ # Normalization layers
889
+ ##################################################################################
890
+
891
+
892
+ class LayerNorm(nn.Module):
893
+ def __init__(self, num_features, eps=1e-5, affine=True):
894
+ super(LayerNorm, self).__init__()
895
+ self.num_features = num_features
896
+ self.affine = affine
897
+ self.eps = eps
898
+
899
+ if self.affine:
900
+ self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
901
+ self.beta = nn.Parameter(torch.zeros(num_features))
902
+
903
+ def forward(self, x):
904
+ shape = [-1] + [1] * (x.dim() - 1)
905
+ mean = x.view(x.size(0), -1).mean(1).view(*shape)
906
+ std = x.view(x.size(0), -1).std(1).view(*shape)
907
+ x = (x - mean) / (std + self.eps)
908
+
909
+ if self.affine:
910
+ shape = [1, -1] + [1] * (x.dim() - 2)
911
+ x = x * self.gamma.view(*shape) + self.beta.view(*shape)
912
+ return x
913
+
914
+
915
+ class ResnetGenerator(nn.Module):
916
+ """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
917
+
918
+ We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
919
+ """
920
+
921
+ def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
922
+ """Construct a Resnet-based generator
923
+
924
+ Parameters:
925
+ input_nc (int) -- the number of channels in input images
926
+ output_nc (int) -- the number of channels in output images
927
+ ngf (int) -- the number of filters in the last conv layer
928
+ norm_layer -- normalization layer
929
+ use_dropout (bool) -- if use dropout layers
930
+ n_blocks (int) -- the number of ResNet blocks
931
+ padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
932
+ """
933
+ assert(n_blocks >= 0)
934
+ super(ResnetGenerator, self).__init__()
935
+ self.opt = opt
936
+ if type(norm_layer) == functools.partial:
937
+ use_bias = norm_layer.func == nn.InstanceNorm2d
938
+ else:
939
+ use_bias = norm_layer == nn.InstanceNorm2d
940
+
941
+ model = [nn.ReflectionPad2d(3),
942
+ nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
943
+ norm_layer(ngf),
944
+ nn.ReLU(True)]
945
+
946
+ n_downsampling = 2
947
+ for i in range(n_downsampling): # add downsampling layers
948
+ mult = 2 ** i
949
+ if(no_antialias):
950
+ model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
951
+ norm_layer(ngf * mult * 2),
952
+ nn.ReLU(True)]
953
+ else:
954
+ model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
955
+ norm_layer(ngf * mult * 2),
956
+ nn.ReLU(True),
957
+ Downsample(ngf * mult * 2)]
958
+
959
+ mult = 2 ** n_downsampling
960
+ for i in range(n_blocks): # add ResNet blocks
961
+
962
+ model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
963
+
964
+ for i in range(n_downsampling): # add upsampling layers
965
+ mult = 2 ** (n_downsampling - i)
966
+ if no_antialias_up:
967
+ model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
968
+ kernel_size=3, stride=2,
969
+ padding=1, output_padding=1,
970
+ bias=use_bias),
971
+ norm_layer(int(ngf * mult / 2)),
972
+ nn.ReLU(True)]
973
+ else:
974
+ model += [Upsample(ngf * mult),
975
+ nn.Conv2d(ngf * mult, int(ngf * mult / 2),
976
+ kernel_size=3, stride=1,
977
+ padding=1, # output_padding=1,
978
+ bias=use_bias),
979
+ norm_layer(int(ngf * mult / 2)),
980
+ nn.ReLU(True)]
981
+ model += [nn.ReflectionPad2d(3)]
982
+ model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
983
+ model += [nn.Tanh()]
984
+
985
+ self.model = nn.Sequential(*model)
986
+
987
+ def forward(self, input, layers=[], encode_only=False):
988
+ if -1 in layers:
989
+ layers.append(len(self.model))
990
+ if len(layers) > 0:
991
+ feat = input
992
+ feats = []
993
+ for layer_id, layer in enumerate(self.model):
994
+ # print(layer_id, layer)
995
+ feat = layer(feat)
996
+ if layer_id in layers:
997
+ # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
998
+ feats.append(feat)
999
+ else:
1000
+ # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
1001
+ pass
1002
+ if layer_id == layers[-1] and encode_only:
1003
+ # print('encoder only return features')
1004
+ return feats # return intermediate features alone; stop in the last layers
1005
+
1006
+ return feat, feats # return both output and intermediate features
1007
+ else:
1008
+ """Standard forward"""
1009
+ fake = self.model(input)
1010
+ return fake
1011
+
1012
+
1013
+ class ResnetDecoder(nn.Module):
1014
+ """Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
1015
+ """
1016
+
1017
+ def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
1018
+ """Construct a Resnet-based decoder
1019
+
1020
+ Parameters:
1021
+ input_nc (int) -- the number of channels in input images
1022
+ output_nc (int) -- the number of channels in output images
1023
+ ngf (int) -- the number of filters in the last conv layer
1024
+ norm_layer -- normalization layer
1025
+ use_dropout (bool) -- if use dropout layers
1026
+ n_blocks (int) -- the number of ResNet blocks
1027
+ padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
1028
+ """
1029
+ assert(n_blocks >= 0)
1030
+ super(ResnetDecoder, self).__init__()
1031
+ if type(norm_layer) == functools.partial:
1032
+ use_bias = norm_layer.func == nn.InstanceNorm2d
1033
+ else:
1034
+ use_bias = norm_layer == nn.InstanceNorm2d
1035
+ model = []
1036
+ n_downsampling = 2
1037
+ mult = 2 ** n_downsampling
1038
+ for i in range(n_blocks): # add ResNet blocks
1039
+
1040
+ model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
1041
+
1042
+ for i in range(n_downsampling): # add upsampling layers
1043
+ mult = 2 ** (n_downsampling - i)
1044
+ if(no_antialias):
1045
+ model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
1046
+ kernel_size=3, stride=2,
1047
+ padding=1, output_padding=1,
1048
+ bias=use_bias),
1049
+ norm_layer(int(ngf * mult / 2)),
1050
+ nn.ReLU(True)]
1051
+ else:
1052
+ model += [Upsample(ngf * mult),
1053
+ nn.Conv2d(ngf * mult, int(ngf * mult / 2),
1054
+ kernel_size=3, stride=1,
1055
+ padding=1,
1056
+ bias=use_bias),
1057
+ norm_layer(int(ngf * mult / 2)),
1058
+ nn.ReLU(True)]
1059
+ model += [nn.ReflectionPad2d(3)]
1060
+ model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
1061
+ model += [nn.Tanh()]
1062
+
1063
+ self.model = nn.Sequential(*model)
1064
+
1065
+ def forward(self, input):
1066
+ """Standard forward"""
1067
+ return self.model(input)
1068
+
1069
+
1070
+ class ResnetEncoder(nn.Module):
1071
+ """Resnet-based encoder that consists of a few downsampling + several Resnet blocks
1072
+ """
1073
+
1074
+ def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
1075
+ """Construct a Resnet-based encoder
1076
+
1077
+ Parameters:
1078
+ input_nc (int) -- the number of channels in input images
1079
+ output_nc (int) -- the number of channels in output images
1080
+ ngf (int) -- the number of filters in the last conv layer
1081
+ norm_layer -- normalization layer
1082
+ use_dropout (bool) -- if use dropout layers
1083
+ n_blocks (int) -- the number of ResNet blocks
1084
+ padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
1085
+ """
1086
+ assert(n_blocks >= 0)
1087
+ super(ResnetEncoder, self).__init__()
1088
+ if type(norm_layer) == functools.partial:
1089
+ use_bias = norm_layer.func == nn.InstanceNorm2d
1090
+ else:
1091
+ use_bias = norm_layer == nn.InstanceNorm2d
1092
+
1093
+ model = [nn.ReflectionPad2d(3),
1094
+ nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
1095
+ norm_layer(ngf),
1096
+ nn.ReLU(True)]
1097
+
1098
+ n_downsampling = 2
1099
+ for i in range(n_downsampling): # add downsampling layers
1100
+ mult = 2 ** i
1101
+ if(no_antialias):
1102
+ model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
1103
+ norm_layer(ngf * mult * 2),
1104
+ nn.ReLU(True)]
1105
+ else:
1106
+ model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
1107
+ norm_layer(ngf * mult * 2),
1108
+ nn.ReLU(True),
1109
+ Downsample(ngf * mult * 2)]
1110
+
1111
+ mult = 2 ** n_downsampling
1112
+ for i in range(n_blocks): # add ResNet blocks
1113
+
1114
+ model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
1115
+
1116
+ self.model = nn.Sequential(*model)
1117
+
1118
+ def forward(self, input):
1119
+ """Standard forward"""
1120
+ return self.model(input)
1121
+
1122
+
1123
+ class ResnetBlock(nn.Module):
1124
+ """Define a Resnet block"""
1125
+
1126
+ def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
1127
+ """Initialize the Resnet block
1128
+
1129
+ A resnet block is a conv block with skip connections
1130
+ We construct a conv block with build_conv_block function,
1131
+ and implement skip connections in <forward> function.
1132
+ Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
1133
+ """
1134
+ super(ResnetBlock, self).__init__()
1135
+ self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
1136
+
1137
+ def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
1138
+ """Construct a convolutional block.
1139
+
1140
+ Parameters:
1141
+ dim (int) -- the number of channels in the conv layer.
1142
+ padding_type (str) -- the name of padding layer: reflect | replicate | zero
1143
+ norm_layer -- normalization layer
1144
+ use_dropout (bool) -- if use dropout layers.
1145
+ use_bias (bool) -- if the conv layer uses bias or not
1146
+
1147
+ Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
1148
+ """
1149
+ conv_block = []
1150
+ p = 0
1151
+ if padding_type == 'reflect':
1152
+ conv_block += [nn.ReflectionPad2d(1)]
1153
+ elif padding_type == 'replicate':
1154
+ conv_block += [nn.ReplicationPad2d(1)]
1155
+ elif padding_type == 'zero':
1156
+ p = 1
1157
+ else:
1158
+ raise NotImplementedError('padding [%s] is not implemented' % padding_type)
1159
+
1160
+ conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
1161
+ if use_dropout:
1162
+ conv_block += [nn.Dropout(0.5)]
1163
+
1164
+ p = 0
1165
+ if padding_type == 'reflect':
1166
+ conv_block += [nn.ReflectionPad2d(1)]
1167
+ elif padding_type == 'replicate':
1168
+ conv_block += [nn.ReplicationPad2d(1)]
1169
+ elif padding_type == 'zero':
1170
+ p = 1
1171
+ else:
1172
+ raise NotImplementedError('padding [%s] is not implemented' % padding_type)
1173
+ conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
1174
+
1175
+ return nn.Sequential(*conv_block)
1176
+
1177
+ def forward(self, x):
1178
+ """Forward function (with skip connections)"""
1179
+ out = x + self.conv_block(x) # add skip connections
1180
+ return out
1181
+
1182
+
1183
+ class UnetGenerator(nn.Module):
1184
+ """Create a Unet-based generator"""
1185
+
1186
+ def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
1187
+ """Construct a Unet generator
1188
+ Parameters:
1189
+ input_nc (int) -- the number of channels in input images
1190
+ output_nc (int) -- the number of channels in output images
1191
+ num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
1192
+ image of size 128x128 will become of size 1x1 at the bottleneck
1193
+ ngf (int) -- the number of filters in the last conv layer
1194
+ norm_layer -- normalization layer
1195
+
1196
+ We construct the U-Net from the innermost layer to the outermost layer.
1197
+ It is a recursive process.
1198
+ """
1199
+ super(UnetGenerator, self).__init__()
1200
+ # construct unet structure
1201
+ unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
1202
+ for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
1203
+ unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
1204
+ # gradually reduce the number of filters from ngf * 8 to ngf
1205
+ unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
1206
+ unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
1207
+ unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
1208
+ self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
1209
+
1210
+ def forward(self, input):
1211
+ """Standard forward"""
1212
+ return self.model(input)
1213
+
1214
+
1215
+ class UnetSkipConnectionBlock(nn.Module):
1216
+ """Defines the Unet submodule with skip connection.
1217
+ X -------------------identity----------------------
1218
+ |-- downsampling -- |submodule| -- upsampling --|
1219
+ """
1220
+
1221
+ def __init__(self, outer_nc, inner_nc, input_nc=None,
1222
+ submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
1223
+ """Construct a Unet submodule with skip connections.
1224
+
1225
+ Parameters:
1226
+ outer_nc (int) -- the number of filters in the outer conv layer
1227
+ inner_nc (int) -- the number of filters in the inner conv layer
1228
+ input_nc (int) -- the number of channels in input images/features
1229
+ submodule (UnetSkipConnectionBlock) -- previously defined submodules
1230
+ outermost (bool) -- if this module is the outermost module
1231
+ innermost (bool) -- if this module is the innermost module
1232
+ norm_layer -- normalization layer
1233
+ use_dropout (bool) -- if use dropout layers.
1234
+ """
1235
+ super(UnetSkipConnectionBlock, self).__init__()
1236
+ self.outermost = outermost
1237
+ if type(norm_layer) == functools.partial:
1238
+ use_bias = norm_layer.func == nn.InstanceNorm2d
1239
+ else:
1240
+ use_bias = norm_layer == nn.InstanceNorm2d
1241
+ if input_nc is None:
1242
+ input_nc = outer_nc
1243
+ downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
1244
+ stride=2, padding=1, bias=use_bias)
1245
+ downrelu = nn.LeakyReLU(0.2, True)
1246
+ downnorm = norm_layer(inner_nc)
1247
+ uprelu = nn.ReLU(True)
1248
+ upnorm = norm_layer(outer_nc)
1249
+
1250
+ if outermost:
1251
+ upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
1252
+ kernel_size=4, stride=2,
1253
+ padding=1)
1254
+ down = [downconv]
1255
+ up = [uprelu, upconv, nn.Tanh()]
1256
+ model = down + [submodule] + up
1257
+ elif innermost:
1258
+ upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
1259
+ kernel_size=4, stride=2,
1260
+ padding=1, bias=use_bias)
1261
+ down = [downrelu, downconv]
1262
+ up = [uprelu, upconv, upnorm]
1263
+ model = down + up
1264
+ else:
1265
+ upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
1266
+ kernel_size=4, stride=2,
1267
+ padding=1, bias=use_bias)
1268
+ down = [downrelu, downconv, downnorm]
1269
+ up = [uprelu, upconv, upnorm]
1270
+
1271
+ if use_dropout:
1272
+ model = down + [submodule] + up + [nn.Dropout(0.5)]
1273
+ else:
1274
+ model = down + [submodule] + up
1275
+
1276
+ self.model = nn.Sequential(*model)
1277
+
1278
+ def forward(self, x):
1279
+ if self.outermost:
1280
+ return self.model(x)
1281
+ else: # add skip connections
1282
+ return torch.cat([x, self.model(x)], 1)
1283
+
1284
+
1285
+ class NLayerDiscriminator(nn.Module):
1286
+ """Defines a PatchGAN discriminator"""
1287
+
1288
+ def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
1289
+ """Construct a PatchGAN discriminator
1290
+
1291
+ Parameters:
1292
+ input_nc (int) -- the number of channels in input images
1293
+ ndf (int) -- the number of filters in the last conv layer
1294
+ n_layers (int) -- the number of conv layers in the discriminator
1295
+ norm_layer -- normalization layer
1296
+ """
1297
+ super(NLayerDiscriminator, self).__init__()
1298
+ if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
1299
+ use_bias = norm_layer.func == nn.InstanceNorm2d
1300
+ else:
1301
+ use_bias = norm_layer == nn.InstanceNorm2d
1302
+
1303
+ kw = 4
1304
+ padw = 1
1305
+ if(no_antialias):
1306
+ sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
1307
+ else:
1308
+ sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
1309
+ nf_mult = 1
1310
+ nf_mult_prev = 1
1311
+ for n in range(1, n_layers): # gradually increase the number of filters
1312
+ nf_mult_prev = nf_mult
1313
+ nf_mult = min(2 ** n, 8)
1314
+ if(no_antialias):
1315
+ sequence += [
1316
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
1317
+ norm_layer(ndf * nf_mult),
1318
+ nn.LeakyReLU(0.2, True)
1319
+ ]
1320
+ else:
1321
+ sequence += [
1322
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
1323
+ norm_layer(ndf * nf_mult),
1324
+ nn.LeakyReLU(0.2, True),
1325
+ Downsample(ndf * nf_mult)]
1326
+
1327
+ nf_mult_prev = nf_mult
1328
+ nf_mult = min(2 ** n_layers, 8)
1329
+ sequence += [
1330
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
1331
+ norm_layer(ndf * nf_mult),
1332
+ nn.LeakyReLU(0.2, True)
1333
+ ]
1334
+
1335
+ sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
1336
+ self.model = nn.Sequential(*sequence)
1337
+
1338
+ def forward(self, input):
1339
+ """Standard forward."""
1340
+ return self.model(input)
1341
+
1342
+
1343
+ class PixelDiscriminator(nn.Module):
1344
+ """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
1345
+
1346
+ def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
1347
+ """Construct a 1x1 PatchGAN discriminator
1348
+
1349
+ Parameters:
1350
+ input_nc (int) -- the number of channels in input images
1351
+ ndf (int) -- the number of filters in the last conv layer
1352
+ norm_layer -- normalization layer
1353
+ """
1354
+ super(PixelDiscriminator, self).__init__()
1355
+ if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
1356
+ use_bias = norm_layer.func == nn.InstanceNorm2d
1357
+ else:
1358
+ use_bias = norm_layer == nn.InstanceNorm2d
1359
+
1360
+ self.net = [
1361
+ nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
1362
+ nn.LeakyReLU(0.2, True),
1363
+ nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
1364
+ norm_layer(ndf * 2),
1365
+ nn.LeakyReLU(0.2, True),
1366
+ nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
1367
+
1368
+ self.net = nn.Sequential(*self.net)
1369
+
1370
+ def forward(self, input):
1371
+ """Standard forward."""
1372
+ return self.net(input)
1373
+
1374
+
1375
+ class PatchDiscriminator(NLayerDiscriminator):
1376
+ """Defines a PatchGAN discriminator"""
1377
+
1378
+ def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
1379
+ super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
1380
+
1381
+ def forward(self, input):
1382
+ B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
1383
+ size = 16
1384
+ Y = H // size
1385
+ X = W // size
1386
+ input = input.view(B, C, Y, size, X, size)
1387
+ input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
1388
+ return super().forward(input)
1389
+
1390
+
1391
+ class GroupedChannelNorm(nn.Module):
1392
+ def __init__(self, num_groups):
1393
+ super().__init__()
1394
+ self.num_groups = num_groups
1395
+
1396
+ def forward(self, x):
1397
+ shape = list(x.shape)
1398
+ new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
1399
+ x = x.view(*new_shape)
1400
+ mean = x.mean(dim=2, keepdim=True)
1401
+ std = x.std(dim=2, keepdim=True)
1402
+ x_norm = (x - mean) / (std + 1e-7)
1403
+ return x_norm.view(*shape)
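The generator forwards above expose an encode_only path that returns the intermediate features at the requested layer indices instead of (or alongside) the translated image; this is the feature interface consumed by the PatchNCE loss added below. A minimal usage sketch, assuming the forward signature forward(input, layers=[], encode_only=False) defined earlier in networks.py; the layer indices and input size here are only illustrative:

import torch
from models.networks import ResnetGenerator

netG = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
x = torch.randn(1, 3, 256, 256)
feats = netG(x, layers=[0, 4, 8, 12, 16], encode_only=True)  # list of intermediate feature maps
fake = netG(x)                                               # standard forward: only the translated image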
models/patchnce.py ADDED
@@ -0,0 +1,55 @@
1
+ from packaging import version
2
+ import torch
3
+ from torch import nn
4
+
5
+
6
+ class PatchNCELoss(nn.Module):
7
+ def __init__(self, opt):
8
+ super().__init__()
9
+ self.opt = opt
10
+ self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
11
+ self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool
12
+
13
+ def forward(self, feat_q, feat_k):
14
+ num_patches = feat_q.shape[0]
15
+ dim = feat_q.shape[1]
16
+ feat_k = feat_k.detach()
17
+
18
+ # pos logit
19
+ l_pos = torch.bmm(
20
+ feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1))
21
+ l_pos = l_pos.view(num_patches, 1)
22
+
23
+ # neg logit
24
+
25
+ # Should the negatives from the other samples of a minibatch be utilized?
26
+ # In CUT and FastCUT, we found that it's best to only include negatives
27
+ # from the same image. Therefore, we set
28
+ # --nce_includes_all_negatives_from_minibatch as False
29
+ # However, for single-image translation, the minibatch consists of
30
+ # crops from the "same" high-resolution image.
31
+ # Therefore, we will include the negatives from the entire minibatch.
32
+ if self.opt.nce_includes_all_negatives_from_minibatch:
33
+ # reshape features as if they are all negatives of minibatch of size 1.
34
+ batch_dim_for_bmm = 1
35
+ else:
36
+ batch_dim_for_bmm = self.opt.batch_size
37
+
38
+ # reshape features to batch size
39
+ feat_q = feat_q.view(batch_dim_for_bmm, -1, dim)
40
+ feat_k = feat_k.view(batch_dim_for_bmm, -1, dim)
41
+ npatches = feat_q.size(1)
42
+ l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))
43
+
44
+ # diagonal entries are similarity between same features, and hence meaningless.
45
+ # fill the diagonal with a very negative logit (-10), so its softmax weight exp(-10) is almost zero
46
+ diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]
47
+ l_neg_curbatch.masked_fill_(diagonal, -10.0)
48
+ l_neg = l_neg_curbatch.view(-1, npatches)
49
+
50
+ out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T
51
+
52
+ loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
53
+ device=feat_q.device))
54
+
55
+ return loss
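PatchNCELoss is an InfoNCE-style cross-entropy: for each query patch the positive logit (the matching patch in feat_k) is placed at index 0, all other patches of the same image serve as negatives, and the diagonal of the negative matrix is masked out so a patch is never its own negative. A small sketch of calling it directly; the option values below mirror the CUT defaults but are set by hand here as an assumption:

import torch
from types import SimpleNamespace
from models.patchnce import PatchNCELoss

opt = SimpleNamespace(nce_includes_all_negatives_from_minibatch=False, batch_size=1, nce_T=0.07)
criterion = PatchNCELoss(opt)
feat_q = torch.randn(256, 256)           # 256 sampled patches x 256-dim features from the translated image
feat_k = torch.randn(256, 256)           # the corresponding patches from the input image
loss = criterion(feat_q, feat_k).mean()  # per-patch losses are averaged by the caller, as in cut_model.py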
models/sincut_model.py ADDED
@@ -0,0 +1,79 @@
1
+ import torch
2
+ from .cut_model import CUTModel
3
+
4
+
5
+ class SinCUTModel(CUTModel):
6
+ """ This class implements the single image translation model (Fig 9) of
7
+ Contrastive Learning for Unpaired Image-to-Image Translation
8
+ Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
9
+ ECCV, 2020
10
+ """
11
+
12
+ @staticmethod
13
+ def modify_commandline_options(parser, is_train=True):
14
+ parser = CUTModel.modify_commandline_options(parser, is_train)
15
+ parser.add_argument('--lambda_R1', type=float, default=1.0,
16
+ help='weight for the R1 gradient penalty')
17
+ parser.add_argument('--lambda_identity', type=float, default=1.0,
18
+ help='weight for the identity preservation loss')
19
+
20
+ parser.set_defaults(nce_includes_all_negatives_from_minibatch=True,
21
+ dataset_mode="singleimage",
22
+ netG="stylegan2",
23
+ stylegan2_G_num_downsampling=1,
24
+ netD="stylegan2",
25
+ gan_mode="nonsaturating",
26
+ num_patches=1,
27
+ nce_layers="0,2,4",
28
+ lambda_NCE=4.0,
29
+ ngf=10,
30
+ ndf=8,
31
+ lr=0.002,
32
+ beta1=0.0,
33
+ beta2=0.99,
34
+ load_size=1024,
35
+ crop_size=64,
36
+ preprocess="zoom_and_patch",
37
+ )
38
+
39
+ if is_train:
40
+ parser.set_defaults(preprocess="zoom_and_patch",
41
+ batch_size=16,
42
+ save_epoch_freq=1,
43
+ save_latest_freq=20000,
44
+ n_epochs=8,
45
+ n_epochs_decay=8,
46
+
47
+ )
48
+ else:
49
+ parser.set_defaults(preprocess="none", # load the whole image as it is
50
+ batch_size=1,
51
+ num_test=1,
52
+ )
53
+
54
+ return parser
55
+
56
+ def __init__(self, opt):
57
+ super().__init__(opt)
58
+ if self.isTrain:
59
+ if opt.lambda_R1 > 0.0:
60
+ self.loss_names += ['D_R1']
61
+ if opt.lambda_identity > 0.0:
62
+ self.loss_names += ['idt']
63
+
64
+ def compute_D_loss(self):
65
+ self.real_B.requires_grad_()
66
+ GAN_loss_D = super().compute_D_loss()
67
+ self.loss_D_R1 = self.R1_loss(self.pred_real, self.real_B)
68
+ self.loss_D = GAN_loss_D + self.loss_D_R1
69
+ return self.loss_D
70
+
71
+ def compute_G_loss(self):
72
+ CUT_loss_G = super().compute_G_loss()
73
+ self.loss_idt = torch.nn.functional.l1_loss(self.idt_B, self.real_B) * self.opt.lambda_identity
74
+ return CUT_loss_G + self.loss_idt
75
+
76
+ def R1_loss(self, real_pred, real_img):
77
+ grad_real, = torch.autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True, retain_graph=True)
78
+ grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
79
+ return grad_penalty * (self.opt.lambda_R1 * 0.5)
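R1_loss above is the R1 gradient penalty (Mescheder et al., 2018): the squared gradient norm of the discriminator output with respect to the real image, scaled by lambda_R1 / 2, which is why compute_D_loss calls self.real_B.requires_grad_(). A standalone sketch of the same computation, with a hypothetical discriminator and batch:

import torch

def r1_penalty(netD, real, lambda_R1=1.0):
    real = real.detach().requires_grad_(True)
    pred = netD(real)
    grad, = torch.autograd.grad(outputs=pred.sum(), inputs=real, create_graph=True)
    return grad.pow(2).reshape(grad.shape[0], -1).sum(1).mean() * (lambda_R1 * 0.5)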
models/stylegan_networks.py ADDED
@@ -0,0 +1,914 @@
1
+ """
2
+ The network architecture is based on a PyTorch implementation of StyleGAN2.
3
+ Original PyTorch repo: https://github.com/rosinality/style-based-gan-pytorch
4
+ Original StyleGAN2 repo: https://github.com/NVlabs/stylegan2
5
+ We use this network architecture for our single-image training setting.
6
+ """
7
+
8
+ import math
9
+ import numpy as np
10
+ import random
11
+
12
+ import torch
13
+ from torch import nn
14
+ from torch.nn import functional as F
15
+
16
+
17
+ def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
18
+ return F.leaky_relu(input + bias, negative_slope) * scale
19
+
20
+
21
+ class FusedLeakyReLU(nn.Module):
22
+ def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
23
+ super().__init__()
24
+ self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1))
25
+ self.negative_slope = negative_slope
26
+ self.scale = scale
27
+
28
+ def forward(self, input):
29
+ # print("FusedLeakyReLU: ", input.abs().mean())
30
+ out = fused_leaky_relu(input, self.bias,
31
+ self.negative_slope,
32
+ self.scale)
33
+ # print("FusedLeakyReLU: ", out.abs().mean())
34
+ return out
35
+
36
+
37
+ def upfirdn2d_native(
38
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
39
+ ):
40
+ _, minor, in_h, in_w = input.shape
41
+ kernel_h, kernel_w = kernel.shape
42
+
43
+ out = input.view(-1, minor, in_h, 1, in_w, 1)
44
+ out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
45
+ out = out.view(-1, minor, in_h * up_y, in_w * up_x)
46
+
47
+ out = F.pad(
48
+ out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
49
+ )
50
+ out = out[
51
+ :,
52
+ :,
53
+ max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0),
54
+ max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0),
55
+ ]
56
+
57
+ # out = out.permute(0, 3, 1, 2)
58
+ out = out.reshape(
59
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
60
+ )
61
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
62
+ out = F.conv2d(out, w)
63
+ out = out.reshape(
64
+ -1,
65
+ minor,
66
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
67
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
68
+ )
69
+ # out = out.permute(0, 2, 3, 1)
70
+
71
+ return out[:, :, ::down_y, ::down_x]
72
+
73
+
74
+ def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
75
+ return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
76
+
77
+
78
+ class PixelNorm(nn.Module):
79
+ def __init__(self):
80
+ super().__init__()
81
+
82
+ def forward(self, input):
83
+ return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
84
+
85
+
86
+ def make_kernel(k):
87
+ k = torch.tensor(k, dtype=torch.float32)
88
+
89
+ if len(k.shape) == 1:
90
+ k = k[None, :] * k[:, None]
91
+
92
+ k /= k.sum()
93
+
94
+ return k
95
+
96
+
97
+ class Upsample(nn.Module):
98
+ def __init__(self, kernel, factor=2):
99
+ super().__init__()
100
+
101
+ self.factor = factor
102
+ kernel = make_kernel(kernel) * (factor ** 2)
103
+ self.register_buffer('kernel', kernel)
104
+
105
+ p = kernel.shape[0] - factor
106
+
107
+ pad0 = (p + 1) // 2 + factor - 1
108
+ pad1 = p // 2
109
+
110
+ self.pad = (pad0, pad1)
111
+
112
+ def forward(self, input):
113
+ out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
114
+
115
+ return out
116
+
117
+
118
+ class Downsample(nn.Module):
119
+ def __init__(self, kernel, factor=2):
120
+ super().__init__()
121
+
122
+ self.factor = factor
123
+ kernel = make_kernel(kernel)
124
+ self.register_buffer('kernel', kernel)
125
+
126
+ p = kernel.shape[0] - factor
127
+
128
+ pad0 = (p + 1) // 2
129
+ pad1 = p // 2
130
+
131
+ self.pad = (pad0, pad1)
132
+
133
+ def forward(self, input):
134
+ out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
135
+
136
+ return out
137
+
138
+
139
+ class Blur(nn.Module):
140
+ def __init__(self, kernel, pad, upsample_factor=1):
141
+ super().__init__()
142
+
143
+ kernel = make_kernel(kernel)
144
+
145
+ if upsample_factor > 1:
146
+ kernel = kernel * (upsample_factor ** 2)
147
+
148
+ self.register_buffer('kernel', kernel)
149
+
150
+ self.pad = pad
151
+
152
+ def forward(self, input):
153
+ out = upfirdn2d(input, self.kernel, pad=self.pad)
154
+
155
+ return out
156
+
157
+
158
+ class EqualConv2d(nn.Module):
159
+ def __init__(
160
+ self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
161
+ ):
162
+ super().__init__()
163
+
164
+ self.weight = nn.Parameter(
165
+ torch.randn(out_channel, in_channel, kernel_size, kernel_size)
166
+ )
167
+ self.scale = math.sqrt(1) / math.sqrt(in_channel * (kernel_size ** 2))
168
+
169
+ self.stride = stride
170
+ self.padding = padding
171
+
172
+ if bias:
173
+ self.bias = nn.Parameter(torch.zeros(out_channel))
174
+
175
+ else:
176
+ self.bias = None
177
+
178
+ def forward(self, input):
179
+ # print("Before EqualConv2d: ", input.abs().mean())
180
+ out = F.conv2d(
181
+ input,
182
+ self.weight * self.scale,
183
+ bias=self.bias,
184
+ stride=self.stride,
185
+ padding=self.padding,
186
+ )
187
+ # print("After EqualConv2d: ", out.abs().mean(), (self.weight * self.scale).abs().mean())
188
+
189
+ return out
190
+
191
+ def __repr__(self):
192
+ return (
193
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
194
+ f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
195
+ )
196
+
197
+
198
+ class EqualLinear(nn.Module):
199
+ def __init__(
200
+ self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
201
+ ):
202
+ super().__init__()
203
+
204
+ self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
205
+
206
+ if bias:
207
+ self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
208
+
209
+ else:
210
+ self.bias = None
211
+
212
+ self.activation = activation
213
+
214
+ self.scale = (math.sqrt(1) / math.sqrt(in_dim)) * lr_mul
215
+ self.lr_mul = lr_mul
216
+
217
+ def forward(self, input):
218
+ if self.activation:
219
+ out = F.linear(input, self.weight * self.scale)
220
+ out = fused_leaky_relu(out, self.bias * self.lr_mul)
221
+
222
+ else:
223
+ out = F.linear(
224
+ input, self.weight * self.scale, bias=self.bias * self.lr_mul
225
+ )
226
+
227
+ return out
228
+
229
+ def __repr__(self):
230
+ return (
231
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
232
+ )
233
+
234
+
235
+ class ScaledLeakyReLU(nn.Module):
236
+ def __init__(self, negative_slope=0.2):
237
+ super().__init__()
238
+
239
+ self.negative_slope = negative_slope
240
+
241
+ def forward(self, input):
242
+ out = F.leaky_relu(input, negative_slope=self.negative_slope)
243
+
244
+ return out * math.sqrt(2)
245
+
246
+
247
+ class ModulatedConv2d(nn.Module):
248
+ def __init__(
249
+ self,
250
+ in_channel,
251
+ out_channel,
252
+ kernel_size,
253
+ style_dim,
254
+ demodulate=True,
255
+ upsample=False,
256
+ downsample=False,
257
+ blur_kernel=[1, 3, 3, 1],
258
+ ):
259
+ super().__init__()
260
+
261
+ self.eps = 1e-8
262
+ self.kernel_size = kernel_size
263
+ self.in_channel = in_channel
264
+ self.out_channel = out_channel
265
+ self.upsample = upsample
266
+ self.downsample = downsample
267
+
268
+ if upsample:
269
+ factor = 2
270
+ p = (len(blur_kernel) - factor) - (kernel_size - 1)
271
+ pad0 = (p + 1) // 2 + factor - 1
272
+ pad1 = p // 2 + 1
273
+
274
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
275
+
276
+ if downsample:
277
+ factor = 2
278
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
279
+ pad0 = (p + 1) // 2
280
+ pad1 = p // 2
281
+
282
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1))
283
+
284
+ fan_in = in_channel * kernel_size ** 2
285
+ self.scale = math.sqrt(1) / math.sqrt(fan_in)
286
+ self.padding = kernel_size // 2
287
+
288
+ self.weight = nn.Parameter(
289
+ torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
290
+ )
291
+
292
+ if style_dim is not None and style_dim > 0:
293
+ self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
294
+
295
+ self.demodulate = demodulate
296
+
297
+ def __repr__(self):
298
+ return (
299
+ f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
300
+ f'upsample={self.upsample}, downsample={self.downsample})'
301
+ )
302
+
303
+ def forward(self, input, style):
304
+ batch, in_channel, height, width = input.shape
305
+
306
+ if style is not None:
307
+ style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
308
+ else:
309
+ style = torch.ones(batch, 1, in_channel, 1, 1).cuda()
310
+ weight = self.scale * self.weight * style
311
+
312
+ if self.demodulate:
313
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
314
+ weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
315
+
316
+ weight = weight.view(
317
+ batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
318
+ )
319
+
320
+ if self.upsample:
321
+ input = input.view(1, batch * in_channel, height, width)
322
+ weight = weight.view(
323
+ batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
324
+ )
325
+ weight = weight.transpose(1, 2).reshape(
326
+ batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
327
+ )
328
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
329
+ _, _, height, width = out.shape
330
+ out = out.view(batch, self.out_channel, height, width)
331
+ out = self.blur(out)
332
+
333
+ elif self.downsample:
334
+ input = self.blur(input)
335
+ _, _, height, width = input.shape
336
+ input = input.view(1, batch * in_channel, height, width)
337
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
338
+ _, _, height, width = out.shape
339
+ out = out.view(batch, self.out_channel, height, width)
340
+
341
+ else:
342
+ input = input.view(1, batch * in_channel, height, width)
343
+ out = F.conv2d(input, weight, padding=self.padding, groups=batch)
344
+ _, _, height, width = out.shape
345
+ out = out.view(batch, self.out_channel, height, width)
346
+
347
+ return out
348
+
349
+
350
+ class NoiseInjection(nn.Module):
351
+ def __init__(self):
352
+ super().__init__()
353
+
354
+ self.weight = nn.Parameter(torch.zeros(1))
355
+
356
+ def forward(self, image, noise=None):
357
+ if noise is None:
358
+ batch, _, height, width = image.shape
359
+ noise = image.new_empty(batch, 1, height, width).normal_()
360
+
361
+ return image + self.weight * noise
362
+
363
+
364
+ class ConstantInput(nn.Module):
365
+ def __init__(self, channel, size=4):
366
+ super().__init__()
367
+
368
+ self.input = nn.Parameter(torch.randn(1, channel, size, size))
369
+
370
+ def forward(self, input):
371
+ batch = input.shape[0]
372
+ out = self.input.repeat(batch, 1, 1, 1)
373
+
374
+ return out
375
+
376
+
377
+ class StyledConv(nn.Module):
378
+ def __init__(
379
+ self,
380
+ in_channel,
381
+ out_channel,
382
+ kernel_size,
383
+ style_dim=None,
384
+ upsample=False,
385
+ blur_kernel=[1, 3, 3, 1],
386
+ demodulate=True,
387
+ inject_noise=True,
388
+ ):
389
+ super().__init__()
390
+
391
+ self.inject_noise = inject_noise
392
+ self.conv = ModulatedConv2d(
393
+ in_channel,
394
+ out_channel,
395
+ kernel_size,
396
+ style_dim,
397
+ upsample=upsample,
398
+ blur_kernel=blur_kernel,
399
+ demodulate=demodulate,
400
+ )
401
+
402
+ self.noise = NoiseInjection()
403
+ # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
404
+ # self.activate = ScaledLeakyReLU(0.2)
405
+ self.activate = FusedLeakyReLU(out_channel)
406
+
407
+ def forward(self, input, style=None, noise=None):
408
+ out = self.conv(input, style)
409
+ if self.inject_noise:
410
+ out = self.noise(out, noise=noise)
411
+ # out = out + self.bias
412
+ out = self.activate(out)
413
+
414
+ return out
415
+
416
+
417
+ class ToRGB(nn.Module):
418
+ def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
419
+ super().__init__()
420
+
421
+ if upsample:
422
+ self.upsample = Upsample(blur_kernel)
423
+
424
+ self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
425
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
426
+
427
+ def forward(self, input, style, skip=None):
428
+ out = self.conv(input, style)
429
+ out = out + self.bias
430
+
431
+ if skip is not None:
432
+ skip = self.upsample(skip)
433
+
434
+ out = out + skip
435
+
436
+ return out
437
+
438
+
439
+ class Generator(nn.Module):
440
+ def __init__(
441
+ self,
442
+ size,
443
+ style_dim,
444
+ n_mlp,
445
+ channel_multiplier=2,
446
+ blur_kernel=[1, 3, 3, 1],
447
+ lr_mlp=0.01,
448
+ ):
449
+ super().__init__()
450
+
451
+ self.size = size
452
+
453
+ self.style_dim = style_dim
454
+
455
+ layers = [PixelNorm()]
456
+
457
+ for i in range(n_mlp):
458
+ layers.append(
459
+ EqualLinear(
460
+ style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
461
+ )
462
+ )
463
+
464
+ self.style = nn.Sequential(*layers)
465
+
466
+ self.channels = {
467
+ 4: 512,
468
+ 8: 512,
469
+ 16: 512,
470
+ 32: 512,
471
+ 64: 256 * channel_multiplier,
472
+ 128: 128 * channel_multiplier,
473
+ 256: 64 * channel_multiplier,
474
+ 512: 32 * channel_multiplier,
475
+ 1024: 16 * channel_multiplier,
476
+ }
477
+
478
+ self.input = ConstantInput(self.channels[4])
479
+ self.conv1 = StyledConv(
480
+ self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
481
+ )
482
+ self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
483
+
484
+ self.log_size = int(math.log(size, 2))
485
+ self.num_layers = (self.log_size - 2) * 2 + 1
486
+
487
+ self.convs = nn.ModuleList()
488
+ self.upsamples = nn.ModuleList()
489
+ self.to_rgbs = nn.ModuleList()
490
+ self.noises = nn.Module()
491
+
492
+ in_channel = self.channels[4]
493
+
494
+ for layer_idx in range(self.num_layers):
495
+ res = (layer_idx + 5) // 2
496
+ shape = [1, 1, 2 ** res, 2 ** res]
497
+ self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
498
+
499
+ for i in range(3, self.log_size + 1):
500
+ out_channel = self.channels[2 ** i]
501
+
502
+ self.convs.append(
503
+ StyledConv(
504
+ in_channel,
505
+ out_channel,
506
+ 3,
507
+ style_dim,
508
+ upsample=True,
509
+ blur_kernel=blur_kernel,
510
+ )
511
+ )
512
+
513
+ self.convs.append(
514
+ StyledConv(
515
+ out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
516
+ )
517
+ )
518
+
519
+ self.to_rgbs.append(ToRGB(out_channel, style_dim))
520
+
521
+ in_channel = out_channel
522
+
523
+ self.n_latent = self.log_size * 2 - 2
524
+
525
+ def make_noise(self):
526
+ device = self.input.input.device
527
+
528
+ noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
529
+
530
+ for i in range(3, self.log_size + 1):
531
+ for _ in range(2):
532
+ noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
533
+
534
+ return noises
535
+
536
+ def mean_latent(self, n_latent):
537
+ latent_in = torch.randn(
538
+ n_latent, self.style_dim, device=self.input.input.device
539
+ )
540
+ latent = self.style(latent_in).mean(0, keepdim=True)
541
+
542
+ return latent
543
+
544
+ def get_latent(self, input):
545
+ return self.style(input)
546
+
547
+ def forward(
548
+ self,
549
+ styles,
550
+ return_latents=False,
551
+ inject_index=None,
552
+ truncation=1,
553
+ truncation_latent=None,
554
+ input_is_latent=False,
555
+ noise=None,
556
+ randomize_noise=True,
557
+ ):
558
+ if not input_is_latent:
559
+ styles = [self.style(s) for s in styles]
560
+
561
+ if noise is None:
562
+ if randomize_noise:
563
+ noise = [None] * self.num_layers
564
+ else:
565
+ noise = [
566
+ getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
567
+ ]
568
+
569
+ if truncation < 1:
570
+ style_t = []
571
+
572
+ for style in styles:
573
+ style_t.append(
574
+ truncation_latent + truncation * (style - truncation_latent)
575
+ )
576
+
577
+ styles = style_t
578
+
579
+ if len(styles) < 2:
580
+ inject_index = self.n_latent
581
+
582
+ if len(styles[0].shape) < 3:
583
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
584
+
585
+ else:
586
+ latent = styles[0]
587
+
588
+ else:
589
+ if inject_index is None:
590
+ inject_index = random.randint(1, self.n_latent - 1)
591
+
592
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
593
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
594
+
595
+ latent = torch.cat([latent, latent2], 1)
596
+
597
+ out = self.input(latent)
598
+ out = self.conv1(out, latent[:, 0], noise=noise[0])
599
+
600
+ skip = self.to_rgb1(out, latent[:, 1])
601
+
602
+ i = 1
603
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
604
+ self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
605
+ ):
606
+ out = conv1(out, latent[:, i], noise=noise1)
607
+ out = conv2(out, latent[:, i + 1], noise=noise2)
608
+ skip = to_rgb(out, latent[:, i + 2], skip)
609
+
610
+ i += 2
611
+
612
+ image = skip
613
+
614
+ if return_latents:
615
+ return image, latent
616
+
617
+ else:
618
+ return image, None
619
+
620
+
621
+ class ConvLayer(nn.Sequential):
622
+ def __init__(
623
+ self,
624
+ in_channel,
625
+ out_channel,
626
+ kernel_size,
627
+ downsample=False,
628
+ blur_kernel=[1, 3, 3, 1],
629
+ bias=True,
630
+ activate=True,
631
+ ):
632
+ layers = []
633
+
634
+ if downsample:
635
+ factor = 2
636
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
637
+ pad0 = (p + 1) // 2
638
+ pad1 = p // 2
639
+
640
+ layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
641
+
642
+ stride = 2
643
+ self.padding = 0
644
+
645
+ else:
646
+ stride = 1
647
+ self.padding = kernel_size // 2
648
+
649
+ layers.append(
650
+ EqualConv2d(
651
+ in_channel,
652
+ out_channel,
653
+ kernel_size,
654
+ padding=self.padding,
655
+ stride=stride,
656
+ bias=bias and not activate,
657
+ )
658
+ )
659
+
660
+ if activate:
661
+ if bias:
662
+ layers.append(FusedLeakyReLU(out_channel))
663
+
664
+ else:
665
+ layers.append(ScaledLeakyReLU(0.2))
666
+
667
+ super().__init__(*layers)
668
+
669
+
670
+ class ResBlock(nn.Module):
671
+ def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True, skip_gain=1.0):
672
+ super().__init__()
673
+
674
+ self.skip_gain = skip_gain
675
+ self.conv1 = ConvLayer(in_channel, in_channel, 3)
676
+ self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample, blur_kernel=blur_kernel)
677
+
678
+ if in_channel != out_channel or downsample:
679
+ self.skip = ConvLayer(
680
+ in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False
681
+ )
682
+ else:
683
+ self.skip = nn.Identity()
684
+
685
+ def forward(self, input):
686
+ out = self.conv1(input)
687
+ out = self.conv2(out)
688
+
689
+ skip = self.skip(input)
690
+ out = (out * self.skip_gain + skip) / math.sqrt(self.skip_gain ** 2 + 1.0)
691
+
692
+ return out
693
+
694
+
695
+ class StyleGAN2Discriminator(nn.Module):
696
+ def __init__(self, input_nc, ndf=64, n_layers=3, no_antialias=False, size=None, opt=None):
697
+ super().__init__()
698
+ self.opt = opt
699
+ self.stddev_group = 16
700
+ if size is None:
701
+ size = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
702
+ if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
703
+ size = 2 ** int(np.log2(self.opt.D_patch_size))
704
+
705
+ blur_kernel = [1, 3, 3, 1]
706
+ channel_multiplier = ndf / 64
707
+ channels = {
708
+ 4: min(384, int(4096 * channel_multiplier)),
709
+ 8: min(384, int(2048 * channel_multiplier)),
710
+ 16: min(384, int(1024 * channel_multiplier)),
711
+ 32: min(384, int(512 * channel_multiplier)),
712
+ 64: int(256 * channel_multiplier),
713
+ 128: int(128 * channel_multiplier),
714
+ 256: int(64 * channel_multiplier),
715
+ 512: int(32 * channel_multiplier),
716
+ 1024: int(16 * channel_multiplier),
717
+ }
718
+
719
+ convs = [ConvLayer(3, channels[size], 1)]
720
+
721
+ log_size = int(math.log(size, 2))
722
+
723
+ in_channel = channels[size]
724
+
725
+ if "smallpatch" in self.opt.netD:
726
+ final_res_log2 = 4
727
+ elif "patch" in self.opt.netD:
728
+ final_res_log2 = 3
729
+ else:
730
+ final_res_log2 = 2
731
+
732
+ for i in range(log_size, final_res_log2, -1):
733
+ out_channel = channels[2 ** (i - 1)]
734
+
735
+ convs.append(ResBlock(in_channel, out_channel, blur_kernel))
736
+
737
+ in_channel = out_channel
738
+
739
+ self.convs = nn.Sequential(*convs)
740
+
741
+ if False and "tile" in self.opt.netD:
742
+ in_channel += 1
743
+ self.final_conv = ConvLayer(in_channel, channels[4], 3)
744
+ if "patch" in self.opt.netD:
745
+ self.final_linear = ConvLayer(channels[4], 1, 3, bias=False, activate=False)
746
+ else:
747
+ self.final_linear = nn.Sequential(
748
+ EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
749
+ EqualLinear(channels[4], 1),
750
+ )
751
+
752
+ def forward(self, input, get_minibatch_features=False):
753
+ if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
754
+ h, w = input.size(2), input.size(3)
755
+ y = torch.randint(h - self.opt.D_patch_size, ())
756
+ x = torch.randint(w - self.opt.D_patch_size, ())
757
+ input = input[:, :, y:y + self.opt.D_patch_size, x:x + self.opt.D_patch_size]
758
+ out = input
759
+ for i, conv in enumerate(self.convs):
760
+ out = conv(out)
761
+ # print(i, out.abs().mean())
762
+ # out = self.convs(input)
763
+
764
+ batch, channel, height, width = out.shape
765
+
766
+ if False and "tile" in self.opt.netD:
767
+ group = min(batch, self.stddev_group)
768
+ stddev = out.view(
769
+ group, -1, 1, channel // 1, height, width
770
+ )
771
+ stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
772
+ stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
773
+ stddev = stddev.repeat(group, 1, height, width)
774
+ out = torch.cat([out, stddev], 1)
775
+
776
+ out = self.final_conv(out)
777
+ # print(out.abs().mean())
778
+
779
+ if "patch" not in self.opt.netD:
780
+ out = out.view(batch, -1)
781
+ out = self.final_linear(out)
782
+
783
+ return out
784
+
785
+
786
+ class TileStyleGAN2Discriminator(StyleGAN2Discriminator):
787
+ def forward(self, input):
788
+ B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
789
+ size = self.opt.D_patch_size
790
+ Y = H // size
791
+ X = W // size
792
+ input = input.view(B, C, Y, size, X, size)
793
+ input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
794
+ return super().forward(input)
795
+
796
+
797
+ class StyleGAN2Encoder(nn.Module):
798
+ def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
799
+ super().__init__()
800
+ assert opt is not None
801
+ self.opt = opt
802
+ channel_multiplier = ngf / 32
803
+ channels = {
804
+ 4: min(512, int(round(4096 * channel_multiplier))),
805
+ 8: min(512, int(round(2048 * channel_multiplier))),
806
+ 16: min(512, int(round(1024 * channel_multiplier))),
807
+ 32: min(512, int(round(512 * channel_multiplier))),
808
+ 64: int(round(256 * channel_multiplier)),
809
+ 128: int(round(128 * channel_multiplier)),
810
+ 256: int(round(64 * channel_multiplier)),
811
+ 512: int(round(32 * channel_multiplier)),
812
+ 1024: int(round(16 * channel_multiplier)),
813
+ }
814
+
815
+ blur_kernel = [1, 3, 3, 1]
816
+
817
+ cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
818
+ convs = [nn.Identity(),
819
+ ConvLayer(3, channels[cur_res], 1)]
820
+
821
+ num_downsampling = self.opt.stylegan2_G_num_downsampling
822
+ for i in range(num_downsampling):
823
+ in_channel = channels[cur_res]
824
+ out_channel = channels[cur_res // 2]
825
+ convs.append(ResBlock(in_channel, out_channel, blur_kernel, downsample=True))
826
+ cur_res = cur_res // 2
827
+
828
+ for i in range(n_blocks // 2):
829
+ n_channel = channels[cur_res]
830
+ convs.append(ResBlock(n_channel, n_channel, downsample=False))
831
+
832
+ self.convs = nn.Sequential(*convs)
833
+
834
+ def forward(self, input, layers=[], get_features=False):
835
+ feat = input
836
+ feats = []
837
+ if -1 in layers:
838
+ layers.append(len(self.convs) - 1)
839
+ for layer_id, layer in enumerate(self.convs):
840
+ feat = layer(feat)
841
+ # print(layer_id, " features ", feat.abs().mean())
842
+ if layer_id in layers:
843
+ feats.append(feat)
844
+
845
+ if get_features:
846
+ return feat, feats
847
+ else:
848
+ return feat
849
+
850
+
851
+ class StyleGAN2Decoder(nn.Module):
852
+ def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
853
+ super().__init__()
854
+ assert opt is not None
855
+ self.opt = opt
856
+
857
+ blur_kernel = [1, 3, 3, 1]
858
+
859
+ channel_multiplier = ngf / 32
860
+ channels = {
861
+ 4: min(512, int(round(4096 * channel_multiplier))),
862
+ 8: min(512, int(round(2048 * channel_multiplier))),
863
+ 16: min(512, int(round(1024 * channel_multiplier))),
864
+ 32: min(512, int(round(512 * channel_multiplier))),
865
+ 64: int(round(256 * channel_multiplier)),
866
+ 128: int(round(128 * channel_multiplier)),
867
+ 256: int(round(64 * channel_multiplier)),
868
+ 512: int(round(32 * channel_multiplier)),
869
+ 1024: int(round(16 * channel_multiplier)),
870
+ }
871
+
872
+ num_downsampling = self.opt.stylegan2_G_num_downsampling
873
+ cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) // (2 ** num_downsampling)
874
+ convs = []
875
+
876
+ for i in range(n_blocks // 2):
877
+ n_channel = channels[cur_res]
878
+ convs.append(ResBlock(n_channel, n_channel, downsample=False))
879
+
880
+ for i in range(num_downsampling):
881
+ in_channel = channels[cur_res]
882
+ out_channel = channels[cur_res * 2]
883
+ inject_noise = "small" not in self.opt.netG
884
+ convs.append(
885
+ StyledConv(in_channel, out_channel, 3, upsample=True, blur_kernel=blur_kernel, inject_noise=inject_noise)
886
+ )
887
+ cur_res = cur_res * 2
888
+
889
+ convs.append(ConvLayer(channels[cur_res], 3, 1))
890
+
891
+ self.convs = nn.Sequential(*convs)
892
+
893
+ def forward(self, input):
894
+ return self.convs(input)
895
+
896
+
897
+ class StyleGAN2Generator(nn.Module):
898
+ def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
899
+ super().__init__()
900
+ self.opt = opt
901
+ self.encoder = StyleGAN2Encoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
902
+ self.decoder = StyleGAN2Decoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
903
+
904
+ def forward(self, input, layers=[], encode_only=False):
905
+ feat, feats = self.encoder(input, layers, True)
906
+ if encode_only:
907
+ return feats
908
+ else:
909
+ fake = self.decoder(feat)
910
+
911
+ if len(layers) > 0:
912
+ return fake, feats
913
+ else:
914
+ return fake
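The Blur, Upsample, and Downsample modules above all resample through upfirdn2d using the separable kernel built by make_kernel, which is what gives these StyleGAN2 blocks their antialiased up- and downsampling. A short sketch of what that kernel is for the default blur_kernel=[1, 3, 3, 1]:

import torch

k = torch.tensor([1., 3., 3., 1.])
k2d = k[None, :] * k[:, None]   # 4x4 separable binomial kernel
k2d = k2d / k2d.sum()           # normalized so the blur preserves mean intensity
print(k2d)                      # Upsample additionally scales this kernel by factor ** 2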
models/template_model.py ADDED
@@ -0,0 +1,99 @@
1
+ """Model class template
2
+
3
+ This module provides a template for users to implement custom models.
4
+ You can specify '--model template' to use this model.
5
+ The class name should be consistent with both the filename and its model option.
6
+ The filename should be <model>_model.py
7
+ The class name should be <Model>Model
8
+ It implements a simple image-to-image translation baseline based on regression loss.
9
+ Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
10
+ min_<netG> ||netG(data_A) - data_B||_1
11
+ You need to implement the following functions:
12
+ <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
13
+ <__init__>: Initialize this model class.
14
+ <set_input>: Unpack input data and perform data pre-processing.
15
+ <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
16
+ <optimize_parameters>: Update network weights; it will be called in every training iteration.
17
+ """
18
+ import torch
19
+ from .base_model import BaseModel
20
+ from . import networks
21
+
22
+
23
+ class TemplateModel(BaseModel):
24
+ @staticmethod
25
+ def modify_commandline_options(parser, is_train=True):
26
+ """Add new model-specific options and rewrite default values for existing options.
27
+
28
+ Parameters:
29
+ parser -- the option parser
30
+ is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
31
+
32
+ Returns:
33
+ the modified parser.
34
+ """
35
+ parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses the aligned dataset mode.
36
+ if is_train:
37
+ parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
38
+
39
+ return parser
40
+
41
+ def __init__(self, opt):
42
+ """Initialize this model class.
43
+
44
+ Parameters:
45
+ opt -- training/test options
46
+
47
+ A few things can be done here.
48
+ - (required) call the initialization function of BaseModel
49
+ - define loss function, visualization images, model names, and optimizers
50
+ """
51
+ BaseModel.__init__(self, opt) # call the initialization method of BaseModel
52
+ # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
53
+ self.loss_names = ['loss_G']
54
+ # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
55
+ self.visual_names = ['data_A', 'data_B', 'output']
56
+ # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
57
+ # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
58
+ self.model_names = ['G']
59
+ # define networks; you can use opt.isTrain to specify different behaviors for training and test.
60
+ self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
61
+ if self.isTrain: # only defined during training time
62
+ # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
63
+ # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
64
+ self.criterionLoss = torch.nn.L1Loss()
65
+ # define and initialize optimizers. You can define one optimizer for each network.
66
+ # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
67
+ self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
68
+ self.optimizers = [self.optimizer]
69
+
70
+ # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
71
+
72
+ def set_input(self, input):
73
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
74
+
75
+ Parameters:
76
+ input: a dictionary that contains the data itself and its metadata information.
77
+ """
78
+ AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
79
+ self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
80
+ self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
81
+ self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
82
+
83
+ def forward(self):
84
+ """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
85
+ self.output = self.netG(self.data_A) # generate output image given the input data_A
86
+
87
+ def backward(self):
88
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
89
+ # calculate the intermediate results if necessary; here self.output has already been computed in <forward>
90
+ # calculate loss given the input and intermediate results
91
+ self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
92
+ self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
93
+
94
+ def optimize_parameters(self):
95
+ """Update network weights; it will be called in every training iteration."""
96
+ self.forward() # first call forward to calculate intermediate results
97
+ self.optimizer.zero_grad() # clear network G's existing gradients
98
+ self.backward() # calculate gradients for network G
99
+ self.optimizer.step() # update gradients for network G
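A custom model built from this template only has to implement set_input, forward, backward, and optimize_parameters; the surrounding training loop is provided by the repository. A sketch of that loop, assuming the create_dataset and create_model helpers defined in data/__init__.py and models/__init__.py (this mirrors train.py rather than reproducing it exactly):

from options.train_options import TrainOptions
from data import create_dataset
from models import create_model

opt = TrainOptions().parse()                 # e.g. run with --model template
dataset = create_dataset(opt)
model = create_model(opt)
model.setup(opt)                             # set up schedulers and optionally load weights
for epoch in range(opt.n_epochs + opt.n_epochs_decay):
    for data in dataset:                     # each item is a dict with 'A', 'B', 'A_paths', 'B_paths'
        model.set_input(data)
        model.optimize_parameters()          # forward + backward + optimizer step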
options/__init__.py ADDED
@@ -0,0 +1 @@
1
+ """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
options/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (287 Bytes).
options/__pycache__/base_options.cpython-310.pyc ADDED
Binary file (7.53 kB).
options/__pycache__/train_options.cpython-310.pyc ADDED
Binary file (3.09 kB).