diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..ddaa0d6dfc5a64c5a6f58a94e0bf67bec1969f7e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +imgs/gif_cut.gif filter=lfs diff=lfs merge=lfs -text +imgs/horse2zebra_comparison.jpg filter=lfs diff=lfs merge=lfs -text +imgs/patchnce.gif filter=lfs diff=lfs merge=lfs -text +imgs/results.gif filter=lfs diff=lfs merge=lfs -text +imgs/singleimage.gif filter=lfs diff=lfs merge=lfs -text diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2819812bd42e364c4799a20f293858dc4c35af27 --- /dev/null +++ b/LICENSE @@ -0,0 +1,212 @@ +Copyright (c) 2020, Taesung Park and Jun-Yan Zhu +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------- LICENSE FOR CycleGAN ------------------------------- +-------------------https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix------ +Copyright (c) 2017, Jun-Yan Zhu and Taesung Park +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------- LICENSE FOR stylegan2-pytorch ---------------------- +----------------https://github.com/rosinality/stylegan2-pytorch/---------------- +MIT License + +Copyright (c) 2019 Kim Seonghyeon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +--------------------------- LICENSE FOR pix2pix -------------------------------- +BSD License + +For pix2pix software +Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +----------------------------- LICENSE FOR DCGAN -------------------------------- +BSD License + +For dcgan.torch software + +Copyright (c) 2015, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------- LICENSE FOR StyleGAN2 ------------------------------ +--------------------------- Inherited from stylegan2-pytorch ------------------- +Copyright (c) 2019, NVIDIA Corporation. All rights reserved. + + +Nvidia Source Code License-NC + +======================================================================= + +1. Definitions + +"Licensor" means any person or entity that distributes its Work. + +"Software" means the original work of authorship made available under +this License. + +"Work" means the Software and any additions to or derivative works of +the Software that are made available under this License. + +"Nvidia Processors" means any central processing unit (CPU), graphics +processing unit (GPU), field-programmable gate array (FPGA), +application-specific integrated circuit (ASIC) or any combination +thereof designed, made, sold, or provided by Nvidia or its affiliates. + +The terms "reproduce," "reproduction," "derivative works," and +"distribution" have the meaning as provided under U.S. copyright law; +provided, however, that for the purposes of this License, derivative +works shall not include works that remain separable from, or merely +link (or bind by name) to the interfaces of, the Work. + +Works, including the Software, are "made available" under this License +by including in or with the Work either (a) a copyright notice +referencing the applicability of this License to the Work, or (b) a +copy of this License. + +2. License Grants + + 2.1 Copyright Grant. Subject to the terms and conditions of this + License, each Licensor grants to you a perpetual, worldwide, + non-exclusive, royalty-free, copyright license to reproduce, + prepare derivative works of, publicly display, publicly perform, + sublicense and distribute its Work and any resulting derivative + works in any form. + +3. Limitations + + 3.1 Redistribution. You may reproduce or distribute the Work only + if (a) you do so under this License, (b) you include a complete + copy of this License with your distribution, and (c) you retain + without modification any copyright, patent, trademark, or + attribution notices that are present in the Work. + + 3.2 Derivative Works. You may specify that additional or different + terms apply to the use, reproduction, and distribution of your + derivative works of the Work ("Your Terms") only if (a) Your Terms + provide that the use limitation in Section 3.3 applies to your + derivative works, and (b) you identify the specific derivative + works that are subject to Your Terms. Notwithstanding Your Terms, + this License (including the redistribution requirements in Section + 3.1) will continue to apply to the Work itself. + + 3.3 Use Limitation. The Work and any derivative works thereof only + may be used or intended for use non-commercially. The Work or + derivative works thereof may be used or intended for use by Nvidia + or its affiliates commercially or non-commercially. 
As used herein, + "non-commercially" means for research or evaluation purposes only. + + 3.4 Patent Claims. If you bring or threaten to bring a patent claim + against any Licensor (including any claim, cross-claim or + counterclaim in a lawsuit) to enforce any patents that you allege + are infringed by any Work, then your rights under this License from + such Licensor (including the grants in Sections 2.1 and 2.2) will + terminate immediately. + + 3.5 Trademarks. This License does not grant any rights to use any + Licensor's or its affiliates' names, logos, or trademarks, except + as necessary to reproduce the notices described in this License. + + 3.6 Termination. If you violate any term of this License, then your + rights under this License (including the grants in Sections 2.1 and + 2.2) will terminate immediately. + +4. Disclaimer of Warranty. + +THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR +NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER +THIS LICENSE. + +5. Limitation of Liability. + +EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF +OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +(INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF +THE POSSIBILITY OF SUCH DAMAGES. + +======================================================================= diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..22a5a0fa5030bb9c7ed07ff321b35c323ec36447 --- /dev/null +++ b/README.md @@ -0,0 +1,316 @@ + + +# Contrastive Unpaired Translation (CUT) + +### [video (1m)](https://youtu.be/Llg0vE_MVgk) | [video (10m)](https://youtu.be/jSGOzjmN8q0) | [website](http://taesung.me/ContrastiveUnpairedTranslation/) | [paper](https://arxiv.org/pdf/2007.15651) +
+ + + +


+ + + +We provide our PyTorch implementation of unpaired image-to-image translation based on patchwise contrastive learning and adversarial learning. No hand-crafted loss functions or inverse networks are used. Compared to [CycleGAN](https://github.com/junyanz/CycleGAN), our model training is faster and less memory-intensive. In addition, our method can be extended to single-image training, where each “domain” is only a *single* image. + + + + +[Contrastive Learning for Unpaired Image-to-Image Translation](http://taesung.me/ContrastiveUnpairedTranslation/) + [Taesung Park](https://taesung.me/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros/), [Richard Zhang](https://richzhang.github.io/), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)<br>
+UC Berkeley and Adobe Research
+ In ECCV 2020 + + + + +


+ +### Pseudo code +```python +import torch +cross_entropy_loss = torch.nn.CrossEntropyLoss() + +# Input: f_q (BxCxS) are sampled features from H(G_enc(x)) +# Input: f_k (BxCxS) are sampled features from H(G_enc(G(x))) +# Input: tau is the temperature used in PatchNCE loss. +# Output: PatchNCE loss +def PatchNCELoss(f_q, f_k, tau=0.07): + # batch size, channel size, and number of sample locations + B, C, S = f_q.shape + + # calculate v * v+: BxSx1 + l_pos = (f_k * f_q).sum(dim=1)[:, :, None] + + # calculate v * v-: BxSxS + l_neg = torch.bmm(f_q.transpose(1, 2), f_k) + + # The diagonal entries are not negatives. Remove them. + identity_matrix = torch.eye(S, dtype=torch.bool)[None, :, :] + l_neg.masked_fill_(identity_matrix, -float('inf')) + + # calculate logits: (B)x(S)x(S+1) + logits = torch.cat((l_pos, l_neg), dim=2) / tau + + # return PatchNCE loss + predictions = logits.flatten(0, 1) + targets = torch.zeros(B * S, dtype=torch.long) + return cross_entropy_loss(predictions, targets) +``` +## Example Results + +### Unpaired Image-to-Image Translation + + +### Single Image Unpaired Translation + + + +### Russian Blue Cat to Grumpy Cat + + +### Parisian Street to Burano's painted houses + + + + +## Prerequisites +- Linux or macOS +- Python 3 +- CPU or NVIDIA GPU + CUDA CuDNN + +### Update log + +9/12/2020: Added single-image translation. + +### Getting started + +- Clone this repo: +```bash +git clone https://github.com/taesungp/contrastive-unpaired-translation CUT +cd CUT +``` + +- Install PyTorch 1.4.0 and other dependencies (e.g., torchvision, visdom, dominate, gputil). + + For pip users, please type the command `pip install -r requirements.txt`. + + For Conda users, you can create a new Conda environment using `conda env create -f environment.yml`. + + +### CUT and FastCUT Training and Test + +- Download the `grumpifycat` dataset (Fig. 8 of the paper; Russian Blue -> Grumpy Cats): +```bash +bash ./datasets/download_cut_dataset.sh grumpifycat +``` +The dataset is downloaded and unzipped at `./datasets/grumpifycat/`. + +- To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097. + +- Train the CUT model: +```bash +python train.py --dataroot ./datasets/grumpifycat --name grumpycat_CUT --CUT_mode CUT +``` + Or train the FastCUT model: + ```bash +python train.py --dataroot ./datasets/grumpifycat --name grumpycat_FastCUT --CUT_mode FastCUT +``` +The checkpoints will be stored at `./checkpoints/grumpycat_*/web`. + +- Test the CUT model: +```bash +python test.py --dataroot ./datasets/grumpifycat --name grumpycat_CUT --CUT_mode CUT --phase train +``` + +The test results will be saved to an HTML file here: `./results/grumpifycat/latest_train/index.html`. + +### CUT, FastCUT, and CycleGAN +<div align="center">
+ +CUT is trained with the identity preservation loss and with `lambda_NCE=1`, while FastCUT is trained without the identity loss but with a higher `lambda_NCE=10.0`. Compared to CycleGAN, CUT learns to perform more powerful distribution matching, while FastCUT is designed as a lighter (half the GPU memory, so it can fit larger images) and faster (twice as fast to train) alternative to CycleGAN. Please refer to the [paper](https://arxiv.org/abs/2007.15651) for more details. + +In the above figure, we measure the percentage of pixels belonging to the horse/zebra bodies, using a pre-trained semantic segmentation model. We find a distribution mismatch between the sizes of horse and zebra images -- zebras usually appear larger (36.8\% vs. 17.9\%). Our full method CUT has the flexibility to enlarge the horses, matching the training statistics more closely than CycleGAN. FastCUT behaves more conservatively, like CycleGAN. + +### Training using our launcher scripts + +Please see `experiments/grumpifycat_launcher.py`, which generates the above command-line arguments. The launcher scripts are useful for configuring the rather complicated command-line arguments for training and testing. + +Using the launcher, the commands below generate the training commands for CUT and FastCUT. +```bash +python -m experiments grumpifycat train 0 # CUT +python -m experiments grumpifycat train 1 # FastCUT +``` + +To test using the launcher, +```bash +python -m experiments grumpifycat test 0 # CUT +python -m experiments grumpifycat test 1 # FastCUT +``` + +Possible commands are `run`, `run_test`, `launch`, `close`, and so on. Please see `experiments/__main__.py` for all commands. A launcher is easy and quick to define and use. For example, the grumpifycat launcher is defined in a few lines: +```python +from .tmux_launcher import Options, TmuxLauncher + + +class Launcher(TmuxLauncher): + def common_options(self): + return [ + Options( # Command 0 + dataroot="./datasets/grumpifycat", + name="grumpifycat_CUT", + CUT_mode="CUT" + ), + + Options( # Command 1 + dataroot="./datasets/grumpifycat", + name="grumpifycat_FastCUT", + CUT_mode="FastCUT", + ) + ] + + def commands(self): + return ["python train.py " + str(opt) for opt in self.common_options()] + + def test_commands(self): + # Russian Blue -> Grumpy Cats dataset does not have test split. + # Therefore, let's set the test split to be the "train" set. + return ["python test.py " + str(opt.set(phase='train')) for opt in self.common_options()] + +``` + + + +### Apply a pre-trained CUT model and evaluate FID + +To run the pretrained models, use the following commands. + +```bash + +# Download and unzip the pretrained models. The weights should be located at +# checkpoints/horse2zebra_cut_pretrained/latest_net_G.pth, for example. +wget http://efrosgans.eecs.berkeley.edu/CUT/pretrained_models.tar +tar -xf pretrained_models.tar + +# Generate outputs. The dataset paths might need to be adjusted. +# To do this, modify the lines of experiments/pretrained_launcher.py +# [id] corresponds to the respective commands defined in pretrained_launcher.py +# 0 - CUT on Cityscapes +# 1 - FastCUT on Cityscapes +# 2 - CUT on Horse2Zebra +# 3 - FastCUT on Horse2Zebra +# 4 - CUT on Cat2Dog +# 5 - FastCUT on Cat2Dog +python -m experiments pretrained run_test [id] + +# Evaluate FID. 
To do this, first install pytorch-fid from https://github.com/mseitzer/pytorch-fid +# pip install pytorch-fid +# For example, to evaluate the horse2zebra FID of CUT, +# python -m pytorch_fid ./datasets/horse2zebra/testB/ results/horse2zebra_cut_pretrained/test_latest/images/fake_B/ +# To evaluate the Cityscapes FID of FastCUT, +# python -m pytorch_fid ./datasets/cityscapes/valA/ ~/projects/contrastive-unpaired-translation/results/cityscapes_fastcut_pretrained/test_latest/images/fake_B/ +# Note that a special dataset needs to be used for the Cityscapes model. Please read below. +python -m pytorch_fid [path to real test images] [path to generated images] + +``` + +Note: the Cityscapes pretrained model was trained and evaluated on a resized and JPEG-compressed version of the original Cityscapes dataset. For evaluation, please download [this](http://efrosgans.eecs.berkeley.edu/CUT/datasets/cityscapes_val_for_CUT.tar) validation set. + + +### SinCUT Single Image Unpaired Training + +To train SinCUT (single-image translation, shown in Figs. 9, 13, and 14 of the paper), you need to + +1. set the `--model` option as `--model sincut`, which invokes the configuration and code at `./models/sincut_model.py`, and +2. specify the dataset directory of one image in each domain, such as the example dataset included in this repo at `./datasets/single_image_monet_etretat/`. + +For example, to train a model for the [Etretat cliff (first image of Figure 13)](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/imgs/singleimage.gif), please use the following command. + +```bash +python train.py --model sincut --name singleimage_monet_etretat --dataroot ./datasets/single_image_monet_etretat +``` + +or by using the experiment launcher script, +```bash +python -m experiments singleimage run 0 +``` + +For single-image translation, we adopt the network architectural components of [StyleGAN2](https://github.com/NVlabs/stylegan2), as well as the pixel identity preservation loss used in [DTN](https://arxiv.org/abs/1611.02200) and [CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/cycle_gan_model.py#L160). In particular, we adopted the code of [rosinality](https://github.com/rosinality/stylegan2-pytorch), which lives at `models/stylegan_networks.py`. + +The training takes several hours. To generate the final image using the checkpoint, + +```bash +python test.py --model sincut --name singleimage_monet_etretat --dataroot ./datasets/single_image_monet_etretat +``` + +or simply + +```bash +python -m experiments singleimage run_test 0 +``` + +### [Datasets](./docs/datasets.md) +Download CUT/CycleGAN/pix2pix datasets. For example, + +```bash +bash datasets/download_cut_dataset.sh horse2zebra +``` + +The Cat2Dog dataset is prepared from the AFHQ dataset. Please visit https://github.com/clovaai/stargan-v2 and download the AFHQ dataset with `bash download.sh afhq-dataset` from that repo. Then reorganize directories as follows, linking the AFHQ folders into `datasets/cat2dog`: +```bash +mkdir datasets/cat2dog +ln -s [path_to_afhq]/train/cat datasets/cat2dog/trainA +ln -s [path_to_afhq]/train/dog datasets/cat2dog/trainB +ln -s [path_to_afhq]/test/cat datasets/cat2dog/testA +ln -s [path_to_afhq]/test/dog datasets/cat2dog/testB +``` + +The Cityscapes dataset can be downloaded from https://cityscapes-dataset.com. +After that, use the script `./datasets/prepare_cityscapes_dataset.py` to prepare the dataset. 
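For reference, a typical invocation of the preparation script is sketched below. The flag names (`--gtFine_dir`, `--leftImg8bit_dir`, `--output_dir`) are assumed from the CycleGAN/pix2pix version of this script and may differ here, so check `python ./datasets/prepare_cityscapes_dataset.py --help` for the exact interface.

```bash
# Assumed interface -- verify the exact flag names with --help before running.
python ./datasets/prepare_cityscapes_dataset.py \
    --gtFine_dir ./gtFine/ \
    --leftImg8bit_dir ./leftImg8bit/ \
    --output_dir ./datasets/cityscapes/
```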
+ + +#### Preprocessing of input images + +The preprocessing of the input images, such as resizing or random cropping, is controlled by the option `--preprocess`, `--load_size`, and `--crop_size`. The usage follows the [CycleGAN/pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) repo. + +For example, the default setting `--preprocess resize_and_crop --load_size 286 --crop_size 256` resizes the input image to `286x286`, and then makes a random crop of size `256x256` as a way to perform data augmentation. There are other preprocessing options that can be specified, and they are specified in [base_dataset.py](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/data/base_dataset.py#L82). Below are some example options. + + - `--preprocess none`: does not perform any preprocessing. Note that the image size is still scaled to be a closest multiple of 4, because the convolutional generator cannot maintain the same image size otherwise. + - `--preprocess scale_width --load_size 768`: scales the width of the image to be of size 768. + - `--preprocess scale_shortside_and_crop`: scales the image preserving aspect ratio so that the short side is `load_size`, and then performs random cropping of window size `crop_size`. + +More preprocessing options can be added by modifying [`get_transform()`](https://github.com/taesungp/contrastive-unpaired-translation/blob/master/data/base_dataset.py#L82) of `base_dataset.py`. + + +### Citation +If you use this code for your research, please cite our [paper](https://arxiv.org/pdf/2007.15651). +``` +@inproceedings{park2020cut, + title={Contrastive Learning for Unpaired Image-to-Image Translation}, + author={Taesung Park and Alexei A. Efros and Richard Zhang and Jun-Yan Zhu}, + booktitle={European Conference on Computer Vision}, + year={2020} +} +``` + +If you use the original [pix2pix](https://phillipi.github.io/pix2pix/) and [CycleGAN](https://junyanz.github.io/CycleGAN/) model included in this repo, please cite the following papers +``` +@inproceedings{CycleGAN2017, + title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks}, + author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A}, + booktitle={IEEE International Conference on Computer Vision (ICCV)}, + year={2017} +} + + +@inproceedings{isola2017image, + title={Image-to-Image Translation with Conditional Adversarial Networks}, + author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2017} +} +``` + + +### Acknowledgments +We thank Allan Jabri and Phillip Isola for helpful discussion and feedback. Our code is developed based on [pytorch-CycleGAN-and-pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix). We also thank [pytorch-fid](https://github.com/mseitzer/pytorch-fid) for FID computation, [drn](https://github.com/fyu/drn) for mIoU computation, and [stylegan2-pytorch](https://github.com/rosinality/stylegan2-pytorch/) for the PyTorch implementation of StyleGAN2 used in our single-image translation setting. 
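As a concrete illustration of the preprocessing options described above, the following command (the experiment name `grumpycat_CUT_512` is made up for this example) trains CUT on wider inputs by scaling the image width to 512 and taking random `256x256` crops:

```bash
# Hypothetical run combining the preprocessing flags described in the README.
python train.py --dataroot ./datasets/grumpifycat --name grumpycat_CUT_512 --CUT_mode CUT \
    --preprocess scale_width_and_crop --load_size 512 --crop_size 256
```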
diff --git a/data/__init__.py b/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7dd29b40d82d4695e351ddd67c58e768b1df500 --- /dev/null +++ b/data/__init__.py @@ -0,0 +1,98 @@ +"""This package includes all the modules related to data loading and preprocessing + + To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. + You need to implement four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point from data loader. + -- : (optionally) add dataset-specific options and set default options. + +Now you can use the dataset class by specifying flag '--dataset_mode dummy'. +See our template dataset class 'template_dataset.py' for more details. +""" +import importlib +import torch.utils.data +from data.base_dataset import BaseDataset + + +def find_dataset_using_name(dataset_name): + """Import the module "data/[dataset_name]_dataset.py". + + In the file, the class called DatasetNameDataset() will + be instantiated. It has to be a subclass of BaseDataset, + and it is case-insensitive. + """ + dataset_filename = "data." + dataset_name + "_dataset" + datasetlib = importlib.import_module(dataset_filename) + + dataset = None + target_dataset_name = dataset_name.replace('_', '') + 'dataset' + for name, cls in datasetlib.__dict__.items(): + if name.lower() == target_dataset_name.lower() \ + and issubclass(cls, BaseDataset): + dataset = cls + + if dataset is None: + raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) + + return dataset + + +def get_option_setter(dataset_name): + """Return the static method of the dataset class.""" + dataset_class = find_dataset_using_name(dataset_name) + return dataset_class.modify_commandline_options + + +def create_dataset(opt): + """Create a dataset given the option. + + This function wraps the class CustomDatasetDataLoader. + This is the main interface between this package and 'train.py'/'test.py' + + Example: + >>> from data import create_dataset + >>> dataset = create_dataset(opt) + """ + data_loader = CustomDatasetDataLoader(opt) + dataset = data_loader.load_data() + return dataset + + +class CustomDatasetDataLoader(): + """Wrapper class of Dataset class that performs multi-threaded data loading""" + + def __init__(self, opt): + """Initialize this class + + Step 1: create a dataset instance given the name [dataset_mode] + Step 2: create a multi-threaded data loader. 
+ """ + self.opt = opt + dataset_class = find_dataset_using_name(opt.dataset_mode) + self.dataset = dataset_class(opt) + print("dataset [%s] was created" % type(self.dataset).__name__) + self.dataloader = torch.utils.data.DataLoader( + self.dataset, + batch_size=opt.batch_size, + shuffle=not opt.serial_batches, + num_workers=int(opt.num_threads), + drop_last=True if opt.isTrain else False, + ) + + def set_epoch(self, epoch): + self.dataset.current_epoch = epoch + + def load_data(self): + return self + + def __len__(self): + """Return the number of data in the dataset""" + return min(len(self.dataset), self.opt.max_dataset_size) + + def __iter__(self): + """Return a batch of data""" + for i, data in enumerate(self.dataloader): + if i * self.opt.batch_size >= self.opt.max_dataset_size: + break + yield data diff --git a/data/__pycache__/__init__.cpython-310.pyc b/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..908c5c19338a7bdf1e6e2b4d47aea4b43c2898e8 Binary files /dev/null and b/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/data/__pycache__/base_dataset.cpython-310.pyc b/data/__pycache__/base_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295c9bc2dc2dfb19a0272d9e6489cf0285ced448 Binary files /dev/null and b/data/__pycache__/base_dataset.cpython-310.pyc differ diff --git a/data/__pycache__/image_folder.cpython-310.pyc b/data/__pycache__/image_folder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7c104128ef34272e447e2461682034f17455105 Binary files /dev/null and b/data/__pycache__/image_folder.cpython-310.pyc differ diff --git a/data/__pycache__/unaligned_dataset.cpython-310.pyc b/data/__pycache__/unaligned_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c347652e563df439f734b2780de97b672f928777 Binary files /dev/null and b/data/__pycache__/unaligned_dataset.cpython-310.pyc differ diff --git a/data/base_dataset.py b/data/base_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5748a9da2bcfb8126b3f91e50309eace78344e7b --- /dev/null +++ b/data/base_dataset.py @@ -0,0 +1,230 @@ +"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets. + +It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. +""" +import random +import numpy as np +import torch.utils.data as data +from PIL import Image +import torchvision.transforms as transforms +from abc import ABC, abstractmethod + + +class BaseDataset(data.Dataset, ABC): + """This class is an abstract base class (ABC) for datasets. + + To create a subclass, you need to implement the following four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point. + -- : (optionally) add dataset-specific options and set default options. + """ + + def __init__(self, opt): + """Initialize the class; save the options in the class + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + self.opt = opt + self.root = opt.dataroot + self.current_epoch = 0 + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. 
+ + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def __len__(self): + """Return the total number of images in the dataset.""" + return 0 + + @abstractmethod + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index - - a random integer for data indexing + + Returns: + a dictionary of data with their names. It ususally contains the data itself and its metadata information. + """ + pass + + +def get_params(opt, size): + w, h = size + new_h = h + new_w = w + if opt.preprocess == 'resize_and_crop': + new_h = new_w = opt.load_size + elif opt.preprocess == 'scale_width_and_crop': + new_w = opt.load_size + new_h = opt.load_size * h // w + + x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) + y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) + + flip = random.random() > 0.5 + + return {'crop_pos': (x, y), 'flip': flip} + + +def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + if 'fixsize' in opt.preprocess: + transform_list.append(transforms.Resize(params["size"], method)) + if 'resize' in opt.preprocess: + osize = [opt.load_size, opt.load_size] + if "gta2cityscapes" in opt.dataroot: + osize[0] = opt.load_size // 2 + transform_list.append(transforms.Resize(osize, method)) + elif 'scale_width' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) + elif 'scale_shortside' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method))) + + if 'zoom' in opt.preprocess: + if params is None: + transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method))) + else: + transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params["scale_factor"]))) + + if 'crop' in opt.preprocess: + if params is None or 'crop_pos' not in params: + transform_list.append(transforms.RandomCrop(opt.crop_size)) + else: + transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) + + if 'patch' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size))) + + if 'trim' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size))) + + # if opt.preprocess == 'none': + transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) + + if not opt.no_flip: + if params is None or 'flip' not in params: + transform_list.append(transforms.RandomHorizontalFlip()) + elif 'flip' in params: + transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) + + if convert: + transform_list += [transforms.ToTensor()] + if grayscale: + transform_list += [transforms.Normalize((0.5,), (0.5,))] + else: + transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + return transforms.Compose(transform_list) + + +def __make_power_2(img, base, method=Image.BICUBIC): + ow, oh = img.size + h = int(round(oh / base) * base) + w = int(round(ow / base) * base) + if h == oh and w == ow: + 
return img + + return img.resize((w, h), method) + + +def __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None): + if factor is None: + zoom_level = np.random.uniform(0.8, 1.0, size=[2]) + else: + zoom_level = (factor[0], factor[1]) + iw, ih = img.size + zoomw = max(crop_width, iw * zoom_level[0]) + zoomh = max(crop_width, ih * zoom_level[1]) + img = img.resize((int(round(zoomw)), int(round(zoomh))), method) + return img + + +def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC): + ow, oh = img.size + shortside = min(ow, oh) + if shortside >= target_width: + return img + else: + scale = target_width / shortside + return img.resize((round(ow * scale), round(oh * scale)), method) + + +def __trim(img, trim_width): + ow, oh = img.size + if ow > trim_width: + xstart = np.random.randint(ow - trim_width) + xend = xstart + trim_width + else: + xstart = 0 + xend = ow + if oh > trim_width: + ystart = np.random.randint(oh - trim_width) + yend = ystart + trim_width + else: + ystart = 0 + yend = oh + return img.crop((xstart, ystart, xend, yend)) + + +def __scale_width(img, target_width, crop_width, method=Image.BICUBIC): + ow, oh = img.size + if ow == target_width and oh >= crop_width: + return img + w = target_width + h = int(max(target_width * oh / ow, crop_width)) + return img.resize((w, h), method) + + +def __crop(img, pos, size): + ow, oh = img.size + x1, y1 = pos + tw = th = size + if (ow > tw or oh > th): + return img.crop((x1, y1, x1 + tw, y1 + th)) + return img + + +def __patch(img, index, size): + ow, oh = img.size + nw, nh = ow // size, oh // size + roomx = ow - nw * size + roomy = oh - nh * size + startx = np.random.randint(int(roomx) + 1) + starty = np.random.randint(int(roomy) + 1) + + index = index % (nw * nh) + ix = index // nh + iy = index % nh + gridx = startx + ix * size + gridy = starty + iy * size + return img.crop((gridx, gridy, gridx + size, gridy + size)) + + +def __flip(img, flip): + if flip: + return img.transpose(Image.FLIP_LEFT_RIGHT) + return img + + +def __print_size_warning(ow, oh, w, h): + """Print warning information about image size(only print once)""" + if not hasattr(__print_size_warning, 'has_printed'): + print("The image size needs to be a multiple of 4. " + "The loaded image size was (%d, %d), so it was adjusted to " + "(%d, %d). This adjustment will be done to all images " + "whose sizes are not multiples of 4" % (ow, oh, w, h)) + __print_size_warning.has_printed = True diff --git a/data/image_folder.py b/data/image_folder.py new file mode 100644 index 0000000000000000000000000000000000000000..2a137d32459367701bcaba3664eb381051a41d88 --- /dev/null +++ b/data/image_folder.py @@ -0,0 +1,66 @@ +"""A modified image folder class + +We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) +so that this class can load images from both current directory and its subdirectories. 
+""" + +import torch.utils.data as data + +from PIL import Image +import os +import os.path + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', + '.tif', '.TIF', '.tiff', '.TIFF', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir, max_dataset_size=float("inf")): + images = [] + assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir + + for root, _, fnames in sorted(os.walk(dir, followlinks=True)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + images.append(path) + return images[:min(max_dataset_size, len(images))] + + +def default_loader(path): + return Image.open(path).convert('RGB') + + +class ImageFolder(data.Dataset): + + def __init__(self, root, transform=None, return_paths=False, + loader=default_loader): + imgs = make_dataset(root) + if len(imgs) == 0: + raise(RuntimeError("Found 0 images in: " + root + "\n" + "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) + + self.root = root + self.imgs = imgs + self.transform = transform + self.return_paths = return_paths + self.loader = loader + + def __getitem__(self, index): + path = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.return_paths: + return img, path + else: + return img + + def __len__(self): + return len(self.imgs) diff --git a/data/single_dataset.py b/data/single_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5c3232f2ff746e73eeb4a7775027796dd20969 --- /dev/null +++ b/data/single_dataset.py @@ -0,0 +1,40 @@ +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image + + +class SingleDataset(BaseDataset): + """This dataset class can load a set of images specified by the path --dataroot /path/to/data. + + It can be used for generating CycleGAN results only for one side with the model option '-model test'. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size)) + input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc + self.transform = get_transform(opt, grayscale=(input_nc == 1)) + + def __getitem__(self, index): + """Return a data point and its metadata information. 
+ + Parameters: + index - - a random integer for data indexing + + Returns a dictionary that contains A and A_paths + A(tensor) - - an image in one domain + A_paths(str) - - the path of the image + """ + A_path = self.A_paths[index] + A_img = Image.open(A_path).convert('RGB') + A = self.transform(A_img) + return {'A': A, 'A_paths': A_path} + + def __len__(self): + """Return the total number of images in the dataset.""" + return len(self.A_paths) diff --git a/data/singleimage_dataset.py b/data/singleimage_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0a9f1b55098ee75879ece617f62115aeee3a00ed --- /dev/null +++ b/data/singleimage_dataset.py @@ -0,0 +1,108 @@ +import numpy as np +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random +import util.util as util + + +class SingleImageDataset(BaseDataset): + """ + This dataset class can load unaligned/unpaired datasets. + + It requires two directories to host training images from domain A '/path/to/data/trainA' + and from domain B '/path/to/data/trainB' respectively. + You can train the model with the dataset flag '--dataroot /path/to/data'. + Similarly, you need to prepare two directories: + '/path/to/data/testA' and '/path/to/data/testB' during test time. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + + self.dir_A = os.path.join(opt.dataroot, 'trainA') # create a path '/path/to/data/trainA' + self.dir_B = os.path.join(opt.dataroot, 'trainB') # create a path '/path/to/data/trainB' + + if os.path.exists(self.dir_A) and os.path.exists(self.dir_B): + self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA' + self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB' + self.A_size = len(self.A_paths) # get the size of dataset A + self.B_size = len(self.B_paths) # get the size of dataset B + + assert len(self.A_paths) == 1 and len(self.B_paths) == 1,\ + "SingleImageDataset class should be used with one image in each domain" + A_img = Image.open(self.A_paths[0]).convert('RGB') + B_img = Image.open(self.B_paths[0]).convert('RGB') + print("Image sizes %s and %s" % (str(A_img.size), str(B_img.size))) + + self.A_img = A_img + self.B_img = B_img + + # In single-image translation, we augment the data loader by applying + # random scaling. Still, we design the data loader such that the + # amount of scaling is the same within a minibatch. To do this, + # we precompute the random scaling values, and repeat them by |batch_size|. + A_zoom = 1 / self.opt.random_scale_max + zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2)) + self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [-1, 2]) + + B_zoom = 1 / self.opt.random_scale_max + zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2)) + self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [-1, 2]) + + # While the crop locations are randomized, the negative samples should + # not come from the same location. To do this, we precompute the + # crop locations with no repetition. 
+ self.patch_indices_A = list(range(len(self))) + random.shuffle(self.patch_indices_A) + self.patch_indices_B = list(range(len(self))) + random.shuffle(self.patch_indices_B) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index (int) -- a random integer for data indexing + + Returns a dictionary that contains A, B, A_paths and B_paths + A (tensor) -- an image in the input domain + B (tensor) -- its corresponding image in the target domain + A_paths (str) -- image paths + B_paths (str) -- image paths + """ + A_path = self.A_paths[0] + B_path = self.B_paths[0] + A_img = self.A_img + B_img = self.B_img + + # apply image transformation + if self.opt.phase == "train": + param = {'scale_factor': self.zoom_levels_A[index], + 'patch_index': self.patch_indices_A[index], + 'flip': random.random() > 0.5} + + transform_A = get_transform(self.opt, params=param, method=Image.BILINEAR) + A = transform_A(A_img) + + param = {'scale_factor': self.zoom_levels_B[index], + 'patch_index': self.patch_indices_B[index], + 'flip': random.random() > 0.5} + transform_B = get_transform(self.opt, params=param, method=Image.BILINEAR) + B = transform_B(B_img) + else: + transform = get_transform(self.opt, method=Image.BILINEAR) + A = transform(A_img) + B = transform(B_img) + + return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} + + def __len__(self): + """ Let's pretend the single image contains 100,000 crops for convenience. + """ + return 100000 diff --git a/data/template_dataset.py b/data/template_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..bfdf16be2a8a834b204c45d88c86857b37b9bd25 --- /dev/null +++ b/data/template_dataset.py @@ -0,0 +1,75 @@ +"""Dataset class template + +This module provides a template for users to implement custom datasets. +You can specify '--dataset_mode template' to use this dataset. +The class name should be consistent with both the filename and its dataset_mode option. +The filename should be _dataset.py +The class name should be Dataset.py +You need to implement the following functions: + -- : Add dataset-specific options and rewrite default values for existing options. + -- <__init__>: Initialize this dataset class. + -- <__getitem__>: Return a data point and its metadata information. + -- <__len__>: Return the number of images. +""" +from data.base_dataset import BaseDataset, get_transform +# from data.image_folder import make_dataset +# from PIL import Image + + +class TemplateDataset(BaseDataset): + """A template dataset class for you to implement custom datasets.""" + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option') + parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values + return parser + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + + A few things can be done here. + - save the options (have been done in BaseDataset) + - get image paths and meta information of the dataset. 
+ - define the image transformation. + """ + # save the option and dataset root + BaseDataset.__init__(self, opt) + # get the image paths of your dataset; + self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root + # define the default transform function. You can use ; You can also define your custom transform function + self.transform = get_transform(opt) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index -- a random integer for data indexing + + Returns: + a dictionary of data with their names. It usually contains the data itself and its metadata information. + + Step 1: get a random image path: e.g., path = self.image_paths[index] + Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB'). + Step 3: convert your data to a PyTorch tensor. You can use helpder functions such as self.transform. e.g., data = self.transform(image) + Step 4: return a data point as a dictionary. + """ + path = 'temp' # needs to be a string + data_A = None # needs to be a tensor + data_B = None # needs to be a tensor + return {'data_A': data_A, 'data_B': data_B, 'path': path} + + def __len__(self): + """Return the total number of images.""" + return len(self.image_paths) diff --git a/data/unaligned_dataset.py b/data/unaligned_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..de3363c74b0dea710903cd10e7173ad3a0fb7438 --- /dev/null +++ b/data/unaligned_dataset.py @@ -0,0 +1,78 @@ +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random +import util.util as util + + +class UnalignedDataset(BaseDataset): + """ + This dataset class can load unaligned/unpaired datasets. + + It requires two directories to host training images from domain A '/path/to/data/trainA' + and from domain B '/path/to/data/trainB' respectively. + You can train the model with the dataset flag '--dataroot /path/to/data'. + Similarly, you need to prepare two directories: + '/path/to/data/testA' and '/path/to/data/testB' during test time. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA' + self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB' + + if opt.phase == "test" and not os.path.exists(self.dir_A) \ + and os.path.exists(os.path.join(opt.dataroot, "valA")): + self.dir_A = os.path.join(opt.dataroot, "valA") + self.dir_B = os.path.join(opt.dataroot, "valB") + + self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA' + self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB' + self.A_size = len(self.A_paths) # get the size of dataset A + self.B_size = len(self.B_paths) # get the size of dataset B + + def __getitem__(self, index): + """Return a data point and its metadata information. 
+ + Parameters: + index (int) -- a random integer for data indexing + + Returns a dictionary that contains A, B, A_paths and B_paths + A (tensor) -- an image in the input domain + B (tensor) -- its corresponding image in the target domain + A_paths (str) -- image paths + B_paths (str) -- image paths + """ + A_path = self.A_paths[index % self.A_size] # make sure index is within then range + if self.opt.serial_batches: # make sure index is within then range + index_B = index % self.B_size + else: # randomize the index for domain B to avoid fixed pairs. + index_B = random.randint(0, self.B_size - 1) + B_path = self.B_paths[index_B] + A_img = Image.open(A_path).convert('RGB') + B_img = Image.open(B_path).convert('RGB') + + # Apply image transformation + # For CUT/FastCUT mode, if in finetuning phase (learning rate is decaying), + # do not perform resize-crop data augmentation of CycleGAN. + is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs + modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size) + transform = get_transform(modified_opt) + A = transform(A_img) + B = transform(B_img) + + return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} + + def __len__(self): + """Return the total number of images in the dataset. + + As we have two datasets with potentially different number of images, + we take a maximum of + """ + return max(self.A_size, self.B_size) diff --git a/docs/datasets.md b/docs/datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..eb1b88d0ff6b912c46ebcd505ab3a852652ab826 --- /dev/null +++ b/docs/datasets.md @@ -0,0 +1,45 @@ +### CUT and CycleGAN Datasets +Download the CUT and CycleGAN datasets using the following script. Some of the datasets are collected by other researchers and papers. Please cite the original papers if you use the data. +```bash +bash ./datasets/download_cut_dataset.sh dataset_name +``` +- `grumpifycat`: 88 Russian Blue cats from The Oxford-IIIT Pet [Dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/) and 214 Grumpy cats. We use an OpenCV detector `./datasets/detect_cat_face.py` to detect cat faces. +- `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)] +- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issue, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`. + +Please cite the CycleGAN paper if you use the following datasets. [[Citation](../datasets/bibtex/cyclegan.tex)] +- `maps`: 1096 training images scraped from Google Maps. +- `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra` +- `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`. +- `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper. +- `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). 
The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853. +- `iphone2dslr_flower`: both classes of images were downlaoded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper. + +To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data. + +You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails. + +### pix2pix datasets +Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data. +```bash +bash ./datasets/download_pix2pix_dataset.sh dataset_name +``` +- `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)] +- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)] +- `maps`: 1096 training images scraped from Google Maps +- `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/shoes.tex)] +- `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/handbags.tex)] +- `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`. + +We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A: + +Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc). + +Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`. 
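To make the expected layout concrete, a minimal sketch with hypothetical paths looks like this:

```bash
# Hypothetical example: domain A holds label maps, domain B holds the corresponding photos.
mkdir -p /path/to/data/A/train /path/to/data/A/val /path/to/data/A/test
mkdir -p /path/to/data/B/train /path/to/data/B/val /path/to/data/B/test
# Pairs are matched by filename:
#   /path/to/data/A/train/1.jpg  <->  /path/to/data/B/train/1.jpg
```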
+ +Once the data is formatted this way, call: +```bash +python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data +``` + +This will combine each pair of images (A,B) into a single image file, ready for training. diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..3742673b8a376def8a974b3abec2adc3b9f882b8 --- /dev/null +++ b/environment.yml @@ -0,0 +1,16 @@ +name: contrastive-unpaired-translation +channels: +- pytorch +- defaults +dependencies: +- python=3.6 +- pytorch=1.4.0 +- scipy +- pip: + - dominate==2.4.0 + - torchvision==0.5.0 + - Pillow==6.1.0 + - numpy==1.16.4 + - visdom==0.1.8 + - packaging + - GPUtil==1.4.0 diff --git a/experiments/__init__.py b/experiments/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4344fa9db3e3548e4b2450324536e75bc3e05d90 --- /dev/null +++ b/experiments/__init__.py @@ -0,0 +1,54 @@ +import os +import importlib + + +def find_launcher_using_name(launcher_name): + # cur_dir = os.path.dirname(os.path.abspath(__file__)) + # pythonfiles = glob.glob(cur_dir + '/**/*.py') + launcher_filename = "experiments.{}_launcher".format(launcher_name) + launcherlib = importlib.import_module(launcher_filename) + + # In the file, the class called LauncherNameLauncher() will + # be instantiated. It has to be a subclass of BaseLauncher, + # and it is case-insensitive. + launcher = None + target_launcher_name = launcher_name.replace('_', '') + 'launcher' + for name, cls in launcherlib.__dict__.items(): + if name.lower() == target_launcher_name.lower(): + launcher = cls + + if launcher is None: + raise ValueError("In %s.py, there should be a subclass of BaseLauncher " + "with class name that matches %s in lowercase." % + (launcher_filename, target_launcher_name)) + + return launcher + + +if __name__ == "__main__": + import sys + import pickle + + assert len(sys.argv) >= 3 + + name = sys.argv[1] + Launcher = find_launcher_using_name(name) + + cache = "/tmp/tmux_launcher/{}".format(name) + if os.path.isfile(cache): + instance = pickle.load(open(cache, 'r')) + else: + instance = Launcher() + + cmd = sys.argv[2] + if cmd == "launch": + instance.launch() + elif cmd == "stop": + instance.stop() + elif cmd == "send": + expid = int(sys.argv[3]) + cmd = int(sys.argv[4]) + instance.send_command(expid, cmd) + + os.makedirs("/tmp/tmux_launcher/", exist_ok=True) + pickle.dump(instance, open(cache, 'w')) diff --git a/experiments/__main__.py b/experiments/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..d77ad454567b3f0b35f2c4f40f60e0df4b2f4d91 --- /dev/null +++ b/experiments/__main__.py @@ -0,0 +1,87 @@ +import os +import importlib + + +def find_launcher_using_name(launcher_name): + # cur_dir = os.path.dirname(os.path.abspath(__file__)) + # pythonfiles = glob.glob(cur_dir + '/**/*.py') + launcher_filename = "experiments.{}_launcher".format(launcher_name) + launcherlib = importlib.import_module(launcher_filename) + + # In the file, the class called LauncherNameLauncher() will + # be instantiated. It has to be a subclass of BaseLauncher, + # and it is case-insensitive. 
+ launcher = None + # target_launcher_name = launcher_name.replace('_', '') + 'launcher' + for name, cls in launcherlib.__dict__.items(): + if name.lower() == "launcher": + launcher = cls + + if launcher is None: + raise ValueError("In %s.py, there should be a class named Launcher") + + return launcher + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('name') + parser.add_argument('cmd') + parser.add_argument('id', nargs='+', type=str) + parser.add_argument('--mode', default=None) + parser.add_argument('--which_epoch', default=None) + parser.add_argument('--continue_train', action='store_true') + parser.add_argument('--subdir', default='') + parser.add_argument('--title', default='') + parser.add_argument('--gpu_id', default=None, type=int) + parser.add_argument('--phase', default='test') + + opt = parser.parse_args() + + name = opt.name + Launcher = find_launcher_using_name(name) + + instance = Launcher() + + cmd = opt.cmd + ids = 'all' if 'all' in opt.id else [int(i) for i in opt.id] + if cmd == "launch": + instance.launch(ids, continue_train=opt.continue_train) + elif cmd == "stop": + instance.stop() + elif cmd == "send": + assert False + elif cmd == "close": + instance.close() + elif cmd == "dry": + instance.dry() + elif cmd == "relaunch": + instance.close() + instance.launch(ids, continue_train=opt.continue_train) + elif cmd == "run" or cmd == "train": + assert len(ids) == 1, '%s is invalid for run command' % (' '.join(opt.id)) + expid = ids[0] + instance.run_command(instance.commands(), expid, + continue_train=opt.continue_train, + gpu_id=opt.gpu_id) + elif cmd == 'launch_test': + instance.launch(ids, test=True) + elif cmd == "run_test" or cmd == "test": + test_commands = instance.test_commands() + if ids == "all": + ids = list(range(len(test_commands))) + for expid in ids: + instance.run_command(test_commands, expid, opt.which_epoch, + gpu_id=opt.gpu_id) + if expid < len(ids) - 1: + os.system("sleep 5s") + elif cmd == "print_names": + instance.print_names(ids, test=False) + elif cmd == "print_test_names": + instance.print_names(ids, test=True) + elif cmd == "create_comparison_html": + instance.create_comparison_html(name, ids, opt.subdir, opt.title, opt.phase) + else: + raise ValueError("Command not recognized") diff --git a/experiments/grumpifycat_launcher.py b/experiments/grumpifycat_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0c5aef608e0a93be7b5d909119f9d903c07733 --- /dev/null +++ b/experiments/grumpifycat_launcher.py @@ -0,0 +1,28 @@ +from .tmux_launcher import Options, TmuxLauncher + + +class Launcher(TmuxLauncher): + def common_options(self): + return [ + # Command 0 + Options( + dataroot="./datasets/grumpifycat", + name="grumpifycat_CUT", + CUT_mode="CUT" + ), + + # Command 1 + Options( + dataroot="./datasets/grumpifycat", + name="grumpifycat_FastCUT", + CUT_mode="FastCUT", + ) + ] + + def commands(self): + return ["python train.py " + str(opt) for opt in self.common_options()] + + def test_commands(self): + # RussianBlue -> Grumpy Cats dataset does not have test split. + # Therefore, let's set the test split to be the "train" set. 
+ return ["python test.py " + str(opt.set(phase='train')) for opt in self.common_options()] diff --git a/experiments/placeholder_launcher.py b/experiments/placeholder_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..9e44909fa5098d10227c5684d92f6559316b7630 --- /dev/null +++ b/experiments/placeholder_launcher.py @@ -0,0 +1,81 @@ +from .tmux_launcher import Options, TmuxLauncher + + +class Launcher(TmuxLauncher): + + # List of training commands + def commands(self): + opt = Options() + + # common options for all training sessions defined in this launcher + opt.set(dataroot="~/datasets/cityscapes/", # specify --dataroot option here + model="contrastive_cycle_gan", + pool_size=0, + no_dropout="", + init_type="xavier", + batch_size=1, + display_freq=400, + evaluation_metrics="fid,cityscapes", + evaluation_freq=10000, + direction="BtoA", + use_recommended_options="", + nce_idt_freq=0.1, + ) + + # Specify individual options here + commands = [ + + # first command. + # This command can be run using python -m experiments placeholder run 0 + # It will output python train.py [OPTIONS], where OPTIONS are everything defined in the variable opt + "python train.py " + str(opt.clone().set( + name="cityscapes_placeholder_noidt", # name of experiments + nce_idt=False, + )), + + # second command. + # This command can be run using python -m experiments placeholder run 1 + # It removes the option --nce_idt_freq 0.1 that was defined by our common options + "python train.py " + str(opt.clone().set( + name="cityscapes_placeholder_singlelayer", + nce_layers="16", + ).remove("nce_idt_freq")), + + + # third command that performs multigpu training + # This command can be run using python -m experiments placeholder run 2 + "python train.py " + str(opt.clone().set( + name="cityscapes_placeholder_multigpu", + nce_layers="16", + batch_size=4, + gpu_ids="0,1", + )), + + ] + + return commands + + # This is the command used for testing. 
+ # They can be run using python -m experiments placeholder run_test $i + def test_commands(self): + opt = Options() + opt.set(dataroot="~/datasets/cityscapes_unaligned/cityscapes", + model="contrastive_cycle_gan", + no_dropout="", + init_type="xavier", + batch_size=1, + direction="BtoA", + epoch=40, + phase='train', + evaluation_metrics="fid", + ) + + commands = [ + "python test.py " + str(opt.clone().set( + name="cityscapes_nce", + nce_layers="0,8,16", + direction="BtoA", + )), + ] + + return commands diff --git a/experiments/pretrained_launcher.py b/experiments/pretrained_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..0be457449dce390a12048447ec40303454793676 --- /dev/null +++ b/experiments/pretrained_launcher.py @@ -0,0 +1,61 @@ +from .tmux_launcher import Options, TmuxLauncher + + +class Launcher(TmuxLauncher): + def common_options(self): + return [ + # Command 0 + Options( + # NOTE: download the resized (and compressed) val set from + # http://efrosgans.eecs.berkeley.edu/CUT/datasets/cityscapes_val_for_CUT.tar + dataroot="datasets/cityscapes/cityscapes_val/", + direction="BtoA", + phase="val", + name="cityscapes_cut_pretrained", + CUT_mode="CUT", + ), + + # Command 1 + Options( + dataroot="./datasets/cityscapes_unaligned/cityscapes/", + direction="BtoA", + name="cityscapes_fastcut_pretrained", + CUT_mode="FastCUT", + ), + + # Command 2 + Options( + dataroot="./datasets/horse2zebra/", + name="horse2zebra_cut_pretrained", + CUT_mode="CUT" + ), + + # Command 3 + Options( + dataroot="./datasets/horse2zebra/", + name="horse2zebra_fastcut_pretrained", + CUT_mode="FastCUT", + ), + + # Command 4 + Options( + dataroot="./datasets/afhq/cat2dog/", + name="cat2dog_cut_pretrained", + CUT_mode="CUT" + ), + + # Command 5 + Options( + dataroot="./datasets/afhq/cat2dog/", + name="cat2dog_fastcut_pretrained", + CUT_mode="FastCUT", + ), + + + ] + + def commands(self): + return ["python train.py " + str(opt) for opt in self.common_options()] + + def test_commands(self): + return ["python test.py " + str(opt.set(num_test=500)) for opt in self.common_options()] diff --git a/experiments/singleimage_launcher.py b/experiments/singleimage_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..5d286c913e1c308e0f21a6f6071b00f9c5653fd6 --- /dev/null +++ b/experiments/singleimage_launcher.py @@ -0,0 +1,18 @@ +from .tmux_launcher import Options, TmuxLauncher + + +class Launcher(TmuxLauncher): + def common_options(self): + return [ + Options( + name="singleimage_monet_etretat", + dataroot="./datasets/single_image_monet_etretat", + model="sincut" + ) + ] + + def commands(self): + return ["python train.py " + str(opt) for opt in self.common_options()] + + def test_commands(self): + return ["python test.py " + str(opt) for opt in self.common_options()] diff --git a/experiments/tmux_launcher.py b/experiments/tmux_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..985e8bfeca006e01603b2e2887b6636296128621 --- /dev/null +++ b/experiments/tmux_launcher.py @@ -0,0 +1,215 @@ +""" +experiment launcher using tmux panes +""" +import os +import math +import GPUtil +import re + +available_gpu_devices = None + + +class Options(): + def __init__(self, *args, **kwargs): + self.args = [] + self.kvs = {"gpu_ids": "0"} + self.set(*args, **kwargs) + + def set(self, *args, **kwargs): + for a in args: + self.args.append(a) + for k, v in kwargs.items(): + self.kvs[k] = v + + return self + + def remove(self, *args): + for a in args: + if a in self.args: 
+ self.args.remove(a) + if a in self.kvs: + del self.kvs[a] + + return self + + def update(self, opt): + self.args += opt.args + self.kvs.update(opt.kvs) + return self + + def __str__(self): + final = " ".join(self.args) + for k, v in self.kvs.items(): + final += " --{} {}".format(k, v) + + return final + + def clone(self): + opt = Options() + opt.args = self.args.copy() + opt.kvs = self.kvs.copy() + return opt + + +def grab_pattern(pattern, text): + found = re.search(pattern, text) + if found is not None: + return found[1] + else: + None + + +# http://code.activestate.com/recipes/252177-find-the-common-beginning-in-a-list-of-strings/ +def findcommonstart(strlist): + prefix_len = ([min([x[0] == elem for elem in x]) + for x in zip(*strlist)] + [0]).index(0) + prefix_len = max(1, prefix_len - 4) + return strlist[0][:prefix_len] + + +class TmuxLauncher(): + def __init__(self): + super().__init__() + self.tmux_prepared = False + + def prepare_tmux_panes(self, num_experiments, dry=False): + self.pane_per_window = 1 + self.n_windows = int(math.ceil(num_experiments / self.pane_per_window)) + print('preparing {} tmux panes'.format(num_experiments)) + for w in range(self.n_windows): + if dry: + continue + window_name = "experiments_{}".format(w) + os.system("tmux new-window -n {}".format(window_name)) + self.tmux_prepared = True + + def refine_command(self, command, which_epoch, continue_train, gpu_id=None): + command = str(command) + if "--gpu_ids" in command: + gpu_ids = re.search(r'--gpu_ids ([\d,?]+)', command)[1] + else: + gpu_ids = "0" + + gpu_ids = gpu_ids.split(",") + num_gpus = len(gpu_ids) + global available_gpu_devices + if available_gpu_devices is None and gpu_id is None: + available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.5)] + if gpu_id is not None: + available_gpu_devices = [i for i in str(gpu_id)] + if len(available_gpu_devices) < num_gpus: + raise ValueError("{} GPU(s) required for the command {} is not available".format(num_gpus, command)) + active_devices = ",".join(available_gpu_devices[:num_gpus]) + if which_epoch is not None: + which_epoch = " --epoch %s " % which_epoch + else: + which_epoch = "" + command = "CUDA_VISIBLE_DEVICES={} {} {}".format(active_devices, command, which_epoch) + if continue_train: + command += " --continue_train " + + # available_gpu_devices = [str(g) for g in GPUtil.getAvailable(limit=8, maxMemory=0.8)] + available_gpu_devices = available_gpu_devices[num_gpus:] + + return command + + def send_command(self, exp_id, command, dry=False, continue_train=False): + command = self.refine_command(command, None, continue_train=continue_train) + pane_name = "experiments_{windowid}.{paneid}".format(windowid=exp_id // self.pane_per_window, + paneid=exp_id % self.pane_per_window) + if dry is False: + os.system("tmux send-keys -t {} \"{}\" Enter".format(pane_name, command)) + + print("{}: {}".format(pane_name, command)) + return pane_name + + def run_command(self, command, ids, which_epoch=None, continue_train=False, gpu_id=None): + if type(command) is not list: + command = [command] + if ids is None: + ids = list(range(len(command))) + if type(ids) is not list: + ids = [ids] + + for id in ids: + this_command = command[id] + refined_command = self.refine_command(this_command, which_epoch, continue_train=continue_train, gpu_id=gpu_id) + print(refined_command) + os.system(refined_command) + + def commands(self): + return [] + + def launch(self, ids, test=False, dry=False, continue_train=False): + commands = self.test_commands() if test 
else self.commands() + if type(ids) is list: + commands = [commands[i] for i in ids] + if not self.tmux_prepared: + self.prepare_tmux_panes(len(commands), dry) + assert self.tmux_prepared + + for i, command in enumerate(commands): + self.send_command(i, command, dry, continue_train=continue_train) + + def dry(self): + self.launch(dry=True) + + def stop(self): + num_experiments = len(self.commands()) + self.pane_per_window = 4 + self.n_windows = int(math.ceil(num_experiments / self.pane_per_window)) + for w in range(self.n_windows): + window_name = "experiments_{}".format(w) + for i in range(self.pane_per_window): + os.system("tmux send-keys -t {window}.{pane} C-c".format(window=window_name, pane=i)) + + def close(self): + num_experiments = len(self.commands()) + self.pane_per_window = 1 + self.n_windows = int(math.ceil(num_experiments / self.pane_per_window)) + for w in range(self.n_windows): + window_name = "experiments_{}".format(w) + os.system("tmux kill-window -t {}".format(window_name)) + + def print_names(self, ids, test=False): + if test: + cmds = self.test_commands() + else: + cmds = self.commands() + if type(ids) is list: + cmds = [cmds[i] for i in ids] + + for cmdid, cmd in enumerate(cmds): + name = grab_pattern(r'--name ([^ ]+)', cmd) + print(name) + + def create_comparison_html(self, expr_name, ids, subdir, title, phase): + cmds = self.test_commands() + if type(ids) is list: + cmds = [cmds[i] for i in ids] + + no_easy_label = True + dirs = [] + labels = [] + for cmdid, cmd in enumerate(cmds): + name = grab_pattern(r'--name ([^ ]+)', cmd) + which_epoch = grab_pattern(r'--epoch ([^ ]+)', cmd) + if which_epoch is None: + which_epoch = "latest" + label = grab_pattern(r'--easy_label "([^"]+)"', cmd) + if label is None: + label = name + else: + no_easy_label = False + labels.append(label) + dir = "results/%s/%s_%s/%s/" % (name, phase, which_epoch, subdir) + dirs.append(dir) + + commonprefix = findcommonstart(labels) if no_easy_label else "" + labels = ['"' + label[len(commonprefix):] + '"' for label in labels] + dirstr = ' '.join(dirs) + labelstr = ' '.join(labels) + + command = "python ~/tools/html.py --web_dir_prefix results/comparison_ --name %s --dirs %s --labels %s --image_width 256" % (expr_name + '_' + title, dirstr, labelstr) + print(command) + os.system(command) diff --git a/imgs/gif_cut.gif b/imgs/gif_cut.gif new file mode 100644 index 0000000000000000000000000000000000000000..1162f40938969ac4ab6d10e567c91211c8b13588 --- /dev/null +++ b/imgs/gif_cut.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1855628146188891cfad08ed48c33e42c4495611c5918d8ea827fadd38825aa8 +size 4484901 diff --git a/imgs/grumpycat.jpg b/imgs/grumpycat.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bb8bd3c08c8b17cfc3dbc37074e3694cca7fba5 Binary files /dev/null and b/imgs/grumpycat.jpg differ diff --git a/imgs/horse2zebra_comparison.jpg b/imgs/horse2zebra_comparison.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4498d91a9e54aada1b2fd11300a6b2d05efe56a --- /dev/null +++ b/imgs/horse2zebra_comparison.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2485875664fbd61cd0199e6422e32c00157782745e6417997eb1b90add2a8d46 +size 3128135 diff --git a/imgs/paris.jpg b/imgs/paris.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0026ed207123036cbe5c47624287ff87884b59b Binary files /dev/null and b/imgs/paris.jpg differ diff --git a/imgs/patchnce.gif b/imgs/patchnce.gif new file mode 100644 
index 0000000000000000000000000000000000000000..c45d7df1b3a11a1ecade94ef2ee3da377d6724f9 --- /dev/null +++ b/imgs/patchnce.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a43d2053c824d66702edd87607f84dcc8febfe4d509c751cb6c8c2bbd6b6de7c +size 1064563 diff --git a/imgs/results.gif b/imgs/results.gif new file mode 100644 index 0000000000000000000000000000000000000000..38b21f94362f0de72e52162f6a6091841f604fdf --- /dev/null +++ b/imgs/results.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:600346609917eadb196046ed151838b7333f0e8f4d87388ebb9fbe74a9e8de21 +size 4946258 diff --git a/imgs/singleimage.gif b/imgs/singleimage.gif new file mode 100644 index 0000000000000000000000000000000000000000..1f6880de509dec8da78c9f95b5342e656d075b0c --- /dev/null +++ b/imgs/singleimage.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39ebbbeaf3229f4a35d7d33eadde52dc2e0e867ebbbb961a0993e5220a2cf0fc +size 2273936 diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fc01113da66ff042bd1807b5bfdb70c4bce8d14c --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,67 @@ +"""This package contains modules related to objective functions, optimizations, and network architectures. + +To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. +You need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- : unpack data from dataset and apply preprocessing. + -- : produce intermediate results. + -- : calculate loss, gradients, and update network weights. + -- : (optionally) add model-specific options and set default options. + +In the function <__init__>, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): define networks used in our training. + -- self.visual_names (str list): specify the images that you want to display and save. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. + +Now you can use the model class by specifying flag '--model dummy'. +See our template model class 'template_model.py' for more details. +""" + +import importlib +from models.base_model import BaseModel + + +def find_model_using_name(model_name): + """Import the module "models/[model_name]_model.py". + + In the file, the class called DatasetNameModel() will + be instantiated. It has to be a subclass of BaseModel, + and it is case-insensitive. + """ + model_filename = "models." + model_name + "_model" + modellib = importlib.import_module(model_filename) + model = None + target_model_name = model_name.replace('_', '') + 'model' + for name, cls in modellib.__dict__.items(): + if name.lower() == target_model_name.lower() \ + and issubclass(cls, BaseModel): + model = cls + + if model is None: + print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." 
% (model_filename, target_model_name)) + exit(0) + + return model + + +def get_option_setter(model_name): + """Return the static method of the model class.""" + model_class = find_model_using_name(model_name) + return model_class.modify_commandline_options + + +def create_model(opt): + """Create a model given the option. + + This function warps the class CustomDatasetDataLoader. + This is the main interface between this package and 'train.py'/'test.py' + + Example: + >>> from models import create_model + >>> model = create_model(opt) + """ + model = find_model_using_name(opt.model) + instance = model(opt) + print("model [%s] was created" % type(instance).__name__) + return instance diff --git a/models/__pycache__/__init__.cpython-310.pyc b/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f12d68075338b31e7c3c61a10fd843064bcdb2fd Binary files /dev/null and b/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/models/__pycache__/base_model.cpython-310.pyc b/models/__pycache__/base_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b6211118ab2cf7fffb8d4c4f060eb726e07f908 Binary files /dev/null and b/models/__pycache__/base_model.cpython-310.pyc differ diff --git a/models/__pycache__/cut_model.cpython-310.pyc b/models/__pycache__/cut_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fdf1ff6e30eb6d28310100709abb7733d1f30c5 Binary files /dev/null and b/models/__pycache__/cut_model.cpython-310.pyc differ diff --git a/models/__pycache__/cycle_gan_model.cpython-310.pyc b/models/__pycache__/cycle_gan_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87919a6ec5a727825923f0e6180c3a3781a03c12 Binary files /dev/null and b/models/__pycache__/cycle_gan_model.cpython-310.pyc differ diff --git a/models/__pycache__/networks.cpython-310.pyc b/models/__pycache__/networks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab033bd7261e45b16cba6aa88a3d46bc724aacfa Binary files /dev/null and b/models/__pycache__/networks.cpython-310.pyc differ diff --git a/models/__pycache__/patchnce.cpython-310.pyc b/models/__pycache__/patchnce.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c6b9d8e60115eb8225a3b5380e4090ce28ed3fa Binary files /dev/null and b/models/__pycache__/patchnce.cpython-310.pyc differ diff --git a/models/__pycache__/stylegan_networks.cpython-310.pyc b/models/__pycache__/stylegan_networks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d5dc468d9e1499c6d4e422a3624d0fb4c56504c Binary files /dev/null and b/models/__pycache__/stylegan_networks.cpython-310.pyc differ diff --git a/models/base_model.py b/models/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..37bc25f35bafb64e4149de8a9bfab9a6d103c3fe --- /dev/null +++ b/models/base_model.py @@ -0,0 +1,258 @@ +import os +import torch +from collections import OrderedDict +from abc import ABC, abstractmethod +from . import networks + + +class BaseModel(ABC): + """This class is an abstract base class (ABC) for models. + To create a subclass, you need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- : unpack data from dataset and apply preprocessing. + -- : produce intermediate results. + -- : calculate losses, gradients, and update network weights. 
+ -- : (optionally) add model-specific options and set default options. + """ + + def __init__(self, opt): + """Initialize the BaseModel class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + + When creating your custom class, you need to implement your own initialization. + In this fucntion, you should first call + Then, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): specify the images that you want to display and save. + -- self.visual_names (str list): define networks used in our training. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. + """ + self.opt = opt + self.gpu_ids = opt.gpu_ids + self.isTrain = opt.isTrain + self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU + self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir + if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. + torch.backends.cudnn.benchmark = True + self.loss_names = [] + self.model_names = [] + self.visual_names = [] + self.optimizers = [] + self.image_paths = [] + self.metric = 0 # used for learning rate policy 'plateau' + + @staticmethod + def dict_grad_hook_factory(add_func=lambda x: x): + saved_dict = dict() + + def hook_gen(name): + def grad_hook(grad): + saved_vals = add_func(grad) + saved_dict[name] = saved_vals + return grad_hook + return hook_gen, saved_dict + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new model-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): includes the data itself and its metadata information. 
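+ For the unaligned datasets used by CUT and CycleGAN, this dict holds the keys 'A', 'B', 'A_paths', and 'B_paths'.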
+ """ + pass + + @abstractmethod + def forward(self): + """Run forward pass; called by both functions and .""" + pass + + @abstractmethod + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + pass + + def setup(self, opt): + """Load and print networks; create schedulers + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + if self.isTrain: + self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] + if not self.isTrain or opt.continue_train: + load_suffix = opt.epoch + self.load_networks(load_suffix) + + self.print_networks(opt.verbose) + + def parallelize(self): + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids)) + + def data_dependent_initialize(self, data): + pass + + def eval(self): + """Make models eval mode during test time""" + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + net.eval() + + def test(self): + """Forward function used in test time. + + This function wraps function in no_grad() so we don't save intermediate steps for backprop + It also calls to produce additional visualization results + """ + with torch.no_grad(): + self.forward() + self.compute_visuals() + + def compute_visuals(self): + """Calculate additional output images for visdom and HTML visualization""" + pass + + def get_image_paths(self): + """ Return image paths that are used to load current data""" + return self.image_paths + + def update_learning_rate(self): + """Update learning rates for all the networks; called at the end of every epoch""" + for scheduler in self.schedulers: + if self.opt.lr_policy == 'plateau': + scheduler.step(self.metric) + else: + scheduler.step() + + lr = self.optimizers[0].param_groups[0]['lr'] + print('learning rate = %.7f' % lr) + + def get_current_visuals(self): + """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" + visual_ret = OrderedDict() + for name in self.visual_names: + if isinstance(name, str): + visual_ret[name] = getattr(self, name) + return visual_ret + + def get_current_losses(self): + """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" + errors_ret = OrderedDict() + for name in self.loss_names: + if isinstance(name, str): + errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number + return errors_ret + + def save_networks(self, epoch): + """Save all the networks to the disk. 
+ + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + for name in self.model_names: + if isinstance(name, str): + save_filename = '%s_net_%s.pth' % (epoch, name) + save_path = os.path.join(self.save_dir, save_filename) + net = getattr(self, 'net' + name) + + if len(self.gpu_ids) > 0 and torch.cuda.is_available(): + torch.save(net.module.cpu().state_dict(), save_path) + net.cuda(self.gpu_ids[0]) + else: + torch.save(net.cpu().state_dict(), save_path) + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + def load_networks(self, epoch): + """Load all the networks from the disk. + + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + for name in self.model_names: + if isinstance(name, str): + load_filename = '%s_net_%s.pth' % (epoch, name) + if self.opt.isTrain and self.opt.pretrained_name is not None: + load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name) + else: + load_dir = self.save_dir + + load_path = os.path.join(load_dir, load_filename) + net = getattr(self, 'net' + name) + if isinstance(net, torch.nn.DataParallel): + net = net.module + print('loading the model from %s' % load_path) + # if you are using PyTorch newer than 0.4 (e.g., built from + # GitHub source), you can remove str() on self.device + state_dict = torch.load(load_path, map_location=str(self.device)) + if hasattr(state_dict, '_metadata'): + del state_dict._metadata + + # patch InstanceNorm checkpoints prior to 0.4 + # for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop + # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) + net.load_state_dict(state_dict) + + def print_networks(self, verbose): + """Print the total number of parameters in the network and (if verbose) network architecture + + Parameters: + verbose (bool) -- if verbose: print the network architecture + """ + print('---------- Networks initialized -------------') + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + if verbose: + print(net) + print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) + print('-----------------------------------------------') + + def set_requires_grad(self, nets, requires_grad=False): + """Set requies_grad=Fasle for all the networks to avoid unnecessary computations + Parameters: + nets (network list) -- a list of networks + requires_grad (bool) -- whether the networks require gradients or not + """ + if not isinstance(nets, list): + nets = [nets] + for net in nets: + if net is not None: + for param in net.parameters(): + param.requires_grad = requires_grad + + def generate_visuals_for_evaluation(self, data, mode): + return {} diff --git a/models/cut_model.py b/models/cut_model.py new file mode 100644 index 
0000000000000000000000000000000000000000..61e7f559d36e7c83c921c050c5b8351b3d9ebaf2 --- /dev/null +++ b/models/cut_model.py @@ -0,0 +1,214 @@ +import numpy as np +import torch +from .base_model import BaseModel +from . import networks +from .patchnce import PatchNCELoss +import util.util as util + + +class CUTModel(BaseModel): + """ This class implements CUT and FastCUT model, described in the paper + Contrastive Learning for Unpaired Image-to-Image Translation + Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu + ECCV, 2020 + + The code borrows heavily from the PyTorch implementation of CycleGAN + https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix + """ + @staticmethod + def modify_commandline_options(parser, is_train=True): + """ Configures options specific for CUT model + """ + parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)') + + parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))') + parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)') + parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))') + parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers') + parser.add_argument('--nce_includes_all_negatives_from_minibatch', + type=util.str2bool, nargs='?', const=True, default=False, + help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.') + parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map') + parser.add_argument('--netF_nc', type=int, default=256) + parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss') + parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer') + parser.add_argument('--flip_equivariance', + type=util.str2bool, nargs='?', const=True, default=False, + help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT") + + parser.set_defaults(pool_size=0) # no image pooling + + opt, _ = parser.parse_known_args() + + # Set default parameters for CUT and FastCUT + if opt.CUT_mode.lower() == "cut": + parser.set_defaults(nce_idt=True, lambda_NCE=1.0) + elif opt.CUT_mode.lower() == "fastcut": + parser.set_defaults( + nce_idt=False, lambda_NCE=10.0, flip_equivariance=True, + n_epochs=150, n_epochs_decay=50 + ) + else: + raise ValueError(opt.CUT_mode) + + return parser + + def __init__(self, opt): + BaseModel.__init__(self, opt) + + # specify the training losses you want to print out. 
+ # The training/test scripts will call + self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE'] + self.visual_names = ['real_A', 'fake_B', 'real_B'] + self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')] + + if opt.nce_idt and self.isTrain: + self.loss_names += ['NCE_Y'] + self.visual_names += ['idt_B'] + + if self.isTrain: + self.model_names = ['G', 'F', 'D'] + else: # during test time, only load G + self.model_names = ['G'] + + # define networks (both generator and discriminator) + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt) + self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt) + + if self.isTrain: + self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt) + + # define loss functions + self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) + self.criterionNCE = [] + + for nce_layer in self.nce_layers: + self.criterionNCE.append(PatchNCELoss(opt).to(self.device)) + + self.criterionIdt = torch.nn.L1Loss().to(self.device) + self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) + self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + def data_dependent_initialize(self, data): + """ + The feature network netF is defined in terms of the shape of the intermediate, extracted + features of the encoder portion of netG. Because of this, the weights of netF are + initialized at the first feedforward pass with some input images. + Please also see PatchSampleF.create_mlp(), which is called at the first forward() call. + """ + bs_per_gpu = data["A"].size(0) // max(len(self.opt.gpu_ids), 1) + self.set_input(data) + self.real_A = self.real_A[:bs_per_gpu] + self.real_B = self.real_B[:bs_per_gpu] + self.forward() # compute fake images: G(A) + if self.opt.isTrain: + self.compute_D_loss().backward() # calculate gradients for D + self.compute_G_loss().backward() # calculate graidents for G + if self.opt.lambda_NCE > 0.0: + self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2)) + self.optimizers.append(self.optimizer_F) + + def optimize_parameters(self): + # forward + self.forward() + + # update D + self.set_requires_grad(self.netD, True) + self.optimizer_D.zero_grad() + self.loss_D = self.compute_D_loss() + self.loss_D.backward() + self.optimizer_D.step() + + # update G + self.set_requires_grad(self.netD, False) + self.optimizer_G.zero_grad() + if self.opt.netF == 'mlp_sample': + self.optimizer_F.zero_grad() + self.loss_G = self.compute_G_loss() + self.loss_G.backward() + self.optimizer_G.step() + if self.opt.netF == 'mlp_sample': + self.optimizer_F.step() + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + Parameters: + input (dict): include the data itself and its metadata information. + The option 'direction' can be used to swap domain A and domain B. 
+ """ + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + def forward(self): + """Run forward pass; called by both functions and .""" + self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt and self.opt.isTrain else self.real_A + if self.opt.flip_equivariance: + self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5) + if self.flipped_for_equivariance: + self.real = torch.flip(self.real, [3]) + + self.fake = self.netG(self.real) + self.fake_B = self.fake[:self.real_A.size(0)] + if self.opt.nce_idt: + self.idt_B = self.fake[self.real_A.size(0):] + + def compute_D_loss(self): + """Calculate GAN loss for the discriminator""" + fake = self.fake_B.detach() + # Fake; stop backprop to the generator by detaching fake_B + pred_fake = self.netD(fake) + self.loss_D_fake = self.criterionGAN(pred_fake, False).mean() + # Real + self.pred_real = self.netD(self.real_B) + loss_D_real = self.criterionGAN(self.pred_real, True) + self.loss_D_real = loss_D_real.mean() + + # combine loss and calculate gradients + self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 + return self.loss_D + + def compute_G_loss(self): + """Calculate GAN and NCE loss for the generator""" + fake = self.fake_B + # First, G(A) should fake the discriminator + if self.opt.lambda_GAN > 0.0: + pred_fake = self.netD(fake) + self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN + else: + self.loss_G_GAN = 0.0 + + if self.opt.lambda_NCE > 0.0: + self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B) + else: + self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0 + + if self.opt.nce_idt and self.opt.lambda_NCE > 0.0: + self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B) + loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5 + else: + loss_NCE_both = self.loss_NCE + + self.loss_G = self.loss_G_GAN + loss_NCE_both + return self.loss_G + + def calculate_NCE_loss(self, src, tgt): + n_layers = len(self.nce_layers) + feat_q = self.netG(tgt, self.nce_layers, encode_only=True) + + if self.opt.flip_equivariance and self.flipped_for_equivariance: + feat_q = [torch.flip(fq, [3]) for fq in feat_q] + + feat_k = self.netG(src, self.nce_layers, encode_only=True) + feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None) + feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids) + + total_nce_loss = 0.0 + for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers): + loss = crit(f_q, f_k) * self.opt.lambda_NCE + total_nce_loss += loss.mean() + + return total_nce_loss / n_layers diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py new file mode 100644 index 0000000000000000000000000000000000000000..67753d0a70c2032de9784a7bdbae71892a105d55 --- /dev/null +++ b/models/cycle_gan_model.py @@ -0,0 +1,325 @@ +import torch +import itertools +from util.image_pool import ImagePool +from .base_model import BaseModel +from . import networks + +try: + from apex import amp +except ImportError as error: + print(error) + + +class CycleGANModel(BaseModel): + """ + This class implements the CycleGAN model, for learning image-to-image translation without paired data. + + The model training requires '--dataset_mode unaligned' dataset. 
+ By default, it uses a '--netG resnet_9blocks' ResNet generator, + a '--netD basic' discriminator (PatchGAN introduced by pix2pix), + and a least-square GANs objective ('--gan_mode lsgan'). + + CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf + """ + + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + + For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. + A (source domain), B (target domain). + Generators: G_A: A -> B; G_B: B -> A. + Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. + Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) + Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper) + Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) + Dropout is not used in the original CycleGAN paper. + """ + # parser.set_defaults(no_dropout=True, no_antialias=True, no_antialias_up=True) # default CycleGAN did not use dropout + # parser.set_defaults(no_dropout=True) + if is_train: + parser.add_argument( + "--lambda_A", + type=float, + default=10.0, + help="weight for cycle loss (A -> B -> A)", + ) + parser.add_argument( + "--lambda_B", + type=float, + default=10.0, + help="weight for cycle loss (B -> A -> B)", + ) + parser.add_argument( + "--lambda_identity", + type=float, + default=0.5, + help="use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1", + ) + + return parser + + def __init__(self, opt): + """Initialize the CycleGAN class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseModel.__init__(self, opt) + # specify the training losses you want to print out. The training/test scripts will call + self.loss_names = [ + "D_A", + "G_A", + "cycle_A", + "idt_A", + "D_B", + "G_B", + "cycle_B", + "idt_B", + ] + # specify the images you want to save/display. The training/test scripts will call + visual_names_A = ["real_A", "fake_B", "rec_A"] + visual_names_B = ["real_B", "fake_A", "rec_B"] + if ( + self.isTrain and self.opt.lambda_identity > 0.0 + ): # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B) + visual_names_A.append("idt_B") + visual_names_B.append("idt_A") + + self.visual_names = ( + visual_names_A + visual_names_B + ) # combine visualizations for A and B + # specify the models you want to save to the disk. The training/test scripts will call and . + if self.isTrain: + self.model_names = ["G_A", "G_B", "D_A", "D_B"] + else: # during test time, only load Gs + self.model_names = ["G_A", "G_B"] + + # define networks (both Generators and discriminators) + # The naming is different from those used in the paper. + # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) + self.netG_A = networks.define_G( + opt.input_nc, + opt.output_nc, + opt.ngf, + opt.netG, + opt.normG, + not opt.no_dropout, + opt.init_type, + opt.init_gain, + opt.no_antialias, + opt.no_antialias_up, + self.gpu_ids, + opt=opt, + ) + self.netG_B = networks.define_G( + opt.output_nc, + opt.input_nc, + opt.ngf, + opt.netG, + opt.normG, + not opt.no_dropout, + opt.init_type, + opt.init_gain, + opt.no_antialias, + opt.no_antialias_up, + self.gpu_ids, + opt=opt, + ) + + if self.isTrain: # define discriminators + self.netD_A = networks.define_D( + opt.output_nc, + opt.ndf, + opt.netD, + opt.n_layers_D, + opt.normD, + opt.init_type, + opt.init_gain, + opt.no_antialias, + self.gpu_ids, + opt=opt, + ) + self.netD_B = networks.define_D( + opt.input_nc, + opt.ndf, + opt.netD, + opt.n_layers_D, + opt.normD, + opt.init_type, + opt.init_gain, + opt.no_antialias, + self.gpu_ids, + opt=opt, + ) + + if self.isTrain: + if ( + opt.lambda_identity > 0.0 + ): # only works when input and output images have the same number of channels + assert opt.input_nc == opt.output_nc + self.fake_A_pool = ImagePool( + opt.pool_size + ) # create image buffer to store previously generated images + self.fake_B_pool = ImagePool( + opt.pool_size + ) # create image buffer to store previously generated images + # define loss functions + self.criterionGAN = networks.GANLoss(opt.gan_mode).to( + self.device + ) # define GAN loss. + self.criterionCycle = torch.nn.L1Loss() + self.criterionIdt = torch.nn.L1Loss() + # initialize optimizers; schedulers will be automatically created by function . + self.optimizer_G = torch.optim.Adam( + itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), + lr=opt.lr, + betas=(opt.beta1, 0.999), + ) + self.optimizer_D = torch.optim.Adam( + itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), + lr=opt.lr, + betas=(opt.beta1, 0.999), + ) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): include the data itself and its metadata information. + + The option 'direction' can be used to swap domain A and domain B. + """ + AtoB = self.opt.direction == "AtoB" + self.real_A = input["A" if AtoB else "B"].to(self.device) + self.real_B = input["B" if AtoB else "A"].to(self.device) + self.image_paths = input["A_paths" if AtoB else "B_paths"] + + def forward(self): + """Run forward pass; called by both functions and .""" + self.fake_B = self.netG_A(self.real_A) # G_A(A) + self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A)) + self.fake_A = self.netG_B(self.real_B) # G_B(B) + self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B)) + + def backward_D_basic(self, netD, real, fake): + """Calculate GAN loss for the discriminator + + Parameters: + netD (network) -- the discriminator D + real (tensor array) -- real images + fake (tensor array) -- images generated by a generator + + Return the discriminator loss. + We also call loss_D.backward() to calculate the gradients. 
+ """ + # Real + pred_real = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + # Fake + pred_fake = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + # Combined loss and calculate gradients + loss_D = (loss_D_real + loss_D_fake) * 0.5 + # if self.opt.amp: + # with amp.scale_loss(loss_D, self.optimizer_D) as scaled_loss: + # scaled_loss.backward() + # else: + loss_D.backward() + return loss_D + + def backward_D_A(self): + """Calculate GAN loss for discriminator D_A""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) + + def backward_D_B(self): + """Calculate GAN loss for discriminator D_B""" + fake_A = self.fake_A_pool.query(self.fake_A) + self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) + + def backward_G(self): + """Calculate the loss for generators G_A and G_B""" + lambda_idt = self.opt.lambda_identity + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + # Identity loss + if lambda_idt > 0: + # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.idt_A = self.netG_A(self.real_B) + self.loss_idt_A = ( + self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt + ) + # G_B should be identity if real_A is fed: ||G_B(A) - A|| + self.idt_B = self.netG_B(self.real_A) + self.loss_idt_B = ( + self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt + ) + else: + self.loss_idt_A = 0 + self.loss_idt_B = 0 + + # GAN loss D_A(G_A(A)) + self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) + # Forward cycle loss || G_B(G_A(A)) - A|| + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + # Backward cycle loss || G_A(G_B(B)) - B|| + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + # combined loss and calculate gradients + self.loss_G = ( + self.loss_G_A + + self.loss_G_B + + self.loss_cycle_A + + self.loss_cycle_B + + self.loss_idt_A + + self.loss_idt_B + ) + # if self.opt.amp: + # with amp.scale_loss(self.loss_G, self.optimizer_G) as scaled_loss: + # scaled_loss.backward() + # else: + self.loss_G.backward() + + def data_dependent_initialize(self, *args, **kwargs): + return + + def generate_visuals_for_evaluation(self, data, mode): + with torch.no_grad(): + visuals = {} + AtoB = self.opt.direction == "AtoB" + G = self.netG_A + source = data["A" if AtoB else "B"].to(self.device) + if mode == "forward": + visuals["fake_B"] = G(source) + else: + raise ValueError("mode %s is not recognized" % mode) + return visuals + + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # forward + self.forward() # compute fake images and reconstruction images. 
+ # G_A and G_B + self.set_requires_grad( + [self.netD_A, self.netD_B], False + ) # Ds require no gradients when optimizing Gs + self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero + self.backward_G() # calculate gradients for G_A and G_B + self.optimizer_G.step() # update G_A and G_B's weights + # D_A and D_B + self.set_requires_grad([self.netD_A, self.netD_B], True) + self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero + self.backward_D_A() # calculate gradients for D_A + self.backward_D_B() # calculate graidents for D_B + self.optimizer_D.step() # update D_A and D_B's weights diff --git a/models/networks.py b/models/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..e0bc5f48193a9d59e5ba6f808d84488f8d935f41 --- /dev/null +++ b/models/networks.py @@ -0,0 +1,1403 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import init +import functools +from torch.optim import lr_scheduler +import numpy as np +from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator + +############################################################################### +# Helper Functions +############################################################################### + + +def get_filter(filt_size=3): + if(filt_size == 1): + a = np.array([1., ]) + elif(filt_size == 2): + a = np.array([1., 1.]) + elif(filt_size == 3): + a = np.array([1., 2., 1.]) + elif(filt_size == 4): + a = np.array([1., 3., 3., 1.]) + elif(filt_size == 5): + a = np.array([1., 4., 6., 4., 1.]) + elif(filt_size == 6): + a = np.array([1., 5., 10., 10., 5., 1.]) + elif(filt_size == 7): + a = np.array([1., 6., 15., 20., 15., 6., 1.]) + + filt = torch.Tensor(a[:, None] * a[None, :]) + filt = filt / torch.sum(filt) + + return filt + + +class Downsample(nn.Module): + def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0): + super(Downsample, self).__init__() + self.filt_size = filt_size + self.pad_off = pad_off + self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))] + self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes] + self.stride = stride + self.off = int((self.stride - 1) / 2.) + self.channels = channels + + filt = get_filter(filt_size=self.filt_size) + self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1))) + + self.pad = get_pad_layer(pad_type)(self.pad_sizes) + + def forward(self, inp): + if(self.filt_size == 1): + if(self.pad_off == 0): + return inp[:, :, ::self.stride, ::self.stride] + else: + return self.pad(inp)[:, :, ::self.stride, ::self.stride] + else: + return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1]) + + +class Upsample2(nn.Module): + def __init__(self, scale_factor, mode='nearest'): + super().__init__() + self.factor = scale_factor + self.mode = mode + + def forward(self, x): + return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode) + + +class Upsample(nn.Module): + def __init__(self, channels, pad_type='repl', filt_size=4, stride=2): + super(Upsample, self).__init__() + self.filt_size = filt_size + self.filt_odd = np.mod(filt_size, 2) == 1 + self.pad_size = int((filt_size - 1) / 2) + self.stride = stride + self.off = int((self.stride - 1) / 2.) 
+ self.channels = channels + + filt = get_filter(filt_size=self.filt_size) * (stride**2) + self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1))) + + self.pad = get_pad_layer(pad_type)([1, 1, 1, 1]) + + def forward(self, inp): + ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:] + if(self.filt_odd): + return ret_val + else: + return ret_val[:, :, :-1, :-1] + + +def get_pad_layer(pad_type): + if(pad_type in ['refl', 'reflect']): + PadLayer = nn.ReflectionPad2d + elif(pad_type in ['repl', 'replicate']): + PadLayer = nn.ReplicationPad2d + elif(pad_type == 'zero'): + PadLayer = nn.ZeroPad2d + else: + print('Pad type [%s] not recognized' % pad_type) + return PadLayer + + +class Identity(nn.Module): + def forward(self, x): + return x + + +def get_norm_layer(norm_type='instance'): + """Return a normalization layer + + Parameters: + norm_type (str) -- the name of the normalization layer: batch | instance | none + + For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). + For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics. + """ + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + def norm_layer(x): + return Identity() + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def get_scheduler(optimizer, opt): + """Return a learning rate scheduler + + Parameters: + optimizer -- the optimizer of the network + opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.  + opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine + + For 'linear', we keep the same learning rate for the first epochs + and linearly decay the rate to zero over the next epochs. + For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. + See https://pytorch.org/docs/stable/optim.html for more details. + """ + if opt.lr_policy == 'linear': + def lambda_rule(epoch): + lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1) + return lr_l + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + elif opt.lr_policy == 'step': + scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) + elif opt.lr_policy == 'plateau': + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) + elif opt.lr_policy == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) + else: + return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) + return scheduler + + +def init_weights(net, init_type='normal', init_gain=0.02, debug=False): + """Initialize network weights. + + Parameters: + net (network) -- network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + + We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might + work better for some applications. Feel free to try yourself. 
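For reference, the 'linear' policy in get_scheduler above keeps the learning-rate multiplier at 1.0 for the first opt.n_epochs epochs and then ramps it down to near zero over opt.n_epochs_decay further epochs. A standalone sketch of that multiplier, using hypothetical option values (epoch_count=1, n_epochs=100, n_epochs_decay=100):

```python
epoch_count, n_epochs, n_epochs_decay = 1, 100, 100  # assumed option values

def lambda_rule(epoch):
    # Same rule as in get_scheduler: 1.0 during the first n_epochs,
    # then a linear ramp toward 0 over n_epochs_decay epochs.
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)

for epoch in [0, 50, 99, 100, 149, 199]:
    print(epoch, round(lambda_rule(epoch), 3))
# 0, 50, 99 -> 1.0;  100 -> 0.99;  149 -> 0.505;  199 -> 0.01  (multiplier on the base lr)
```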
+ """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if debug: + print(classname) + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + net.apply(init_func) # apply the initialization function + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True): + """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + # if not amp: + # net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training + if initialize_weights: + init_weights(net, init_type, init_gain=init_gain, debug=debug) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', + init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None): + """Create a generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 + norm (str) -- the name of normalization layers used in the network: batch | instance | none + use_dropout (bool) -- if use dropout layers. + init_type (str) -- the name of our initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a generator + + Our current implementation provides two types of generators: + U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images) + The original U-Net paper: https://arxiv.org/abs/1505.04597 + + Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks) + Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations. + We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style). + + + The generator has been initialized by . It uses RELU for non-linearity. 
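A short usage sketch of init_weights on a toy module, assuming the repository root is on PYTHONPATH so that this file is importable as models.networks:

```python
import torch.nn as nn
from models.networks import init_weights  # this file

# A small conv stack; init_weights walks its modules and re-initializes
# Conv/Linear weights according to init_type, with biases set to zero.
net = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(True),
    nn.Conv2d(16, 8, 3, padding=1),
)
init_weights(net, init_type='xavier', init_gain=0.02, debug=True)
# debug=True prints the class name of every re-initialized Conv/Linear layer;
# BatchNorm2d weights are drawn from N(1.0, init_gain) as in the code above.
```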
+ """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt) + elif netG == 'resnet_4blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'stylegan2': + net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt) + elif netG == 'smallstylegan2': + net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt) + elif netG == 'resnet_cat': + n_blocks = 8 + net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu') + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG)) + + +def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None): + if netF == 'global_pool': + net = PoolingF() + elif netF == 'reshape': + net = ReshapeF() + elif netF == 'sample': + net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc) + elif netF == 'mlp_sample': + net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc) + elif netF == 'strided_conv': + net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids) + else: + raise NotImplementedError('projection model name [%s] is not recognized' % netF) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None): + """Create a discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the first conv layer + netD (str) -- the architecture's name: basic | n_layers | pixel + n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers' + norm (str) -- the type of normalization layers used in the network. + init_type (str) -- the name of the initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a discriminator + + Our current implementation provides three types of discriminators: + [basic]: 'PatchGAN' classifier described in the original pix2pix paper. + It can classify whether 70×70 overlapping patches are real or fake. + Such a patch-level discriminator architecture has fewer parameters + than a full-image discriminator and can work on arbitrarily-sized images + in a fully convolutional fashion. 
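As an aside, the 70x70 figure quoted for the [basic] PatchGAN follows from the receptive-field recurrence rf_in = (rf_out - 1) * stride + kernel applied to the classic stack of three stride-2 and two stride-1 4x4 convolutions; the anti-aliased variant built below (stride-1 conv followed by Downsample) shifts these numbers slightly. A quick check of the arithmetic:

```python
# Receptive field of the classic 70x70 PatchGAN (netD='basic', n_layers=3):
# three 4x4 stride-2 convs, then two 4x4 stride-1 convs.
layers = [(4, 2), (4, 2), (4, 2), (4, 1), (4, 1)]  # (kernel, stride)

rf = 1
for kernel, stride in reversed(layers):
    rf = (rf - 1) * stride + kernel
print(rf)  # 70: each output value judges a 70x70 patch of the input image
```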
+
+    [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+        with the parameter n_layers_D (default=3 as used in [basic] (PatchGAN).)
+
+    [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+    It encourages greater color diversity but has no effect on spatial statistics.
+
+    The discriminator has been initialized by init_net. It uses Leaky RELU for non-linearity.
+    """
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+
+    if netD == 'basic':  # default PatchGAN classifier
+        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
+    elif netD == 'n_layers':  # more options
+        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
+    elif netD == 'pixel':  # classify if each pixel is real or fake
+        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+    elif 'stylegan2' in netD:
+        net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
+    else:
+        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+    return init_net(net, init_type, init_gain, gpu_ids,
+                    initialize_weights=('stylegan2' not in netD))
+
+
+##############################################################################
+# Classes
+##############################################################################
+class GANLoss(nn.Module):
+    """Define different GAN objectives.
+
+    The GANLoss class abstracts away the need to create the target label tensor
+    that has the same size as the input.
+    """
+
+    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
+        """ Initialize the GANLoss class.
+
+        Parameters:
+            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
+            target_real_label (bool) - - label for a real image
+            target_fake_label (bool) - - label of a fake image
+
+        Note: Do not use sigmoid as the last layer of Discriminator.
+        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
+        """
+        super(GANLoss, self).__init__()
+        self.register_buffer('real_label', torch.tensor(target_real_label))
+        self.register_buffer('fake_label', torch.tensor(target_fake_label))
+        self.gan_mode = gan_mode
+        if gan_mode == 'lsgan':
+            self.loss = nn.MSELoss()
+        elif gan_mode == 'vanilla':
+            self.loss = nn.BCEWithLogitsLoss()
+        elif gan_mode in ['wgangp', 'nonsaturating']:
+            self.loss = None
+        else:
+            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
+
+    def get_target_tensor(self, prediction, target_is_real):
+        """Create label tensors with the same size as the input.
+
+        Parameters:
+            prediction (tensor) - - typically the prediction from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            A label tensor filled with ground truth label, and with the size of the input
+        """
+
+        if target_is_real:
+            target_tensor = self.real_label
+        else:
+            target_tensor = self.fake_label
+        return target_tensor.expand_as(prediction)
+
+    def __call__(self, prediction, target_is_real):
+        """Calculate loss given Discriminator's output and ground truth labels.
+
+        Parameters:
+            prediction (tensor) - - typically the prediction output from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            the calculated loss.
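A minimal usage sketch of GANLoss driving an 'lsgan' discriminator update, assuming this file is importable as models.networks and using random tensors as stand-ins for real and generated batches:

```python
import torch
from models.networks import GANLoss, NLayerDiscriminator

netD = NLayerDiscriminator(input_nc=3)     # PatchGAN defined later in this file
criterionGAN = GANLoss('lsgan')            # MSE against 1.0 / 0.0 target maps

real = torch.randn(2, 3, 128, 128)         # stand-in real batch
fake = torch.randn(2, 3, 128, 128)         # stand-in generator output

pred_real = netD(real)
pred_fake = netD(fake.detach())
# GANLoss expands the scalar label to the prediction map's shape internally,
# so the PatchGAN's HxW output needs no manual target construction.
loss_D = 0.5 * (criterionGAN(pred_real, True) + criterionGAN(pred_fake, False))
loss_D.backward()
```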
+ """ + bs = prediction.size(0) + if self.gan_mode in ['lsgan', 'vanilla']: + target_tensor = self.get_target_tensor(prediction, target_is_real) + loss = self.loss(prediction, target_tensor) + elif self.gan_mode == 'wgangp': + if target_is_real: + loss = -prediction.mean() + else: + loss = prediction.mean() + elif self.gan_mode == 'nonsaturating': + if target_is_real: + loss = F.softplus(-prediction).view(bs, -1).mean(dim=1) + else: + loss = F.softplus(prediction).view(bs, -1).mean(dim=1) + return loss + + +def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): + """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 + + Arguments: + netD (network) -- discriminator network + real_data (tensor array) -- real images + fake_data (tensor array) -- generated images from the generator + device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') + type (str) -- if we mix real and fake data or not [real | fake | mixed]. + constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 + lambda_gp (float) -- weight for this loss + + Returns the gradient penalty loss + """ + if lambda_gp > 0.0: + if type == 'real': # either use real images, fake images, or a linear interpolation of two. + interpolatesv = real_data + elif type == 'fake': + interpolatesv = fake_data + elif type == 'mixed': + alpha = torch.rand(real_data.shape[0], 1, device=device) + alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) + interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) + else: + raise NotImplementedError('{} not implemented'.format(type)) + interpolatesv.requires_grad_(True) + disc_interpolates = netD(interpolatesv) + gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, + grad_outputs=torch.ones(disc_interpolates.size()).to(device), + create_graph=True, retain_graph=True, only_inputs=True) + gradients = gradients[0].view(real_data.size(0), -1) # flat the data + gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps + return gradient_penalty, gradients + else: + return 0.0, None + + +class Normalize(nn.Module): + + def __init__(self, power=2): + super(Normalize, self).__init__() + self.power = power + + def forward(self, x): + norm = x.pow(self.power).sum(1, keepdim=True).pow(1. 
/ self.power) + out = x.div(norm + 1e-7) + return out + + +class PoolingF(nn.Module): + def __init__(self): + super(PoolingF, self).__init__() + model = [nn.AdaptiveMaxPool2d(1)] + self.model = nn.Sequential(*model) + self.l2norm = Normalize(2) + + def forward(self, x): + return self.l2norm(self.model(x)) + + +class ReshapeF(nn.Module): + def __init__(self): + super(ReshapeF, self).__init__() + model = [nn.AdaptiveAvgPool2d(4)] + self.model = nn.Sequential(*model) + self.l2norm = Normalize(2) + + def forward(self, x): + x = self.model(x) + x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2) + return self.l2norm(x_reshape) + + +class StridedConvF(nn.Module): + def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]): + super().__init__() + # self.conv1 = nn.Conv2d(256, 128, 3, stride=2) + # self.conv2 = nn.Conv2d(128, 64, 3, stride=1) + self.l2_norm = Normalize(2) + self.mlps = {} + self.moving_averages = {} + self.init_type = init_type + self.init_gain = init_gain + self.gpu_ids = gpu_ids + + def create_mlp(self, x): + C, H = x.shape[1], x.shape[2] + n_down = int(np.rint(np.log2(H / 32))) + mlp = [] + for i in range(n_down): + mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2)) + mlp.append(nn.ReLU()) + C = max(C // 2, 64) + mlp.append(nn.Conv2d(C, 64, 3)) + mlp = nn.Sequential(*mlp) + init_net(mlp, self.init_type, self.init_gain, self.gpu_ids) + return mlp + + def update_moving_average(self, key, x): + if key not in self.moving_averages: + self.moving_averages[key] = x.detach() + + self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001 + + def forward(self, x, use_instance_norm=False): + C, H = x.shape[1], x.shape[2] + key = '%d_%d' % (C, H) + if key not in self.mlps: + self.mlps[key] = self.create_mlp(x) + self.add_module("child_%s" % key, self.mlps[key]) + mlp = self.mlps[key] + x = mlp(x) + self.update_moving_average(key, x) + x = x - self.moving_averages[key] + if use_instance_norm: + x = F.instance_norm(x) + return self.l2_norm(x) + + +class PatchSampleF(nn.Module): + def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]): + # potential issues: currently, we use the same patch_ids for multiple images in the batch + super(PatchSampleF, self).__init__() + self.l2norm = Normalize(2) + self.use_mlp = use_mlp + self.nc = nc # hard-coded + self.mlp_init = False + self.init_type = init_type + self.init_gain = init_gain + self.gpu_ids = gpu_ids + + def create_mlp(self, feats): + for mlp_id, feat in enumerate(feats): + input_nc = feat.shape[1] + mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)]) + if len(self.gpu_ids) > 0: + mlp.cuda() + setattr(self, 'mlp_%d' % mlp_id, mlp) + init_net(self, self.init_type, self.init_gain, self.gpu_ids) + self.mlp_init = True + + def forward(self, feats, num_patches=64, patch_ids=None): + return_ids = [] + return_feats = [] + if self.use_mlp and not self.mlp_init: + self.create_mlp(feats) + for feat_id, feat in enumerate(feats): + B, H, W = feat.shape[0], feat.shape[2], feat.shape[3] + feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) + if num_patches > 0: + if patch_ids is not None: + patch_id = patch_ids[feat_id] + else: + # torch.randperm produces cudaErrorIllegalAddress for newer versions of PyTorch. 
https://github.com/taesungp/contrastive-unpaired-translation/issues/83 + #patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) + patch_id = np.random.permutation(feat_reshape.shape[1]) + patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device) + patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device) + x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1]) + else: + x_sample = feat_reshape + patch_id = [] + if self.use_mlp: + mlp = getattr(self, 'mlp_%d' % feat_id) + x_sample = mlp(x_sample) + return_ids.append(patch_id) + x_sample = self.l2norm(x_sample) + + if num_patches == 0: + x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W]) + return_feats.append(x_sample) + return return_feats, return_ids + + +class G_Resnet(nn.Module): + def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64, + norm=None, nl_layer=None): + super(G_Resnet, self).__init__() + n_downsample = num_downs + pad_type = 'reflect' + self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type) + if nz == 0: + self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz) + else: + self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz) + + def decode(self, content, style=None): + return self.dec(content, style) + + def forward(self, image, style=None, nce_layers=[], encode_only=False): + content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only) + if encode_only: + return feats + else: + images_recon = self.decode(content, style) + if len(nce_layers) > 0: + return images_recon, feats + else: + return images_recon + +################################################################################## +# Encoder and Decoders +################################################################################## + + +class E_adaIN(nn.Module): + def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4, + norm=None, nl_layer=None, vae=False): + # style encoder + super(E_adaIN, self).__init__() + self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae) + + def forward(self, image): + style = self.enc_style(image) + return style + + +class StyleEncoder(nn.Module): + def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False): + super(StyleEncoder, self).__init__() + self.vae = vae + self.model = [] + self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')] + for i in range(2): + self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')] + dim *= 2 + for i in range(n_downsample - 2): + self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')] + self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling + if self.vae: + self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0) + self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0) + else: + self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)] + + self.model = nn.Sequential(*self.model) + self.output_dim = dim + + def forward(self, x): + if self.vae: + output = self.model(x) + output = output.view(x.size(0), -1) + output_mean = self.fc_mean(output) + output_var = self.fc_var(output) + return output_mean, output_var + else: + return 
self.model(x).view(x.size(0), -1) + + +class ContentEncoder(nn.Module): + def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'): + super(ContentEncoder, self).__init__() + self.model = [] + self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')] + # downsampling blocks + for i in range(n_downsample): + self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')] + dim *= 2 + # residual blocks + self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)] + self.model = nn.Sequential(*self.model) + self.output_dim = dim + + def forward(self, x, nce_layers=[], encode_only=False): + if len(nce_layers) > 0: + feat = x + feats = [] + for layer_id, layer in enumerate(self.model): + feat = layer(feat) + if layer_id in nce_layers: + feats.append(feat) + if layer_id == nce_layers[-1] and encode_only: + return None, feats + return feat, feats + else: + return self.model(x), None + + for layer_id, layer in enumerate(self.model): + print(layer_id, layer) + + +class Decoder_all(nn.Module): + def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0): + super(Decoder_all, self).__init__() + # AdaIN residual blocks + self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz) + self.n_blocks = 0 + # upsampling blocks + for i in range(n_upsample): + block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')] + setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block)) + self.n_blocks += 1 + dim //= 2 + # use reflection padding in the last conv layer + setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')) + self.n_blocks += 1 + + def forward(self, x, y=None): + if y is not None: + output = self.resnet_block(cat_feature(x, y)) + for n in range(self.n_blocks): + block = getattr(self, 'block_{:d}'.format(n)) + if n > 0: + output = block(cat_feature(output, y)) + else: + output = block(output) + return output + + +class Decoder(nn.Module): + def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0): + super(Decoder, self).__init__() + + self.model = [] + # AdaIN residual blocks + self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)] + # upsampling blocks + for i in range(n_upsample): + if i == 0: + input_dim = dim + nz + else: + input_dim = dim + self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')] + dim //= 2 + # use reflection padding in the last conv layer + self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')] + self.model = nn.Sequential(*self.model) + + def forward(self, x, y=None): + if y is not None: + return self.model(cat_feature(x, y)) + else: + return self.model(x) + +################################################################################## +# Sequential Models +################################################################################## + + +class ResBlocks(nn.Module): + def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0): + super(ResBlocks, self).__init__() + self.model = [] + for i in range(num_blocks): + self.model += [ResBlock(dim, norm=norm, activation=activation, 
pad_type=pad_type, nz=nz)] + self.model = nn.Sequential(*self.model) + + def forward(self, x): + return self.model(x) + + +################################################################################## +# Basic Blocks +################################################################################## +def cat_feature(x, y): + y_expand = y.view(y.size(0), y.size(1), 1, 1).expand( + y.size(0), y.size(1), x.size(2), x.size(3)) + x_cat = torch.cat([x, y_expand], 1) + return x_cat + + +class ResBlock(nn.Module): + def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0): + super(ResBlock, self).__init__() + + model = [] + model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)] + model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)] + self.model = nn.Sequential(*model) + + def forward(self, x): + residual = x + out = self.model(x) + out += residual + return out + + +class Conv2dBlock(nn.Module): + def __init__(self, input_dim, output_dim, kernel_size, stride, + padding=0, norm='none', activation='relu', pad_type='zero'): + super(Conv2dBlock, self).__init__() + self.use_bias = True + # initialize padding + if pad_type == 'reflect': + self.pad = nn.ReflectionPad2d(padding) + elif pad_type == 'zero': + self.pad = nn.ZeroPad2d(padding) + else: + assert 0, "Unsupported padding type: {}".format(pad_type) + + # initialize normalization + norm_dim = output_dim + if norm == 'batch': + self.norm = nn.BatchNorm2d(norm_dim) + elif norm == 'inst': + self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'none': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + # initialize convolution + self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) + + def forward(self, x): + x = self.conv(self.pad(x)) + if self.norm: + x = self.norm(x) + if self.activation: + x = self.activation(x) + return x + + +class LinearBlock(nn.Module): + def __init__(self, input_dim, output_dim, norm='none', activation='relu'): + super(LinearBlock, self).__init__() + use_bias = True + # initialize fully connected layer + self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) + + # initialize normalization + norm_dim = output_dim + if norm == 'batch': + self.norm = nn.BatchNorm1d(norm_dim) + elif norm == 'inst': + self.norm = nn.InstanceNorm1d(norm_dim) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'none': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 
'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + def forward(self, x): + out = self.fc(x) + if self.norm: + out = self.norm(out) + if self.activation: + out = self.activation(out) + return out + +################################################################################## +# Normalization layers +################################################################################## + + +class LayerNorm(nn.Module): + def __init__(self, num_features, eps=1e-5, affine=True): + super(LayerNorm, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + + if self.affine: + self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) + self.beta = nn.Parameter(torch.zeros(num_features)) + + def forward(self, x): + shape = [-1] + [1] * (x.dim() - 1) + mean = x.view(x.size(0), -1).mean(1).view(*shape) + std = x.view(x.size(0), -1).std(1).view(*shape) + x = (x - mean) / (std + self.eps) + + if self.affine: + shape = [1, -1] + [1] * (x.dim() - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ResnetGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. + + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + self.opt = opt + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if(no_antialias): + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if no_antialias_up: + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model += [Upsample(ngf * mult), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=1, + padding=1, # output_padding=1, + 
bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input, layers=[], encode_only=False): + if -1 in layers: + layers.append(len(self.model)) + if len(layers) > 0: + feat = input + feats = [] + for layer_id, layer in enumerate(self.model): + # print(layer_id, layer) + feat = layer(feat) + if layer_id in layers: + # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + feats.append(feat) + else: + # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + pass + if layer_id == layers[-1] and encode_only: + # print('encoder only return features') + return feats # return intermediate features alone; stop in the last layers + + return feat, feats # return both output and intermediate features + else: + """Standard forward""" + fake = self.model(input) + return fake + + +class ResnetDecoder(nn.Module): + """Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations. + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False): + """Construct a Resnet-based decoder + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetDecoder, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + model = [] + n_downsampling = 2 + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if(no_antialias): + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model += [Upsample(ngf * mult), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=1, + padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetEncoder(nn.Module): + """Resnet-based encoder that consists of a few downsampling + several Resnet blocks + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False): + """Construct a Resnet-based encoder + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + 
use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetEncoder, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if(no_antialias): + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. + """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. 
+ + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. + """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class NLayerDiscriminator(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + if(no_antialias): + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + else: + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + if(no_antialias): + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + else: + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True), + Downsample(ndf * nf_mult)] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + sequence += [ + nn.Conv2d(ndf * 
nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + self.model = nn.Sequential(*sequence) + + def forward(self, input): + """Standard forward.""" + return self.model(input) + + +class PixelDiscriminator(nn.Module): + """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" + + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): + """Construct a 1x1 PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + """ + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + """Standard forward.""" + return self.net(input) + + +class PatchDiscriminator(NLayerDiscriminator): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False): + super().__init__(input_nc, ndf, 2, norm_layer, no_antialias) + + def forward(self, input): + B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3) + size = 16 + Y = H // size + X = W // size + input = input.view(B, C, Y, size, X, size) + input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size) + return super().forward(input) + + +class GroupedChannelNorm(nn.Module): + def __init__(self, num_groups): + super().__init__() + self.num_groups = num_groups + + def forward(self, x): + shape = list(x.shape) + new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:] + x = x.view(*new_shape) + mean = x.mean(dim=2, keepdim=True) + std = x.std(dim=2, keepdim=True) + x_norm = (x - mean) / (std + 1e-7) + return x_norm.view(*shape) diff --git a/models/patchnce.py b/models/patchnce.py new file mode 100644 index 0000000000000000000000000000000000000000..475793c919dcb6e39c0c1aa658b4738988040717 --- /dev/null +++ b/models/patchnce.py @@ -0,0 +1,55 @@ +from packaging import version +import torch +from torch import nn + + +class PatchNCELoss(nn.Module): + def __init__(self, opt): + super().__init__() + self.opt = opt + self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none') + self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool + + def forward(self, feat_q, feat_k): + num_patches = feat_q.shape[0] + dim = feat_q.shape[1] + feat_k = feat_k.detach() + + # pos logit + l_pos = torch.bmm( + feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1)) + l_pos = l_pos.view(num_patches, 1) + + # neg logit + + # Should the negatives from the other samples of a minibatch be utilized? + # In CUT and FastCUT, we found that it's best to only include negatives + # from the same image. 
Therefore, we set + # --nce_includes_all_negatives_from_minibatch as False + # However, for single-image translation, the minibatch consists of + # crops from the "same" high-resolution image. + # Therefore, we will include the negatives from the entire minibatch. + if self.opt.nce_includes_all_negatives_from_minibatch: + # reshape features as if they are all negatives of minibatch of size 1. + batch_dim_for_bmm = 1 + else: + batch_dim_for_bmm = self.opt.batch_size + + # reshape features to batch size + feat_q = feat_q.view(batch_dim_for_bmm, -1, dim) + feat_k = feat_k.view(batch_dim_for_bmm, -1, dim) + npatches = feat_q.size(1) + l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1)) + + # diagonal entries are similarity between same features, and hence meaningless. + # just fill the diagonal with very small number, which is exp(-10) and almost zero + diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :] + l_neg_curbatch.masked_fill_(diagonal, -10.0) + l_neg = l_neg_curbatch.view(-1, npatches) + + out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T + + loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long, + device=feat_q.device)) + + return loss diff --git a/models/sincut_model.py b/models/sincut_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7e54bcc9b688e05e727e2f23645106d7efe931bd --- /dev/null +++ b/models/sincut_model.py @@ -0,0 +1,79 @@ +import torch +from .cut_model import CUTModel + + +class SinCUTModel(CUTModel): + """ This class implements the single image translation model (Fig 9) of + Contrastive Learning for Unpaired Image-to-Image Translation + Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu + ECCV, 2020 + """ + + @staticmethod + def modify_commandline_options(parser, is_train=True): + parser = CUTModel.modify_commandline_options(parser, is_train) + parser.add_argument('--lambda_R1', type=float, default=1.0, + help='weight for the R1 gradient penalty') + parser.add_argument('--lambda_identity', type=float, default=1.0, + help='the "identity preservation loss"') + + parser.set_defaults(nce_includes_all_negatives_from_minibatch=True, + dataset_mode="singleimage", + netG="stylegan2", + stylegan2_G_num_downsampling=1, + netD="stylegan2", + gan_mode="nonsaturating", + num_patches=1, + nce_layers="0,2,4", + lambda_NCE=4.0, + ngf=10, + ndf=8, + lr=0.002, + beta1=0.0, + beta2=0.99, + load_size=1024, + crop_size=64, + preprocess="zoom_and_patch", + ) + + if is_train: + parser.set_defaults(preprocess="zoom_and_patch", + batch_size=16, + save_epoch_freq=1, + save_latest_freq=20000, + n_epochs=8, + n_epochs_decay=8, + + ) + else: + parser.set_defaults(preprocess="none", # load the whole image as it is + batch_size=1, + num_test=1, + ) + + return parser + + def __init__(self, opt): + super().__init__(opt) + if self.isTrain: + if opt.lambda_R1 > 0.0: + self.loss_names += ['D_R1'] + if opt.lambda_identity > 0.0: + self.loss_names += ['idt'] + + def compute_D_loss(self): + self.real_B.requires_grad_() + GAN_loss_D = super().compute_D_loss() + self.loss_D_R1 = self.R1_loss(self.pred_real, self.real_B) + self.loss_D = GAN_loss_D + self.loss_D_R1 + return self.loss_D + + def compute_G_loss(self): + CUT_loss_G = super().compute_G_loss() + self.loss_idt = torch.nn.functional.l1_loss(self.idt_B, self.real_B) * self.opt.lambda_identity + return CUT_loss_G + self.loss_idt + + def R1_loss(self, real_pred, real_img): + grad_real, = torch.autograd.grad(outputs=real_pred.sum(), inputs=real_img, 
create_graph=True, retain_graph=True) + grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() + return grad_penalty * (self.opt.lambda_R1 * 0.5) diff --git a/models/stylegan_networks.py b/models/stylegan_networks.py new file mode 100644 index 0000000000000000000000000000000000000000..a3c625da4ead5414789b60c23613306e0df7df94 --- /dev/null +++ b/models/stylegan_networks.py @@ -0,0 +1,914 @@ +""" +The network architectures is based on PyTorch implemenation of StyleGAN2Encoder. +Original PyTorch repo: https://github.com/rosinality/style-based-gan-pytorch +Origianl StyelGAN2 paper: https://github.com/NVlabs/stylegan2 +We use the network architeture for our single-image traning setting. +""" + +import math +import numpy as np +import random + +import torch +from torch import nn +from torch.nn import functional as F + + +def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): + return F.leaky_relu(input + bias, negative_slope) * scale + + +class FusedLeakyReLU(nn.Module): + def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): + super().__init__() + self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1)) + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, input): + # print("FusedLeakyReLU: ", input.abs().mean()) + out = fused_leaky_relu(input, self.bias, + self.negative_slope, + self.scale) + # print("FusedLeakyReLU: ", out.abs().mean()) + return out + + +def upfirdn2d_native( + input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 +): + _, minor, in_h, in_w = input.shape + kernel_h, kernel_w = kernel.shape + + out = input.view(-1, minor, in_h, 1, in_w, 1) + out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) + out = out.view(-1, minor, in_h * up_y, in_w * up_x) + + out = F.pad( + out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] + ) + out = out[ + :, + :, + max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0), + max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0), + ] + + # out = out.permute(0, 3, 1, 2) + out = out.reshape( + [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] + ) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape( + -1, + minor, + in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, + in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, + ) + # out = out.permute(0, 2, 3, 1) + + return out[:, :, ::down_y, ::down_x] + + +def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): + return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) + + +class PixelNorm(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, input): + return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) + + +def make_kernel(k): + k = torch.tensor(k, dtype=torch.float32) + + if len(k.shape) == 1: + k = k[None, :] * k[:, None] + + k /= k.sum() + + return k + + +class Upsample(nn.Module): + def __init__(self, kernel, factor=2): + super().__init__() + + self.factor = factor + kernel = make_kernel(kernel) * (factor ** 2) + self.register_buffer('kernel', kernel) + + p = kernel.shape[0] - factor + + pad0 = (p + 1) // 2 + factor - 1 + pad1 = p // 2 + + self.pad = (pad0, pad1) + + def forward(self, input): + out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) + + return out + + +class Downsample(nn.Module): + def __init__(self, kernel, factor=2): + super().__init__() + + self.factor = factor + kernel = make_kernel(kernel) + 
self.register_buffer('kernel', kernel) + + p = kernel.shape[0] - factor + + pad0 = (p + 1) // 2 + pad1 = p // 2 + + self.pad = (pad0, pad1) + + def forward(self, input): + out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) + + return out + + +class Blur(nn.Module): + def __init__(self, kernel, pad, upsample_factor=1): + super().__init__() + + kernel = make_kernel(kernel) + + if upsample_factor > 1: + kernel = kernel * (upsample_factor ** 2) + + self.register_buffer('kernel', kernel) + + self.pad = pad + + def forward(self, input): + out = upfirdn2d(input, self.kernel, pad=self.pad) + + return out + + +class EqualConv2d(nn.Module): + def __init__( + self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True + ): + super().__init__() + + self.weight = nn.Parameter( + torch.randn(out_channel, in_channel, kernel_size, kernel_size) + ) + self.scale = math.sqrt(1) / math.sqrt(in_channel * (kernel_size ** 2)) + + self.stride = stride + self.padding = padding + + if bias: + self.bias = nn.Parameter(torch.zeros(out_channel)) + + else: + self.bias = None + + def forward(self, input): + # print("Before EqualConv2d: ", input.abs().mean()) + out = F.conv2d( + input, + self.weight * self.scale, + bias=self.bias, + stride=self.stride, + padding=self.padding, + ) + # print("After EqualConv2d: ", out.abs().mean(), (self.weight * self.scale).abs().mean()) + + return out + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' + f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' + ) + + +class EqualLinear(nn.Module): + def __init__( + self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None + ): + super().__init__() + + self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) + + if bias: + self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) + + else: + self.bias = None + + self.activation = activation + + self.scale = (math.sqrt(1) / math.sqrt(in_dim)) * lr_mul + self.lr_mul = lr_mul + + def forward(self, input): + if self.activation: + out = F.linear(input, self.weight * self.scale) + out = fused_leaky_relu(out, self.bias * self.lr_mul) + + else: + out = F.linear( + input, self.weight * self.scale, bias=self.bias * self.lr_mul + ) + + return out + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' + ) + + +class ScaledLeakyReLU(nn.Module): + def __init__(self, negative_slope=0.2): + super().__init__() + + self.negative_slope = negative_slope + + def forward(self, input): + out = F.leaky_relu(input, negative_slope=self.negative_slope) + + return out * math.sqrt(2) + + +class ModulatedConv2d(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim, + demodulate=True, + upsample=False, + downsample=False, + blur_kernel=[1, 3, 3, 1], + ): + super().__init__() + + self.eps = 1e-8 + self.kernel_size = kernel_size + self.in_channel = in_channel + self.out_channel = out_channel + self.upsample = upsample + self.downsample = downsample + + if upsample: + factor = 2 + p = (len(blur_kernel) - factor) - (kernel_size - 1) + pad0 = (p + 1) // 2 + factor - 1 + pad1 = p // 2 + 1 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1)) + + fan_in = in_channel * kernel_size ** 2 + 
self.scale = math.sqrt(1) / math.sqrt(fan_in) + self.padding = kernel_size // 2 + + self.weight = nn.Parameter( + torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) + ) + + if style_dim is not None and style_dim > 0: + self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) + + self.demodulate = demodulate + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' + f'upsample={self.upsample}, downsample={self.downsample})' + ) + + def forward(self, input, style): + batch, in_channel, height, width = input.shape + + if style is not None: + style = self.modulation(style).view(batch, 1, in_channel, 1, 1) + else: + style = torch.ones(batch, 1, in_channel, 1, 1).cuda() + weight = self.scale * self.weight * style + + if self.demodulate: + demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) + weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) + + weight = weight.view( + batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + + if self.upsample: + input = input.view(1, batch * in_channel, height, width) + weight = weight.view( + batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + weight = weight.transpose(1, 2).reshape( + batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size + ) + out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + out = self.blur(out) + + elif self.downsample: + input = self.blur(input) + _, _, height, width = input.shape + input = input.view(1, batch * in_channel, height, width) + out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + + else: + input = input.view(1, batch * in_channel, height, width) + out = F.conv2d(input, weight, padding=self.padding, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + + return out + + +class NoiseInjection(nn.Module): + def __init__(self): + super().__init__() + + self.weight = nn.Parameter(torch.zeros(1)) + + def forward(self, image, noise=None): + if noise is None: + batch, _, height, width = image.shape + noise = image.new_empty(batch, 1, height, width).normal_() + + return image + self.weight * noise + + +class ConstantInput(nn.Module): + def __init__(self, channel, size=4): + super().__init__() + + self.input = nn.Parameter(torch.randn(1, channel, size, size)) + + def forward(self, input): + batch = input.shape[0] + out = self.input.repeat(batch, 1, 1, 1) + + return out + + +class StyledConv(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim=None, + upsample=False, + blur_kernel=[1, 3, 3, 1], + demodulate=True, + inject_noise=True, + ): + super().__init__() + + self.inject_noise = inject_noise + self.conv = ModulatedConv2d( + in_channel, + out_channel, + kernel_size, + style_dim, + upsample=upsample, + blur_kernel=blur_kernel, + demodulate=demodulate, + ) + + self.noise = NoiseInjection() + # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) + # self.activate = ScaledLeakyReLU(0.2) + self.activate = FusedLeakyReLU(out_channel) + + def forward(self, input, style=None, noise=None): + out = self.conv(input, style) + if self.inject_noise: + out = self.noise(out, noise=noise) + # out = out + self.bias + out = self.activate(out) + + return out + + 
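A brief aside on `ModulatedConv2d` / `StyledConv` above: the modulate → demodulate → grouped-convolution sequence is easier to see with explicit toy shapes. The sketch below is illustrative only (random tensors, equalized-lr scaling omitted); it is not the class itself.

```python
import torch
import torch.nn.functional as F

batch, in_ch, out_ch, k = 2, 4, 8, 3
weight = torch.randn(1, out_ch, in_ch, k, k)            # shared base kernel
style = torch.randn(batch, 1, in_ch, 1, 1)              # per-sample style scale (from EqualLinear)

w = weight * style                                      # modulate: scale input channels per sample
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-8)     # per-(sample, out_channel) norm
w = w * demod.view(batch, out_ch, 1, 1, 1)              # demodulate: restore unit variance

# grouped-conv trick: fold the batch into the group dimension so each sample uses its own kernel
x = torch.randn(batch, in_ch, 16, 16)
out = F.conv2d(x.view(1, batch * in_ch, 16, 16),
               w.view(batch * out_ch, in_ch, k, k),
               padding=k // 2, groups=batch).view(batch, out_ch, 16, 16)
print(out.shape)  # torch.Size([2, 8, 16, 16])
```

Folding the batch into the groups argument is what allows a single `F.conv2d` call even though every sample in the batch has a different modulated kernel.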
+class ToRGB(nn.Module): + def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): + super().__init__() + + if upsample: + self.upsample = Upsample(blur_kernel) + + self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) + self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) + + def forward(self, input, style, skip=None): + out = self.conv(input, style) + out = out + self.bias + + if skip is not None: + skip = self.upsample(skip) + + out = out + skip + + return out + + +class Generator(nn.Module): + def __init__( + self, + size, + style_dim, + n_mlp, + channel_multiplier=2, + blur_kernel=[1, 3, 3, 1], + lr_mlp=0.01, + ): + super().__init__() + + self.size = size + + self.style_dim = style_dim + + layers = [PixelNorm()] + + for i in range(n_mlp): + layers.append( + EqualLinear( + style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' + ) + ) + + self.style = nn.Sequential(*layers) + + self.channels = { + 4: 512, + 8: 512, + 16: 512, + 32: 512, + 64: 256 * channel_multiplier, + 128: 128 * channel_multiplier, + 256: 64 * channel_multiplier, + 512: 32 * channel_multiplier, + 1024: 16 * channel_multiplier, + } + + self.input = ConstantInput(self.channels[4]) + self.conv1 = StyledConv( + self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel + ) + self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) + + self.log_size = int(math.log(size, 2)) + self.num_layers = (self.log_size - 2) * 2 + 1 + + self.convs = nn.ModuleList() + self.upsamples = nn.ModuleList() + self.to_rgbs = nn.ModuleList() + self.noises = nn.Module() + + in_channel = self.channels[4] + + for layer_idx in range(self.num_layers): + res = (layer_idx + 5) // 2 + shape = [1, 1, 2 ** res, 2 ** res] + self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape)) + + for i in range(3, self.log_size + 1): + out_channel = self.channels[2 ** i] + + self.convs.append( + StyledConv( + in_channel, + out_channel, + 3, + style_dim, + upsample=True, + blur_kernel=blur_kernel, + ) + ) + + self.convs.append( + StyledConv( + out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel + ) + ) + + self.to_rgbs.append(ToRGB(out_channel, style_dim)) + + in_channel = out_channel + + self.n_latent = self.log_size * 2 - 2 + + def make_noise(self): + device = self.input.input.device + + noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] + + for i in range(3, self.log_size + 1): + for _ in range(2): + noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) + + return noises + + def mean_latent(self, n_latent): + latent_in = torch.randn( + n_latent, self.style_dim, device=self.input.input.device + ) + latent = self.style(latent_in).mean(0, keepdim=True) + + return latent + + def get_latent(self, input): + return self.style(input) + + def forward( + self, + styles, + return_latents=False, + inject_index=None, + truncation=1, + truncation_latent=None, + input_is_latent=False, + noise=None, + randomize_noise=True, + ): + if not input_is_latent: + styles = [self.style(s) for s in styles] + + if noise is None: + if randomize_noise: + noise = [None] * self.num_layers + else: + noise = [ + getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) + ] + + if truncation < 1: + style_t = [] + + for style in styles: + style_t.append( + truncation_latent + truncation * (style - truncation_latent) + ) + + styles = style_t + + if len(styles) < 2: + inject_index = self.n_latent + + if len(styles[0].shape) < 3: + latent = styles[0].unsqueeze(1).repeat(1, 
inject_index, 1) + + else: + latent = styles[0] + + else: + if inject_index is None: + inject_index = random.randint(1, self.n_latent - 1) + + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) + + latent = torch.cat([latent, latent2], 1) + + out = self.input(latent) + out = self.conv1(out, latent[:, 0], noise=noise[0]) + + skip = self.to_rgb1(out, latent[:, 1]) + + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip( + self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs + ): + out = conv1(out, latent[:, i], noise=noise1) + out = conv2(out, latent[:, i + 1], noise=noise2) + skip = to_rgb(out, latent[:, i + 2], skip) + + i += 2 + + image = skip + + if return_latents: + return image, latent + + else: + return image, None + + +class ConvLayer(nn.Sequential): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + downsample=False, + blur_kernel=[1, 3, 3, 1], + bias=True, + activate=True, + ): + layers = [] + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + layers.append(Blur(blur_kernel, pad=(pad0, pad1))) + + stride = 2 + self.padding = 0 + + else: + stride = 1 + self.padding = kernel_size // 2 + + layers.append( + EqualConv2d( + in_channel, + out_channel, + kernel_size, + padding=self.padding, + stride=stride, + bias=bias and not activate, + ) + ) + + if activate: + if bias: + layers.append(FusedLeakyReLU(out_channel)) + + else: + layers.append(ScaledLeakyReLU(0.2)) + + super().__init__(*layers) + + +class ResBlock(nn.Module): + def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True, skip_gain=1.0): + super().__init__() + + self.skip_gain = skip_gain + self.conv1 = ConvLayer(in_channel, in_channel, 3) + self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample, blur_kernel=blur_kernel) + + if in_channel != out_channel or downsample: + self.skip = ConvLayer( + in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False + ) + else: + self.skip = nn.Identity() + + def forward(self, input): + out = self.conv1(input) + out = self.conv2(out) + + skip = self.skip(input) + out = (out * self.skip_gain + skip) / math.sqrt(self.skip_gain ** 2 + 1.0) + + return out + + +class StyleGAN2Discriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, no_antialias=False, size=None, opt=None): + super().__init__() + self.opt = opt + self.stddev_group = 16 + if size is None: + size = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) + if "patch" in self.opt.netD and self.opt.D_patch_size is not None: + size = 2 ** int(np.log2(self.opt.D_patch_size)) + + blur_kernel = [1, 3, 3, 1] + channel_multiplier = ndf / 64 + channels = { + 4: min(384, int(4096 * channel_multiplier)), + 8: min(384, int(2048 * channel_multiplier)), + 16: min(384, int(1024 * channel_multiplier)), + 32: min(384, int(512 * channel_multiplier)), + 64: int(256 * channel_multiplier), + 128: int(128 * channel_multiplier), + 256: int(64 * channel_multiplier), + 512: int(32 * channel_multiplier), + 1024: int(16 * channel_multiplier), + } + + convs = [ConvLayer(3, channels[size], 1)] + + log_size = int(math.log(size, 2)) + + in_channel = channels[size] + + if "smallpatch" in self.opt.netD: + final_res_log2 = 4 + elif "patch" in self.opt.netD: + final_res_log2 = 3 + else: + final_res_log2 = 2 + + for i in range(log_size, final_res_log2, -1): + out_channel = 
channels[2 ** (i - 1)] + + convs.append(ResBlock(in_channel, out_channel, blur_kernel)) + + in_channel = out_channel + + self.convs = nn.Sequential(*convs) + + if False and "tile" in self.opt.netD: + in_channel += 1 + self.final_conv = ConvLayer(in_channel, channels[4], 3) + if "patch" in self.opt.netD: + self.final_linear = ConvLayer(channels[4], 1, 3, bias=False, activate=False) + else: + self.final_linear = nn.Sequential( + EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'), + EqualLinear(channels[4], 1), + ) + + def forward(self, input, get_minibatch_features=False): + if "patch" in self.opt.netD and self.opt.D_patch_size is not None: + h, w = input.size(2), input.size(3) + y = torch.randint(h - self.opt.D_patch_size, ()) + x = torch.randint(w - self.opt.D_patch_size, ()) + input = input[:, :, y:y + self.opt.D_patch_size, x:x + self.opt.D_patch_size] + out = input + for i, conv in enumerate(self.convs): + out = conv(out) + # print(i, out.abs().mean()) + # out = self.convs(input) + + batch, channel, height, width = out.shape + + if False and "tile" in self.opt.netD: + group = min(batch, self.stddev_group) + stddev = out.view( + group, -1, 1, channel // 1, height, width + ) + stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) + stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2) + stddev = stddev.repeat(group, 1, height, width) + out = torch.cat([out, stddev], 1) + + out = self.final_conv(out) + # print(out.abs().mean()) + + if "patch" not in self.opt.netD: + out = out.view(batch, -1) + out = self.final_linear(out) + + return out + + +class TileStyleGAN2Discriminator(StyleGAN2Discriminator): + def forward(self, input): + B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3) + size = self.opt.D_patch_size + Y = H // size + X = W // size + input = input.view(B, C, Y, size, X, size) + input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size) + return super().forward(input) + + +class StyleGAN2Encoder(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None): + super().__init__() + assert opt is not None + self.opt = opt + channel_multiplier = ngf / 32 + channels = { + 4: min(512, int(round(4096 * channel_multiplier))), + 8: min(512, int(round(2048 * channel_multiplier))), + 16: min(512, int(round(1024 * channel_multiplier))), + 32: min(512, int(round(512 * channel_multiplier))), + 64: int(round(256 * channel_multiplier)), + 128: int(round(128 * channel_multiplier)), + 256: int(round(64 * channel_multiplier)), + 512: int(round(32 * channel_multiplier)), + 1024: int(round(16 * channel_multiplier)), + } + + blur_kernel = [1, 3, 3, 1] + + cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) + convs = [nn.Identity(), + ConvLayer(3, channels[cur_res], 1)] + + num_downsampling = self.opt.stylegan2_G_num_downsampling + for i in range(num_downsampling): + in_channel = channels[cur_res] + out_channel = channels[cur_res // 2] + convs.append(ResBlock(in_channel, out_channel, blur_kernel, downsample=True)) + cur_res = cur_res // 2 + + for i in range(n_blocks // 2): + n_channel = channels[cur_res] + convs.append(ResBlock(n_channel, n_channel, downsample=False)) + + self.convs = nn.Sequential(*convs) + + def forward(self, input, layers=[], get_features=False): + feat = input + feats = [] + if -1 in layers: + layers.append(len(self.convs) - 1) + for layer_id, layer in enumerate(self.convs): + feat = layer(feat) + # 
print(layer_id, " features ", feat.abs().mean()) + if layer_id in layers: + feats.append(feat) + + if get_features: + return feat, feats + else: + return feat + + +class StyleGAN2Decoder(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None): + super().__init__() + assert opt is not None + self.opt = opt + + blur_kernel = [1, 3, 3, 1] + + channel_multiplier = ngf / 32 + channels = { + 4: min(512, int(round(4096 * channel_multiplier))), + 8: min(512, int(round(2048 * channel_multiplier))), + 16: min(512, int(round(1024 * channel_multiplier))), + 32: min(512, int(round(512 * channel_multiplier))), + 64: int(round(256 * channel_multiplier)), + 128: int(round(128 * channel_multiplier)), + 256: int(round(64 * channel_multiplier)), + 512: int(round(32 * channel_multiplier)), + 1024: int(round(16 * channel_multiplier)), + } + + num_downsampling = self.opt.stylegan2_G_num_downsampling + cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) // (2 ** num_downsampling) + convs = [] + + for i in range(n_blocks // 2): + n_channel = channels[cur_res] + convs.append(ResBlock(n_channel, n_channel, downsample=False)) + + for i in range(num_downsampling): + in_channel = channels[cur_res] + out_channel = channels[cur_res * 2] + inject_noise = "small" not in self.opt.netG + convs.append( + StyledConv(in_channel, out_channel, 3, upsample=True, blur_kernel=blur_kernel, inject_noise=inject_noise) + ) + cur_res = cur_res * 2 + + convs.append(ConvLayer(channels[cur_res], 3, 1)) + + self.convs = nn.Sequential(*convs) + + def forward(self, input): + return self.convs(input) + + +class StyleGAN2Generator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None): + super().__init__() + self.opt = opt + self.encoder = StyleGAN2Encoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt) + self.decoder = StyleGAN2Decoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt) + + def forward(self, input, layers=[], encode_only=False): + feat, feats = self.encoder(input, layers, True) + if encode_only: + return feats + else: + fake = self.decoder(feat) + + if len(layers) > 0: + return fake, feats + else: + return fake diff --git a/models/template_model.py b/models/template_model.py new file mode 100644 index 0000000000000000000000000000000000000000..68cdaf6a9a2cb321ff2a01949b38adc6fa22e97c --- /dev/null +++ b/models/template_model.py @@ -0,0 +1,99 @@ +"""Model class template + +This module provides a template for users to implement custom models. +You can specify '--model template' to use this model. +The class name should be consistent with both the filename and its model option. +The filename should be _dataset.py +The class name should be Dataset.py +It implements a simple image-to-image translation baseline based on regression loss. +Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss: + min_ ||netG(data_A) - data_B||_1 +You need to implement the following functions: + : Add model-specific options and rewrite default values for existing options. + <__init__>: Initialize this model class. + : Unpack input data and perform data pre-processing. + : Run forward pass. This will be called by both and . + : Update network weights; it will be called in every training iteration. 
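One detail of `StyleGAN2Encoder.forward` / `StyleGAN2Generator.forward` above that is easy to miss in diff form: intermediate features are collected simply by walking the `nn.Sequential` container and matching layer indices, which is what the `--nce_layers` option refers to. A toy stand-in (hypothetical conv stack, not the real encoder):

```python
import torch
from torch import nn

encoder = nn.Sequential(                 # stand-in for StyleGAN2Encoder.convs
    nn.Identity(),
    nn.Conv2d(3, 16, 1),
    nn.Conv2d(16, 16, 3, padding=1),
    nn.Conv2d(16, 16, 3, padding=1),
)

def extract_feats(net, x, layers):
    feats = []
    for layer_id, layer in enumerate(net):
        x = layer(x)
        if layer_id in layers:           # keep the activations requested by --nce_layers
            feats.append(x)
    return x, feats

_, feats = extract_feats(encoder, torch.randn(1, 3, 64, 64), layers=[0, 2, 3])
print([tuple(f.shape) for f in feats])
```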
+""" +import torch +from .base_model import BaseModel +from . import networks + + +class TemplateModel(BaseModel): + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new model-specific options and rewrite default values for existing options. + + Parameters: + parser -- the option parser + is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset. + if is_train: + parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model. + + return parser + + def __init__(self, opt): + """Initialize this model class. + + Parameters: + opt -- training/test options + + A few things can be done here. + - (required) call the initialization function of BaseModel + - define loss function, visualization images, model names, and optimizers + """ + BaseModel.__init__(self, opt) # call the initialization method of BaseModel + # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk. + self.loss_names = ['loss_G'] + # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images. + self.visual_names = ['data_A', 'data_B', 'output'] + # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks. + # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them. + self.model_names = ['G'] + # define networks; you can use opt.isTrain to specify different behaviors for training and test. + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) + if self.isTrain: # only defined during training time + # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. + # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) + self.criterionLoss = torch.nn.L1Loss() + # define and initialize optimizers. You can define one optimizer for each network. + # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. + self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizers = [self.optimizer] + + # Our program will automatically call to define schedulers, load networks, and print networks + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input: a dictionary that contains the data itself and its metadata information. + """ + AtoB = self.opt.direction == 'AtoB' # use to swap data_A and data_B + self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A + self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B + self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths + + def forward(self): + """Run forward pass. 
This will be called by both functions and .""" + self.output = self.netG(self.data_A) # generate output image given the input data_A + + def backward(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # caculate the intermediate results if necessary; here self.output has been computed during function + # calculate loss given the input and intermediate results + self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression + self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G + + def optimize_parameters(self): + """Update network weights; it will be called in every training iteration.""" + self.forward() # first call forward to calculate intermediate results + self.optimizer.zero_grad() # clear network G's existing gradients + self.backward() # calculate gradients for network G + self.optimizer.step() # update gradients for network G diff --git a/options/__init__.py b/options/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7eedebe54aa70169fd25951b3034d819e396c90 --- /dev/null +++ b/options/__init__.py @@ -0,0 +1 @@ +"""This package options includes option modules: training options, test options, and basic options (used in both training and test).""" diff --git a/options/__pycache__/__init__.cpython-310.pyc b/options/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0497caf6939283f22cb762f0c2db7b9782b32d82 Binary files /dev/null and b/options/__pycache__/__init__.cpython-310.pyc differ diff --git a/options/__pycache__/base_options.cpython-310.pyc b/options/__pycache__/base_options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d62c69e9b0a46a05a898c4ff760873379556e4b Binary files /dev/null and b/options/__pycache__/base_options.cpython-310.pyc differ diff --git a/options/__pycache__/train_options.cpython-310.pyc b/options/__pycache__/train_options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d906a053a411136192f9da1025db3ebde6a8b0a1 Binary files /dev/null and b/options/__pycache__/train_options.cpython-310.pyc differ diff --git a/options/base_options.py b/options/base_options.py new file mode 100644 index 0000000000000000000000000000000000000000..e6d72e343657aeb4ba55240a1c03dd643efc1f26 --- /dev/null +++ b/options/base_options.py @@ -0,0 +1,164 @@ +import argparse +import os +from util import util +import torch +import models +import data + + +class BaseOptions(): + """This class defines options used during both training and test time. + + It also implements several helper functions such as parsing, printing, and saving the options. + It also gathers additional options defined in functions in both dataset class and model class. + """ + + def __init__(self, cmd_line=None): + """Reset the class; indicates the class hasn't been initailized""" + self.initialized = False + self.cmd_line = None + if cmd_line is not None: + self.cmd_line = cmd_line.split() + + def initialize(self, parser): + """Define the common options that are used in both training and test.""" + # basic parameters + parser.add_argument('--dataroot', default='placeholder', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') + parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') + parser.add_argument('--easy_label', type=str, default='experiment_name', help='Interpretable name') + parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') + parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') + # model parameters + parser.add_argument('--model', type=str, default='cut', help='chooses which model to use.') + parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat'], help='specify generator architecture') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G') + parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D') + parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal'], help='network initialization') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--no_dropout', type=util.str2bool, nargs='?', const=True, default=True, + help='no dropout for the generator') + parser.add_argument('--no_antialias', action='store_true', help='if specified, use stride=2 convs instead of antialiased-downsampling (sad)') + parser.add_argument('--no_antialias_up', action='store_true', help='if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]') + # dataset parameters + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--load_size', type=int, default=286, help='scale images to this size') + parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') + parser.add_argument('--random_scale_max', type=float, default=3.0, + help='(used for single image translation) Randomly scale the image by the specified factor as data augmentation.') + # additional parameters + parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') + parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') + parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') + + # parameters related to StyleGAN2-based networks + parser.add_argument('--stylegan2_G_num_downsampling', + default=1, type=int, + help='Number of downsampling layers used by StyleGAN2Generator') + + self.initialized = True + return parser + + def gather_options(self): + """Initialize our parser with basic options(only once). + Add additional model-specific and dataset-specific options. + These options are defined in the function + in model and dataset classes. + """ + if not self.initialized: # check if it has been initialized + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + if self.cmd_line is None: + opt, _ = parser.parse_known_args() + else: + opt, _ = parser.parse_known_args(self.cmd_line) + + # modify model-related parser options + model_name = opt.model + model_option_setter = models.get_option_setter(model_name) + parser = model_option_setter(parser, self.isTrain) + if self.cmd_line is None: + opt, _ = parser.parse_known_args() # parse again with new defaults + else: + opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults + + # modify dataset-related parser options + dataset_name = opt.dataset_mode + dataset_option_setter = data.get_option_setter(dataset_name) + parser = dataset_option_setter(parser, self.isTrain) + + # save and return the parser + self.parser = parser + if self.cmd_line is None: + return parser.parse_args() + else: + return parser.parse_args(self.cmd_line) + + def print_options(self, opt): + """Print and save options + + It will print both current options and default values(if different). 
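The two-pass parsing in `gather_options` above is what makes the per-model defaults earlier in this patch (for example the SinCUT overrides) take effect: the base options are parsed with `parse_known_args` just to discover the model name, the chosen model then extends the parser and rewrites defaults, and only afterwards is the full command line parsed. A minimal self-contained sketch with made-up options; `cut_options` stands in for `models.get_option_setter(model_name)`:

```python
import argparse

def base_parser():
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('--model', type=str, default='cut')
    p.add_argument('--lr', type=float, default=0.0002)
    return p

def cut_options(parser):
    parser.add_argument('--lambda_NCE', type=float, default=1.0)
    parser.set_defaults(lr=0.002)        # a model may rewrite base defaults
    return parser

parser = base_parser()
opt, _ = parser.parse_known_args(['--model', 'cut'])                 # pass 1: find the model name
parser = cut_options(parser)                                         # model adds options / new defaults
opt = parser.parse_args(['--model', 'cut', '--lambda_NCE', '4.0'])   # pass 2: full parse
print(opt.lr, opt.lambda_NCE)                                        # 0.002 4.0
```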
+ It will save options into a text file / [checkpoints_dir] / opt.txt + """ + message = '' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + print(message) + + # save to the disk + expr_dir = os.path.join(opt.checkpoints_dir, opt.name) + util.mkdirs(expr_dir) + file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) + try: + with open(file_name, 'wt') as opt_file: + opt_file.write(message) + opt_file.write('\n') + except PermissionError as error: + print("permission error {}".format(error)) + pass + + def parse(self): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + opt.isTrain = self.isTrain # train or test + + # process opt.suffix + if opt.suffix: + suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' + opt.name = opt.name + suffix + + self.print_options(opt) + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids.append(id) + if len(opt.gpu_ids) > 0: + torch.cuda.set_device(opt.gpu_ids[0]) + + self.opt = opt + return self.opt diff --git a/options/test_options.py b/options/test_options.py new file mode 100644 index 0000000000000000000000000000000000000000..e4559adc7ac380d9d8218104c1002bd5319a275b --- /dev/null +++ b/options/test_options.py @@ -0,0 +1,21 @@ +from .base_options import BaseOptions + + +class TestOptions(BaseOptions): + """This class includes test options. + + It also includes shared options defined in BaseOptions. + """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) # define shared options + parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') + parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + # Dropout and Batchnorm has different behavioir during training and test. + parser.add_argument('--eval', action='store_true', help='use eval mode during test time.') + parser.add_argument('--num_test', type=int, default=50, help='how many test images to run') + + # To avoid cropping, the load_size should be the same as crop_size + parser.set_defaults(load_size=parser.get_default('crop_size')) + self.isTrain = False + return parser diff --git a/options/train_options.py b/options/train_options.py new file mode 100644 index 0000000000000000000000000000000000000000..fa41ccabf3dc51a8a65e38112ff289fd96720f37 --- /dev/null +++ b/options/train_options.py @@ -0,0 +1,44 @@ +from .base_options import BaseOptions + + +class TrainOptions(BaseOptions): + """This class includes training options. + + It also includes shared options defined in BaseOptions. + """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) + # visdom and HTML visualization parameters + parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') + parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') + parser.add_argument('--display_id', type=int, default=None, help='window id of the web display. 
Default is random window id') + parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') + parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') + parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') + parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') + parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') + parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') + # network saving and loading parameters + parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') + parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') + parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq') + parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') + parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') + parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') + parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint') + + # training parameters + parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs with the initial learning rate') + parser.add_argument('--n_epochs_decay', type=int, default=200, help='number of epochs to linearly decay learning rate to zero') + parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') + parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam') + parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') + parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') + parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') + parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. 
[linear | step | plateau | cosine]') + parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') + + self.isTrain = True + return parser diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b056604d299b014457e7bda7a679359ed369e1c5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +torch>=1.4.0 +torchvision>=0.5.0 +dominate>=2.4.0 +visdom>=0.1.8.8 +packaging +GPUtil>=1.4.0 diff --git a/shit.ipynb b/shit.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..baea345ae2225e8f97d8d63f3cadae685bce63bd --- /dev/null +++ b/shit.ipynb @@ -0,0 +1,75 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from PIL import Image" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def copy(src, dest):\n", + " for sub in os.listdir(src):\n", + " for file in os.listdir(os.path.join(src, sub)):\n", + " image = Image.open(os.path.join(src, sub, file))\n", + "\n", + " # resize image, min(height, width) = 512\n", + " if image.size[0] < image.size[1]:\n", + " image = image.resize((512, int(512 * image.size[1] / image.size[0])))\n", + " else:\n", + " image = image.resize((int(512 * image.size[0] / image.size[1]), 512))\n", + "\n", + " image.save(os.path.join(dest, file))\n", + "\n", + "datasets = '/home/qninh/Downloads/rgb_anon'\n", + "\n", + "# build cyclegan dataset\n", + "for type in ['fog', 'rain', 'snow', 'night']:\n", + " path = os.path.join(datasets, type)\n", + "\n", + " os.makedirs(f'./datasets/{type}/trainA', exist_ok=True)\n", + " os.makedirs(f'./datasets/{type}/trainB', exist_ok=True)\n", + " os.makedirs(f'./datasets/{type}/testA', exist_ok=True)\n", + " os.makedirs(f'./datasets/{type}/testB', exist_ok=True)\n", + "\n", + " copy(os.path.join(path, \"train_ref\"), f\"./datasets/{type}/trainA\")\n", + " copy(os.path.join(path, \"test_ref\"), f\"./datasets/{type}/trainA\")\n", + "\n", + " copy(os.path.join(path, \"train\"), f\"./datasets/{type}/trainB\")\n", + " copy(os.path.join(path, \"test\"), f\"./datasets/{type}/testB\")\n", + "\n", + " copy(os.path.join(path, \"val_ref\"), f\"./datasets/{type}/testA\")\n", + " copy(os.path.join(path, \"val\"), f\"./datasets/{type}/testB\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/test.py b/test.py new file mode 100644 index 0000000000000000000000000000000000000000..46967372b96adadc62ee7c32a0c26779dd4a41fb --- /dev/null +++ b/test.py @@ -0,0 +1,70 @@ +"""General-purpose test script for image-to-image translation. + +Once you have trained your model with train.py, you can use this script to test the model. +It will load a saved model from --checkpoints_dir and save the results to --results_dir. + +It first creates model and dataset given the option. It will hard-code some parameters. +It then runs inference for --num_test images and save results to an HTML file. 
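For context on the `--lr_policy` default of `linear` above: the learning rate is held constant for `--n_epochs` epochs and then decayed linearly towards zero over `--n_epochs_decay` epochs. The scheduler itself is built outside this diff, but a `LambdaLR` of roughly the following shape reproduces that behaviour (a sketch under that assumption, using the SinCUT-style epoch counts set earlier in this patch):

```python
import torch
from torch.optim import lr_scheduler

n_epochs, n_epochs_decay, epoch_count = 8, 8, 1
param = torch.zeros(1, requires_grad=True)
optimizer = torch.optim.Adam([param], lr=0.002, betas=(0.0, 0.99))

def lambda_rule(epoch):
    # factor 1.0 for the first n_epochs, then a linear ramp down towards zero
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)

scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
for _ in range(n_epochs + n_epochs_decay):
    optimizer.step()                     # the training step would go here
    scheduler.step()
    print(optimizer.param_groups[0]['lr'])
```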
+ +Example (You need to train models first or download pre-trained models from our website): + Test a CycleGAN model (both sides): + python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan + + Test a CycleGAN model (one side only): + python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout + + The option '--model test' is used for generating CycleGAN results only for one side. + This option will automatically set '--dataset_mode single', which only loads the images from one set. + On the contrary, using '--model cycle_gan' requires loading and generating results in both directions, + which is sometimes unnecessary. The results will be saved at ./results/. + Use '--results_dir ' to specify the results directory. + + Test a pix2pix model: + python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA + +See options/base_options.py and options/test_options.py for more test options. +See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md +See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md +""" +import os +from options.test_options import TestOptions +from data import create_dataset +from models import create_model +from util.visualizer import save_images +from util import html +import util.util as util + + +if __name__ == '__main__': + opt = TestOptions().parse() # get test options + # hard-code some parameters for test + opt.num_threads = 0 # test code only supports num_threads = 1 + opt.batch_size = 1 # test code only supports batch_size = 1 + opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. + opt.no_flip = True # no flip; comment this line if results on flipped images are needed. + opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file. + dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options + train_dataset = create_dataset(util.copyconf(opt, phase="train")) + model = create_model(opt) # create a model given opt.model and other options + # create a webpage for viewing the results + web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory + print('creating web directory', web_dir) + webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch)) + + for i, data in enumerate(dataset): + if i == 0: + model.data_dependent_initialize(data) + model.setup(opt) # regular setup: load and print networks; create schedulers + model.parallelize() + if opt.eval: + model.eval() + if i >= opt.num_test: # only apply our model to opt.num_test images. + break + model.set_input(data) # unpack data from data loader + model.test() # run inference + visuals = model.get_current_visuals() # get image results + img_path = model.get_image_paths() # get image paths + if i % 5 == 0: # save images to an HTML file + print('processing (%04d)-th image... 
%s' % (i, img_path)) + save_images(webpage, visuals, img_path, width=opt.display_winsize) + webpage.save() # save the HTML diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..2780fd9c5d991c0881c38ada5fe7d16add8fd3ff --- /dev/null +++ b/tox.ini @@ -0,0 +1,8 @@ +[flake8] +exclude = .git +max-complexity = 13 +count = True +format = pylint +max-line-length = 120 +tee = True +ignore = E303,E501,W505,C901 diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..8d04a5f2983c569abb3b5cf7b192f564f7a67d5c --- /dev/null +++ b/train.py @@ -0,0 +1,113 @@ +import time +import torch +from options.train_options import TrainOptions +from data import create_dataset +from models import create_model +from util.visualizer import Visualizer + + +if __name__ == "__main__": + opt = TrainOptions().parse() # get training options + dataset = create_dataset( + opt + ) # create a dataset given opt.dataset_mode and other options + dataset_size = len(dataset) # get the number of images in the dataset. + + model = create_model(opt) + # create a model given opt.model and other options + print("The number of training images = %d" % dataset_size) + + visualizer = Visualizer( + opt + ) # create a visualizer that display/save images and plots + opt.visualizer = visualizer + total_iters = 0 # the total number of training iterations + + optimize_time = 0.1 + + times = [] + for epoch in range( + opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1 + ): # outer loop for different epochs; we save the model by , + + epoch_start_time = time.time() # timer for entire epoch + iter_data_time = time.time() # timer for data loading per iteration + epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch + visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch + + dataset.set_epoch(epoch) + for i, data in enumerate(dataset): # inner loop within one epoch + iter_start_time = time.time() # timer for computation per iteration + if total_iters % opt.print_freq == 0: + t_data = iter_start_time - iter_data_time + + batch_size = data["A"].size(0) + total_iters += batch_size + epoch_iter += batch_size + if len(opt.gpu_ids) > 0: + torch.cuda.synchronize() + optimize_start_time = time.time() + if epoch == opt.epoch_count and i == 0: + model.data_dependent_initialize(data) + model.setup( + opt + ) # regular setup: load and print networks; create schedulers + model.parallelize() + model.set_input(data) # unpack data from dataset and apply preprocessing + model.optimize_parameters() # calculate loss functions, get gradients, update network weights + if len(opt.gpu_ids) > 0: + torch.cuda.synchronize() + optimize_time = ( + time.time() - optimize_start_time + ) / batch_size * 0.005 + 0.995 * optimize_time + + if ( + total_iters % opt.display_freq == 0 + ): # display images on visdom and save images to a HTML file + save_result = total_iters % opt.update_html_freq == 0 + model.compute_visuals() + visualizer.display_current_results( + model.get_current_visuals(), epoch, save_result + ) + + if ( + total_iters % opt.print_freq == 0 + ): # print training losses and save logging information to the disk + losses = model.get_current_losses() + visualizer.print_current_losses( + epoch, epoch_iter, losses, optimize_time, t_data + ) + if opt.display_id is None or opt.display_id > 0: + visualizer.plot_current_losses( + epoch, float(epoch_iter) / dataset_size, losses + ) + + if ( + 
total_iters % opt.save_latest_freq == 0 + ): # cache our latest model every iterations + print( + "saving the latest model (epoch %d, total_iters %d)" + % (epoch, total_iters) + ) + print( + opt.name + ) # it's useful to occasionally show the experiment name on console + save_suffix = "iter_%d" % total_iters if opt.save_by_iter else "latest" + model.save_networks(save_suffix) + + iter_data_time = time.time() + + if ( + epoch % opt.save_epoch_freq == 0 + ): # cache our model every epochs + print( + "saving the model at the end of epoch %d, iters %d" + % (epoch, total_iters) + ) + model.save_networks("latest") + model.save_networks(epoch) + + print( + "End of epoch %d / %d \t Time Taken: %d sec" + % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time) + ) + model.update_learning_rate() # update learning rates at the end of every epoch. diff --git a/util/__init__.py b/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..718f8f67264ef6124c9628dba97f4e556d77e435 --- /dev/null +++ b/util/__init__.py @@ -0,0 +1,2 @@ +"""This package includes a miscellaneous collection of useful helper functions.""" +from util import * diff --git a/util/__pycache__/__init__.cpython-310.pyc b/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3bec8ae08adae87d91081f90ad2efa73ec0981e Binary files /dev/null and b/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/util/__pycache__/html.cpython-310.pyc b/util/__pycache__/html.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23417c4f137f2a22439d1970adb43a06dde394c3 Binary files /dev/null and b/util/__pycache__/html.cpython-310.pyc differ diff --git a/util/__pycache__/image_pool.cpython-310.pyc b/util/__pycache__/image_pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39ec5f05bf79ab09bcf24b947a139c0c8b64db06 Binary files /dev/null and b/util/__pycache__/image_pool.cpython-310.pyc differ diff --git a/util/__pycache__/util.cpython-310.pyc b/util/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea9e68f378d043510b021891aa3dc50bf30b1287 Binary files /dev/null and b/util/__pycache__/util.cpython-310.pyc differ diff --git a/util/__pycache__/visualizer.cpython-310.pyc b/util/__pycache__/visualizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7a7b8bebefa1ae4ca330242f9a5e22ae2d615d3 Binary files /dev/null and b/util/__pycache__/visualizer.cpython-310.pyc differ diff --git a/util/get_data.py b/util/get_data.py new file mode 100644 index 0000000000000000000000000000000000000000..97edc3ce3c3ab6d6080dca34e73a5fb77bb715fb --- /dev/null +++ b/util/get_data.py @@ -0,0 +1,110 @@ +from __future__ import print_function +import os +import tarfile +import requests +from warnings import warn +from zipfile import ZipFile +from bs4 import BeautifulSoup +from os.path import abspath, isdir, join, basename + + +class GetData(object): + """A Python script for downloading CycleGAN or pix2pix datasets. + + Parameters: + technique (str) -- One of: 'cyclegan' or 'pix2pix'. + verbose (bool) -- If True, print additional information. + + Examples: + >>> from util.get_data import GetData + >>> gd = GetData(technique='cyclegan') + >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. 
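A pattern in `train.py` above worth highlighting: `model.setup()` is not called right after `create_model()`, but only once the first batch has been seen, because `data_dependent_initialize()` needs real activations (for example, to size CUT's patch-sampling MLP) before optimizers can be created. Below is a runnable toy stand-in of that ordering; `ToyModel` and its methods are hypothetical, not the repository's API:

```python
import torch

class ToyModel:
    def __init__(self):
        self.net = None                                   # cannot be built yet
    def data_dependent_initialize(self, data):
        self.net = torch.nn.Linear(data.shape[1], 1)      # shape known only from real data
    def setup(self):
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=2e-3)
    def optimize_parameters(self, data):
        loss = self.net(data).mean()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

model = ToyModel()
dataset = [torch.randn(4, 8) for _ in range(3)]
for i, batch in enumerate(dataset):
    if i == 0:                                            # deferred, data-dependent setup
        model.data_dependent_initialize(batch)
        model.setup()
    model.optimize_parameters(batch)
```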
+ + Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh' + and 'scripts/download_cyclegan_model.sh'. + """ + + def __init__(self, technique='cyclegan', verbose=True): + url_dict = { + 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', + 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' + } + self.url = url_dict.get(technique.lower()) + self._verbose = verbose + + def _print(self, text): + if self._verbose: + print(text) + + @staticmethod + def _get_options(r): + soup = BeautifulSoup(r.text, 'lxml') + options = [h.text for h in soup.find_all('a', href=True) + if h.text.endswith(('.zip', 'tar.gz'))] + return options + + def _present_options(self): + r = requests.get(self.url) + options = self._get_options(r) + print('Options:\n') + for i, o in enumerate(options): + print("{0}: {1}".format(i, o)) + choice = input("\nPlease enter the number of the " + "dataset above you wish to download:") + return options[int(choice)] + + def _download_data(self, dataset_url, save_path): + if not isdir(save_path): + os.makedirs(save_path) + + base = basename(dataset_url) + temp_save_path = join(save_path, base) + + with open(temp_save_path, "wb") as f: + r = requests.get(dataset_url) + f.write(r.content) + + if base.endswith('.tar.gz'): + obj = tarfile.open(temp_save_path) + elif base.endswith('.zip'): + obj = ZipFile(temp_save_path, 'r') + else: + raise ValueError("Unknown File Type: {0}.".format(base)) + + self._print("Unpacking Data...") + obj.extractall(save_path) + obj.close() + os.remove(temp_save_path) + + def get(self, save_path, dataset=None): + """ + + Download a dataset. + + Parameters: + save_path (str) -- A directory to save the data to. + dataset (str) -- (optional). A specific dataset to download. + Note: this must include the file extension. + If None, options will be presented for you + to choose from. + + Returns: + save_path_full (str) -- the absolute path to the downloaded data. + + """ + if dataset is None: + selected_dataset = self._present_options() + else: + selected_dataset = dataset + + save_path_full = join(save_path, selected_dataset.split('.')[0]) + + if isdir(save_path_full): + warn("\n'{0}' already exists. Voiding Download.".format( + save_path_full)) + else: + self._print('Downloading Data...') + url = "{0}/{1}".format(self.url, selected_dataset) + self._download_data(url, save_path=save_path) + + return abspath(save_path_full) diff --git a/util/html.py b/util/html.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3262a1eafda34842e4dbad47bb6ba72f0c5a68 --- /dev/null +++ b/util/html.py @@ -0,0 +1,86 @@ +import dominate +from dominate.tags import meta, h3, table, tr, td, p, a, img, br +import os + + +class HTML: + """This HTML class allows us to save images and write texts into a single HTML file. + + It consists of functions such as (add a text header to the HTML file), + (add a row of images to the HTML file), and (save the HTML to the disk). + It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. + """ + + def __init__(self, web_dir, title, refresh=0): + """Initialize the HTML classes + + Parameters: + web_dir (str) -- a directory that stores the webpage. 
HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/ + title (str) -- the webpage name + refresh (int) -- how often the website refresh itself; if 0; no refreshing + """ + self.title = title + self.web_dir = web_dir + self.img_dir = os.path.join(self.web_dir, 'images') + if not os.path.exists(self.web_dir): + os.makedirs(self.web_dir) + if not os.path.exists(self.img_dir): + os.makedirs(self.img_dir) + + self.doc = dominate.document(title=title) + if refresh > 0: + with self.doc.head: + meta(http_equiv="refresh", content=str(refresh)) + + def get_image_dir(self): + """Return the directory that stores images""" + return self.img_dir + + def add_header(self, text): + """Insert a header to the HTML file + + Parameters: + text (str) -- the header text + """ + with self.doc: + h3(text) + + def add_images(self, ims, txts, links, width=400): + """add images to the HTML file + + Parameters: + ims (str list) -- a list of image paths + txts (str list) -- a list of image names shown on the website + links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page + """ + self.t = table(border=1, style="table-layout: fixed;") # Insert a table + self.doc.add(self.t) + with self.t: + with tr(): + for im, txt, link in zip(ims, txts, links): + with td(style="word-wrap: break-word;", halign="center", valign="top"): + with p(): + with a(href=os.path.join('images', link)): + img(style="width:%dpx" % width, src=os.path.join('images', im)) + br() + p(txt) + + def save(self): + """save the current content to the HTML file""" + html_file = '%s/index.html' % self.web_dir + f = open(html_file, 'wt') + f.write(self.doc.render()) + f.close() + + +if __name__ == '__main__': # we show an example usage here. + html = HTML('web/', 'test_html') + html.add_header('hello world') + + ims, txts, links = [], [], [] + for n in range(4): + ims.append('image_%d.png' % n) + txts.append('text_%d' % n) + links.append('image_%d.png' % n) + html.add_images(ims, txts, links) + html.save() diff --git a/util/image_pool.py b/util/image_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..6d086f882bc3d1b90c529fce6cddaaa75f2005d7 --- /dev/null +++ b/util/image_pool.py @@ -0,0 +1,54 @@ +import random +import torch + + +class ImagePool(): + """This class implements an image buffer that stores previously generated images. + + This buffer enables us to update discriminators using a history of generated images + rather than the ones produced by the latest generators. + """ + + def __init__(self, pool_size): + """Initialize the ImagePool class + + Parameters: + pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created + """ + self.pool_size = pool_size + if self.pool_size > 0: # create an empty pool + self.num_imgs = 0 + self.images = [] + + def query(self, images): + """Return an image from the pool. + + Parameters: + images: the latest generated images from the generator + + Returns images from the buffer. + + By 50/100, the buffer will return input images. + By 50/100, the buffer will return images previously stored in the buffer, + and insert the current images to the buffer.
+ """ + if self.pool_size == 0: # if the buffer size is 0, do nothing + return images + return_images = [] + for image in images: + image = torch.unsqueeze(image.data, 0) + if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer + self.num_imgs = self.num_imgs + 1 + self.images.append(image) + return_images.append(image) + else: + p = random.uniform(0, 1) + if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer + random_id = random.randint(0, self.pool_size - 1) # randint is inclusive + tmp = self.images[random_id].clone() + self.images[random_id] = image + return_images.append(tmp) + else: # by another 50% chance, the buffer will return the current image + return_images.append(image) + return_images = torch.cat(return_images, 0) # collect all the images and return + return return_images diff --git a/util/util.py b/util/util.py new file mode 100644 index 0000000000000000000000000000000000000000..5702d37d22b41760d62f219847afebd1ad2a9d80 --- /dev/null +++ b/util/util.py @@ -0,0 +1,166 @@ +"""This module contains simple helper functions """ +from __future__ import print_function +import torch +import numpy as np +from PIL import Image +import os +import importlib +import argparse +from argparse import Namespace +import torchvision + + +def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def copyconf(default_opt, **kwargs): + conf = Namespace(**vars(default_opt)) + for key in kwargs: + setattr(conf, key, kwargs[key]) + return conf + + +def find_class_in_module(target_cls_name, module): + target_cls_name = target_cls_name.replace('_', '').lower() + clslib = importlib.import_module(module) + cls = None + for name, clsobj in clslib.__dict__.items(): + if name.lower() == target_cls_name: + cls = clsobj + + assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name) + + return cls + + +def tensor2im(input_image, imtype=np.uint8): + """"Converts a Tensor array into a numpy image array. 
+ + Parameters: + input_image (tensor) -- the input image tensor array + imtype (type) -- the desired type of the converted numpy array + """ + if not isinstance(input_image, np.ndarray): + if isinstance(input_image, torch.Tensor): # get the data from a variable + image_tensor = input_image.data + else: + return input_image + image_numpy = image_tensor[0].clamp(-1.0, 1.0).cpu().float().numpy() # convert it into a numpy array + if image_numpy.shape[0] == 1: # grayscale to RGB + image_numpy = np.tile(image_numpy, (3, 1, 1)) + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling + else: # if it is a numpy array, do nothing + image_numpy = input_image + return image_numpy.astype(imtype) + + +def diagnose_network(net, name='network'): + """Calculate and print the mean of average absolute(gradients) + + Parameters: + net (torch network) -- Torch network + name (str) -- the name of the network + """ + mean = 0.0 + count = 0 + for param in net.parameters(): + if param.grad is not None: + mean += torch.mean(torch.abs(param.grad.data)) + count += 1 + if count > 0: + mean = mean / count + print(name) + print(mean) + + +def save_image(image_numpy, image_path, aspect_ratio=1.0): + """Save a numpy image to the disk + + Parameters: + image_numpy (numpy array) -- input numpy array + image_path (str) -- the path of the image + """ + + image_pil = Image.fromarray(image_numpy) + h, w, _ = image_numpy.shape + + if aspect_ratio is None: + pass + elif aspect_ratio > 1.0: + image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) + elif aspect_ratio < 1.0: + image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) + image_pil.save(image_path) + + +def print_numpy(x, val=True, shp=False): + """Print the mean, min, max, median, std, and size of a numpy array + + Parameters: + val (bool) -- if print the values of the numpy array + shp (bool) -- if print the shape of the numpy array + """ + x = x.astype(np.float64) + if shp: + print('shape,', x.shape) + if val: + x = x.flatten() + print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( + np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) + + +def mkdirs(paths): + """create empty directories if they don't exist + + Parameters: + paths (str list) -- a list of directory paths + """ + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + + +def mkdir(path): + """create a single empty directory if it didn't exist + + Parameters: + path (str) -- a single directory path + """ + if not os.path.exists(path): + os.makedirs(path) + + +def correct_resize_label(t, size): + device = t.device + t = t.detach().cpu() + resized = [] + for i in range(t.size(0)): + one_t = t[i, :1] + one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0)) + one_np = one_np[:, :, 0] + one_image = Image.fromarray(one_np).resize(size, Image.NEAREST) + resized_t = torch.from_numpy(np.array(one_image)).long() + resized.append(resized_t) + return torch.stack(resized, dim=0).to(device) + + +def correct_resize(t, size, mode=Image.BICUBIC): + device = t.device + t = t.detach().cpu() + resized = [] + for i in range(t.size(0)): + one_t = t[i:i + 1] + one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC) + resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0 + resized.append(resized_t) + return torch.stack(resized, dim=0).to(device) diff --git a/util/visualizer.py 
b/util/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c17f2c68ff54c99b07214bdee9dd1fdf08ae34dc --- /dev/null +++ b/util/visualizer.py @@ -0,0 +1,242 @@ +import numpy as np +import os +import sys +import ntpath +import time +from . import util, html +from subprocess import Popen, PIPE + +if sys.version_info[0] == 2: + VisdomExceptionBase = Exception +else: + VisdomExceptionBase = ConnectionError + + +def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): + """Save images to the disk. + + Parameters: + webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) + visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs + image_path (str) -- the string is used to create image paths + aspect_ratio (float) -- the aspect ratio of saved images + width (int) -- the images will be resized to width x width + + This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. + """ + image_dir = webpage.get_image_dir() + short_path = ntpath.basename(image_path[0]) + name = os.path.splitext(short_path)[0] + + webpage.add_header(name) + ims, txts, links = [], [], [] + + for label, im_data in visuals.items(): + im = util.tensor2im(im_data) + image_name = '%s/%s.png' % (label, name) + os.makedirs(os.path.join(image_dir, label), exist_ok=True) + save_path = os.path.join(image_dir, image_name) + util.save_image(im, save_path, aspect_ratio=aspect_ratio) + ims.append(image_name) + txts.append(label) + links.append(image_name) + webpage.add_images(ims, txts, links, width=width) + + +class Visualizer(): + """This class includes several functions that can display/save images and print/save logging information. + + It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. + """ + + def __init__(self, opt): + """Initialize the Visualizer class + + Parameters: + opt -- stores all the experiment flags; needs to be a subclass of BaseOptions + Step 1: Cache the training/test options + Step 2: connect to a visdom server + Step 3: create an HTML object for saveing HTML filters + Step 4: create a logging file to store training losses + """ + self.opt = opt # cache the option + if opt.display_id is None: + self.display_id = np.random.randint(100000) * 10 # just a random display id + else: + self.display_id = opt.display_id + self.use_html = opt.isTrain and not opt.no_html + self.win_size = opt.display_winsize + self.name = opt.name + self.port = opt.display_port + self.saved = False + if self.display_id > 0: # connect to a visdom server given and + import visdom + self.plot_data = {} + self.ncols = opt.display_ncols + if "tensorboard_base_url" not in os.environ: + self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) + else: + self.vis = visdom.Visdom(port=2004, + base_url=os.environ['tensorboard_base_url'] + '/visdom') + if not self.vis.check_connection(): + self.create_visdom_connections() + + if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ + self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') + self.img_dir = os.path.join(self.web_dir, 'images') + print('create web directory %s...' 
% self.web_dir) + util.mkdirs([self.web_dir, self.img_dir]) + # create a logging file to store training losses + self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write('================ Training Loss (%s) ================\n' % now) + + def reset(self): + """Reset the self.saved status""" + self.saved = False + + def create_visdom_connections(self): + """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ + cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port + print('\n\nCould not connect to Visdom server. \n Trying to start a server....') + print('Command: %s' % cmd) + Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + + def display_current_results(self, visuals, epoch, save_result): + """Display current results on visdom; save current results to an HTML file. + + Parameters: + visuals (OrderedDict) - - dictionary of images to display or save + epoch (int) - - the current epoch + save_result (bool) - - if save the current results to an HTML file + """ + if self.display_id > 0: # show images in the browser using visdom + ncols = self.ncols + if ncols > 0: # show all the images in one visdom panel + ncols = min(ncols, len(visuals)) + h, w = next(iter(visuals.values())).shape[:2] + table_css = """""" % (w, h) # create a table css + # create a table of images. + title = self.name + label_html = '' + label_html_row = '' + images = [] + idx = 0 + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + label_html_row += '%s' % label + images.append(image_numpy.transpose([2, 0, 1])) + idx += 1 + if idx % ncols == 0: + label_html += '%s' % label_html_row + label_html_row = '' + white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 + while idx % ncols != 0: + images.append(white_image) + label_html_row += '' + idx += 1 + if label_html_row != '': + label_html += '%s' % label_html_row + try: + self.vis.images(images, ncols, 2, self.display_id + 1, + None, dict(title=title + ' images')) + label_html = '%s
' % label_html + self.vis.text(table_css + label_html, win=self.display_id + 2, + opts=dict(title=title + ' labels')) + except VisdomExceptionBase: + self.create_visdom_connections() + + else: # show each image in a separate visdom panel; + idx = 1 + try: + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + self.vis.image( + image_numpy.transpose([2, 0, 1]), + self.display_id + idx, + None, + dict(title=label) + ) + idx += 1 + except VisdomExceptionBase: + self.create_visdom_connections() + + if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. + self.saved = True + # save images to the disk + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) + util.save_image(image_numpy, img_path) + + # update website + webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0) + for n in range(epoch, 0, -1): + webpage.add_header('epoch [%d]' % n) + ims, txts, links = [], [], [] + + for label, image_numpy in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = 'epoch%.3d_%s.png' % (n, label) + ims.append(img_path) + txts.append(label) + links.append(img_path) + webpage.add_images(ims, txts, links, width=self.win_size) + webpage.save() + + def plot_current_losses(self, epoch, counter_ratio, losses): + """display the current losses on visdom display: dictionary of error labels and values + + Parameters: + epoch (int) -- current epoch + counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + """ + if len(losses) == 0: + return + + plot_name = '_'.join(list(losses.keys())) + + if plot_name not in self.plot_data: + self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())} + + plot_data = self.plot_data[plot_name] + plot_id = list(self.plot_data.keys()).index(plot_name) + + plot_data['X'].append(epoch + counter_ratio) + plot_data['Y'].append([losses[k] for k in plot_data['legend']]) + try: + self.vis.line( + X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1), + Y=np.array(plot_data['Y']), + opts={ + 'title': self.name, + 'legend': plot_data['legend'], + 'xlabel': 'epoch', + 'ylabel': 'loss'}, + win=self.display_id - plot_id) + except VisdomExceptionBase: + self.create_visdom_connections() + + # losses: same format as |losses| of plot_current_losses + def print_current_losses(self, epoch, iters, losses, t_comp, t_data): + """print current losses on console; also save the losses to the disk + + Parameters: + epoch (int) -- current epoch + iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + t_comp (float) -- computational time per data point (normalized by batch_size) + t_data (float) -- data loading time per data point (normalized by batch_size) + """ + message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) + for k, v in losses.items(): + message += '%s: %.3f ' % (k, v) + + print(message) # print the message + with open(self.log_name, "a") as log_file: + log_file.write('%s\n' % message) # save the message