hyliu commited on
Commit
2541ce6
1 Parent(s): 8cb1339

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +57 -0
  2. DN_RGB/.ipynb_checkpoints/Untitled-checkpoint.ipynb +6 -0
  3. DN_RGB/.ipynb_checkpoints/Untitled1-checkpoint.ipynb +59 -0
  4. DN_RGB/README.md +102 -0
  5. DN_RGB/Untitled.ipynb +6 -0
  6. DN_RGB/Untitled1.ipynb +59 -0
  7. DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-checkpoint.sh +18 -0
  8. DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-nobn-checkpoint.sh +18 -0
  9. DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-res-checkpoint.sh +24 -0
  10. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-checkpoint.sh +18 -0
  11. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-in-checkpoint.sh +18 -0
  12. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-large-checkpoint.sh +18 -0
  13. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-checkpoint.sh +17 -0
  14. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-gclip-checkpoint.sh +18 -0
  15. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-large-checkpoint.sh +17 -0
  16. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-small-checkpoint.sh +17 -0
  17. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-checkpoint.sh +18 -0
  18. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-nores-checkpoint.sh +18 -0
  19. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-res10-checkpoint.sh +18 -0
  20. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-small-checkpoint.sh +17 -0
  21. DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu2-nobn-checkpoint.sh +18 -0
  22. DN_RGB/code/.ipynb_checkpoints/train-checkpoint.sh +4 -0
  23. DN_RGB/code/.ipynb_checkpoints/trainer-checkpoint.py +182 -0
  24. DN_RGB/code/LICENSE +21 -0
  25. DN_RGB/code/__init__.py +0 -0
  26. DN_RGB/code/__pycache__/__init__.cpython-37.pyc +0 -0
  27. DN_RGB/code/__pycache__/option.cpython-36.pyc +0 -0
  28. DN_RGB/code/__pycache__/option.cpython-37.pyc +0 -0
  29. DN_RGB/code/__pycache__/template.cpython-36.pyc +0 -0
  30. DN_RGB/code/__pycache__/template.cpython-37.pyc +0 -0
  31. DN_RGB/code/__pycache__/trainer.cpython-36.pyc +0 -0
  32. DN_RGB/code/__pycache__/trainer.cpython-37.pyc +0 -0
  33. DN_RGB/code/__pycache__/utility.cpython-36.pyc +0 -0
  34. DN_RGB/code/__pycache__/utility.cpython-37.pyc +0 -0
  35. DN_RGB/code/data/__init__.py +52 -0
  36. DN_RGB/code/data/__pycache__/__init__.cpython-36.pyc +0 -0
  37. DN_RGB/code/data/__pycache__/__init__.cpython-37.pyc +0 -0
  38. DN_RGB/code/data/__pycache__/benchmark.cpython-36.pyc +0 -0
  39. DN_RGB/code/data/__pycache__/benchmark.cpython-37.pyc +0 -0
  40. DN_RGB/code/data/__pycache__/common.cpython-36.pyc +0 -0
  41. DN_RGB/code/data/__pycache__/common.cpython-37.pyc +0 -0
  42. DN_RGB/code/data/__pycache__/div2k.cpython-36.pyc +0 -0
  43. DN_RGB/code/data/__pycache__/div2k.cpython-37.pyc +0 -0
  44. DN_RGB/code/data/__pycache__/srdata.cpython-36.pyc +0 -0
  45. DN_RGB/code/data/__pycache__/srdata.cpython-37.pyc +0 -0
  46. DN_RGB/code/data/benchmark.py +25 -0
  47. DN_RGB/code/data/common.py +72 -0
  48. DN_RGB/code/data/demo.py +39 -0
  49. DN_RGB/code/data/div2k.py +32 -0
  50. DN_RGB/code/data/div2kjpeg.py +20 -0
.gitattributes CHANGED
@@ -191,3 +191,60 @@ Demosaic/experiment/test/results-Urban100/img097_x1_DM.png filter=lfs diff=lfs m
191
  Demosaic/experiment/test/results-Urban100/img098_x1_DM.png filter=lfs diff=lfs merge=lfs -text
192
  Demosaic/experiment/test/results-Urban100/img099_x1_DM.png filter=lfs diff=lfs merge=lfs -text
193
  Demosaic/experiment/test/results-Urban100/img100_x1_DM.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
  Demosaic/experiment/test/results-Urban100/img098_x1_DM.png filter=lfs diff=lfs merge=lfs -text
192
  Demosaic/experiment/test/results-Urban100/img099_x1_DM.png filter=lfs diff=lfs merge=lfs -text
193
  Demosaic/experiment/test/results-Urban100/img100_x1_DM.png filter=lfs diff=lfs merge=lfs -text
194
+ DN_RGB/experiment/test/results-Urban100/img002_x50_DN.png filter=lfs diff=lfs merge=lfs -text
195
+ DN_RGB/experiment/test/results-Urban100/img003_x50_DN.png filter=lfs diff=lfs merge=lfs -text
196
+ DN_RGB/experiment/test/results-Urban100/img004_x50_DN.png filter=lfs diff=lfs merge=lfs -text
197
+ DN_RGB/experiment/test/results-Urban100/img006_x50_DN.png filter=lfs diff=lfs merge=lfs -text
198
+ DN_RGB/experiment/test/results-Urban100/img008_x50_DN.png filter=lfs diff=lfs merge=lfs -text
199
+ DN_RGB/experiment/test/results-Urban100/img012_x50_DN.png filter=lfs diff=lfs merge=lfs -text
200
+ DN_RGB/experiment/test/results-Urban100/img013_x50_DN.png filter=lfs diff=lfs merge=lfs -text
201
+ DN_RGB/experiment/test/results-Urban100/img014_x50_DN.png filter=lfs diff=lfs merge=lfs -text
202
+ DN_RGB/experiment/test/results-Urban100/img015_x50_DN.png filter=lfs diff=lfs merge=lfs -text
203
+ DN_RGB/experiment/test/results-Urban100/img017_x50_DN.png filter=lfs diff=lfs merge=lfs -text
204
+ DN_RGB/experiment/test/results-Urban100/img018_x50_DN.png filter=lfs diff=lfs merge=lfs -text
205
+ DN_RGB/experiment/test/results-Urban100/img019_x50_DN.png filter=lfs diff=lfs merge=lfs -text
206
+ DN_RGB/experiment/test/results-Urban100/img020_x50_DN.png filter=lfs diff=lfs merge=lfs -text
207
+ DN_RGB/experiment/test/results-Urban100/img023_x50_DN.png filter=lfs diff=lfs merge=lfs -text
208
+ DN_RGB/experiment/test/results-Urban100/img024_x50_DN.png filter=lfs diff=lfs merge=lfs -text
209
+ DN_RGB/experiment/test/results-Urban100/img030_x50_DN.png filter=lfs diff=lfs merge=lfs -text
210
+ DN_RGB/experiment/test/results-Urban100/img031_x50_DN.png filter=lfs diff=lfs merge=lfs -text
211
+ DN_RGB/experiment/test/results-Urban100/img034_x50_DN.png filter=lfs diff=lfs merge=lfs -text
212
+ DN_RGB/experiment/test/results-Urban100/img037_x50_DN.png filter=lfs diff=lfs merge=lfs -text
213
+ DN_RGB/experiment/test/results-Urban100/img038_x50_DN.png filter=lfs diff=lfs merge=lfs -text
214
+ DN_RGB/experiment/test/results-Urban100/img039_x50_DN.png filter=lfs diff=lfs merge=lfs -text
215
+ DN_RGB/experiment/test/results-Urban100/img041_x50_DN.png filter=lfs diff=lfs merge=lfs -text
216
+ DN_RGB/experiment/test/results-Urban100/img045_x50_DN.png filter=lfs diff=lfs merge=lfs -text
217
+ DN_RGB/experiment/test/results-Urban100/img047_x50_DN.png filter=lfs diff=lfs merge=lfs -text
218
+ DN_RGB/experiment/test/results-Urban100/img048_x50_DN.png filter=lfs diff=lfs merge=lfs -text
219
+ DN_RGB/experiment/test/results-Urban100/img049_x50_DN.png filter=lfs diff=lfs merge=lfs -text
220
+ DN_RGB/experiment/test/results-Urban100/img053_x50_DN.png filter=lfs diff=lfs merge=lfs -text
221
+ DN_RGB/experiment/test/results-Urban100/img054_x50_DN.png filter=lfs diff=lfs merge=lfs -text
222
+ DN_RGB/experiment/test/results-Urban100/img056_x50_DN.png filter=lfs diff=lfs merge=lfs -text
223
+ DN_RGB/experiment/test/results-Urban100/img058_x50_DN.png filter=lfs diff=lfs merge=lfs -text
224
+ DN_RGB/experiment/test/results-Urban100/img060_x50_DN.png filter=lfs diff=lfs merge=lfs -text
225
+ DN_RGB/experiment/test/results-Urban100/img061_x50_DN.png filter=lfs diff=lfs merge=lfs -text
226
+ DN_RGB/experiment/test/results-Urban100/img062_x50_DN.png filter=lfs diff=lfs merge=lfs -text
227
+ DN_RGB/experiment/test/results-Urban100/img063_x50_DN.png filter=lfs diff=lfs merge=lfs -text
228
+ DN_RGB/experiment/test/results-Urban100/img066_x50_DN.png filter=lfs diff=lfs merge=lfs -text
229
+ DN_RGB/experiment/test/results-Urban100/img068_x50_DN.png filter=lfs diff=lfs merge=lfs -text
230
+ DN_RGB/experiment/test/results-Urban100/img069_x50_DN.png filter=lfs diff=lfs merge=lfs -text
231
+ DN_RGB/experiment/test/results-Urban100/img070_x50_DN.png filter=lfs diff=lfs merge=lfs -text
232
+ DN_RGB/experiment/test/results-Urban100/img072_x50_DN.png filter=lfs diff=lfs merge=lfs -text
233
+ DN_RGB/experiment/test/results-Urban100/img073_x50_DN.png filter=lfs diff=lfs merge=lfs -text
234
+ DN_RGB/experiment/test/results-Urban100/img074_x50_DN.png filter=lfs diff=lfs merge=lfs -text
235
+ DN_RGB/experiment/test/results-Urban100/img076_x50_DN.png filter=lfs diff=lfs merge=lfs -text
236
+ DN_RGB/experiment/test/results-Urban100/img077_x50_DN.png filter=lfs diff=lfs merge=lfs -text
237
+ DN_RGB/experiment/test/results-Urban100/img079_x50_DN.png filter=lfs diff=lfs merge=lfs -text
238
+ DN_RGB/experiment/test/results-Urban100/img081_x50_DN.png filter=lfs diff=lfs merge=lfs -text
239
+ DN_RGB/experiment/test/results-Urban100/img082_x50_DN.png filter=lfs diff=lfs merge=lfs -text
240
+ DN_RGB/experiment/test/results-Urban100/img083_x50_DN.png filter=lfs diff=lfs merge=lfs -text
241
+ DN_RGB/experiment/test/results-Urban100/img084_x50_DN.png filter=lfs diff=lfs merge=lfs -text
242
+ DN_RGB/experiment/test/results-Urban100/img087_x50_DN.png filter=lfs diff=lfs merge=lfs -text
243
+ DN_RGB/experiment/test/results-Urban100/img088_x50_DN.png filter=lfs diff=lfs merge=lfs -text
244
+ DN_RGB/experiment/test/results-Urban100/img089_x50_DN.png filter=lfs diff=lfs merge=lfs -text
245
+ DN_RGB/experiment/test/results-Urban100/img091_x50_DN.png filter=lfs diff=lfs merge=lfs -text
246
+ DN_RGB/experiment/test/results-Urban100/img092_x50_DN.png filter=lfs diff=lfs merge=lfs -text
247
+ DN_RGB/experiment/test/results-Urban100/img095_x50_DN.png filter=lfs diff=lfs merge=lfs -text
248
+ DN_RGB/experiment/test/results-Urban100/img098_x50_DN.png filter=lfs diff=lfs merge=lfs -text
249
+ DN_RGB/experiment/test/results-Urban100/img099_x50_DN.png filter=lfs diff=lfs merge=lfs -text
250
+ DN_RGB/experiment/test/results-Urban100/img100_x50_DN.png filter=lfs diff=lfs merge=lfs -text
DN_RGB/.ipynb_checkpoints/Untitled-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [],
3
+ "metadata": {},
4
+ "nbformat": 4,
5
+ "nbformat_minor": 4
6
+ }
DN_RGB/.ipynb_checkpoints/Untitled1-checkpoint.ipynb ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stdout",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "0\n",
13
+ "1\n",
14
+ "2\n",
15
+ "3\n",
16
+ "4\n",
17
+ "5\n",
18
+ "6\n",
19
+ "7\n"
20
+ ]
21
+ }
22
+ ],
23
+ "source": [
24
+ "from time import sleep\n",
25
+ "for i in range(500):\n",
26
+ " print(i)\n",
27
+ " sleep(1)"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": null,
33
+ "metadata": {},
34
+ "outputs": [],
35
+ "source": []
36
+ }
37
+ ],
38
+ "metadata": {
39
+ "kernelspec": {
40
+ "display_name": "Python 3",
41
+ "language": "python",
42
+ "name": "python3"
43
+ },
44
+ "language_info": {
45
+ "codemirror_mode": {
46
+ "name": "ipython",
47
+ "version": 3
48
+ },
49
+ "file_extension": ".py",
50
+ "mimetype": "text/x-python",
51
+ "name": "python",
52
+ "nbconvert_exporter": "python",
53
+ "pygments_lexer": "ipython3",
54
+ "version": "3.7.9"
55
+ }
56
+ },
57
+ "nbformat": 4,
58
+ "nbformat_minor": 4
59
+ }
DN_RGB/README.md ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pyramid Attention for Image Restoration
2
+ This repository is for PANet and PA-EDSR introduced in the following paper
3
+
4
+ [Yiqun Mei](http://yiqunm2.web.illinois.edu/), [Yuchen Fan](https://scholar.google.com/citations?user=BlfdYL0AAAAJ&hl=en), [Yulun Zhang](http://yulunzhang.com/), [Jiahui Yu](https://jiahuiyu.com/), [Yuqian Zhou](https://yzhouas.github.io/), [Ding Liu](https://scholar.google.com/citations?user=PGtHUI0AAAAJ&hl=en), [Yun Fu](http://www1.ece.neu.edu/~yunfu/), [Thomas S. Huang](http://ifp-uiuc.github.io/) and [Honghui Shi](https://www.humphreyshi.com/) "Pyramid Attention for Image Restoration", [[Arxiv]](https://arxiv.org/abs/2004.13824)
5
+
6
+ The code is built on [EDSR (PyTorch)](https://github.com/thstkdgus35/EDSR-PyTorch) & [RNAN](https://github.com/yulunzhang/RNAN) and tested on Ubuntu 18.04 environment (Python3.6, PyTorch_1.1) with Titan X/1080Ti/V100 GPUs.
7
+
8
+ ## Contents
9
+ 1. [Train](#train)
10
+ 2. [Test](#test)
11
+ 3. [Results](#results)
12
+ 4. [Citation](#citation)
13
+ 5. [Acknowledgements](#acknowledgements)
14
+
15
+ ## Train
16
+ ### Prepare training data
17
+
18
+ 1. Download DIV2K training data (800 training + 100 validation images) from [DIV2K dataset](https://data.vision.ee.ethz.ch/cvl/DIV2K/) or [SNU_CVLab](https://cv.snu.ac.kr/research/EDSR/DIV2K.tar).
19
+
20
+ 2. Specify '--dir_data' in optional.py based on the HR and LR images path.
21
+
22
+ 3. Organize training data like:
23
+ ```bash
24
+ DIV2K/
25
+ ├── DIV2K_train_HR
26
+ ├── DIV2K_train_LR_bicubic
27
+ │ └── X10
28
+ │ └── X30
29
+ │ └── X50
30
+ │ └── X70
31
+ ├── DIV2K_valid_HR
32
+ └── DIV2K_valid_LR_bicubic
33
+ └── X10
34
+ └── X30
35
+ └── X50
36
+ └── X70
37
+ ```
38
+
39
+ For more information, please refer to [EDSR(PyTorch)](https://github.com/thstkdgus35/EDSR-PyTorch).
40
+
41
+ ### Begin to train
42
+
43
+ 1. (optional) All the pretrained models and visual results can be downloaded from [Google Drive](https://drive.google.com/open?id=1q9iUzqYX0fVRzDu4J6fvSPRosgOZoJJE).
44
+
45
+ 2. Cd to 'PANet-PyTorch/[Task]/code', run the following scripts to train models.
46
+
47
+ **You can use scripts in file 'demo.sb' to train and test models for our paper.**
48
+
49
+ ```bash
50
+ # Example Usage: N=10
51
+ python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 80 --save_models --epoch 1000 --decay 200-400-600-800 --model PANET --scale 50 --patch_size 48 --reset --save PANET_N50 --n_feats 64 --data_train DIV2K --chop
52
+ ```
53
+ ## Test
54
+ ### Quick start
55
+
56
+ 1. Cd to 'PANet-PyTorch/[Task]/code', run the following scripts.
57
+
58
+ **You can use scripts in file 'demo.sb' to produce results for our paper.**
59
+
60
+ ```bash
61
+ # No self-ensemble, use different testsets to reproduce the results in the paper.
62
+ # Example Usage:
63
+ python main.py --model PANET --n_resblocks 80 --n_feats 64 --data_test Urban100 --scale 10 --save_results --chop --test_only --pre_train ../path_to_model
64
+ ```
65
+
66
+ ### The whole test pipeline
67
+ 1. Prepare test data. Organize test data like:
68
+ ```bash
69
+ benchmark/
70
+ ├── testset1
71
+ │ └── HR
72
+ │ └── LR_bicubic
73
+ │ └── X10
74
+ │ └── ..
75
+ ├── testset2
76
+ ```
77
+ 2. Conduct image denoising.
78
+
79
+ See **Quick start**
80
+ 3. Evaluate the results.
81
+
82
+ Run 'Evaluate_PSNR_SSIM.m' to obtain PSNR/SSIM values for paper.
83
+
84
+ ## Citation
85
+ If you find the code helpful in your research or work, please cite the following papers.
86
+ ```
87
+ @article{mei2020pyramid,
88
+ title={Pyramid Attention Networks for Image Restoration},
89
+ author={Mei, Yiqun and Fan, Yuchen and Zhang, Yulun and Yu, Jiahui and Zhou, Yuqian and Liu, Ding and Fu, Yun and Huang, Thomas S and Shi, Honghui},
90
+ journal={arXiv preprint arXiv:2004.13824},
91
+ year={2020}
92
+ }
93
+ @InProceedings{Lim_2017_CVPR_Workshops,
94
+ author = {Lim, Bee and Son, Sanghyun and Kim, Heewon and Nah, Seungjun and Lee, Kyoung Mu},
95
+ title = {Enhanced Deep Residual Networks for Single Image Super-Resolution},
96
+ booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
97
+ month = {July},
98
+ year = {2017}
99
+ }
100
+ ```
101
+ ## Acknowledgements
102
+ This code is built on [EDSR (PyTorch)](https://github.com/thstkdgus35/EDSR-PyTorch), [RNAN](https://github.com/yulunzhang/RNAN) and [generative-inpainting-pytorch](https://github.com/daa233/generative-inpainting-pytorch). We thank the authors for sharing their codes.
DN_RGB/Untitled.ipynb ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [],
3
+ "metadata": {},
4
+ "nbformat": 4,
5
+ "nbformat_minor": 4
6
+ }
DN_RGB/Untitled1.ipynb ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stdout",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "0\n",
13
+ "1\n",
14
+ "2\n",
15
+ "3\n",
16
+ "4\n",
17
+ "5\n",
18
+ "6\n",
19
+ "7\n"
20
+ ]
21
+ }
22
+ ],
23
+ "source": [
24
+ "from time import sleep\n",
25
+ "for i in range(500):\n",
26
+ " print(i)\n",
27
+ " sleep(1)"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": null,
33
+ "metadata": {},
34
+ "outputs": [],
35
+ "source": []
36
+ }
37
+ ],
38
+ "metadata": {
39
+ "kernelspec": {
40
+ "display_name": "Python 3",
41
+ "language": "python",
42
+ "name": "python3"
43
+ },
44
+ "language_info": {
45
+ "codemirror_mode": {
46
+ "name": "ipython",
47
+ "version": 3
48
+ },
49
+ "file_extension": ".py",
50
+ "mimetype": "text/x-python",
51
+ "name": "python",
52
+ "nbconvert_exporter": "python",
53
+ "pygments_lexer": "ipython3",
54
+ "version": "3.7.9"
55
+ }
56
+ },
57
+ "nbformat": 4,
58
+ "nbformat_minor": 4
59
+ }
DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ bash ./prepare.sh
4
+ lst=0
5
+ for i in {0..3};
6
+ do
7
+ if nvidia-smi -i $i | grep -q "python"; then
8
+ :
9
+ else
10
+ lst=$i
11
+ break
12
+ fi
13
+ done
14
+
15
+ CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
16
+ --epoch 1000 --decay 200-400-600-800 --model RAFTNETSINGLE --scale 50 --patch_size 48 \
17
+ --save RAFTSINGLE_N50_R1 --n_feats 64 --data_train DIV2K --recurrence 1 \
18
+ --load RAFTSINGLE_N50_R1 --resume -1
DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-nobn-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETSINGLE (1 recurrence, no batch norm) on DIV2K at noise
# level 50, auto-resuming the experiment "RAFTSINGLE_N50_R1_nobn".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETSINGLE --scale 50 --patch_size 48 \
--save RAFTSINGLE_N50_R1_nobn --n_feats 64 --data_train DIV2K --recurrence 1 --normalization none \
--load RAFTSINGLE_N50_R1_nobn --resume -1
DN_RGB/code/.ipynb_checkpoints/train-1rec-raft-s-gpu1-res-checkpoint.sh ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train the plain RESNET baseline on DIV2K at noise level 50,
# auto-resuming the experiment "RAFTS_RES". Earlier experiment
# command lines are kept below, commented out, for reference.

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

# CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --decay 200-400-600-800 --epoch 1000 --batch_size 16 --n_resblocks 10 --save_models \
# --model RAFTNETS --scale 1 --patch_size 48 --save RAFTS_DEMOSAIC20_R4 --n_feats 64 --data_train DIV2K --recurrence 4 --data_range "1-800/901-942"

# python main.py --model LAMBDANET --n_resblocks 20 --recurrence 1 --save_results --n_GPUs 1 --chop --data_test McM+Kodak24+CBSD68+Urban100 --scale 1 \
# --pre_train ../experiment/LAMBDA_DEMOSAIC20_R1/model/model_best.pt --test_only

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RESNET --scale 50 --patch_size 48 \
--save RAFTS_RES --n_feats 64 --data_train DIV2K --recurrence 1 \
--load RAFTS_RES --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, default batch norm) on DIV2K at noise
# level 50, auto-resuming the experiment "RAFTS_N50_R4_bn".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_bn --n_feats 64 --data_train DIV2K --recurrence 4 \
--load RAFTS_N50_R4_bn --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-in-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, instance normalization) on DIV2K at
# noise level 50, auto-resuming the experiment "RAFTS_N50_R4_in".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_in --n_feats 64 --data_train DIV2K --recurrence 4 --normalization instance \
--resume -1 --load RAFTS_N50_R4_in
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-large-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences) with a larger 96x96 patch size on
# DIV2K at noise level 50, auto-resuming "RAFTS_N50_R4_bn_large".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 96 \
--save RAFTS_N50_R4_bn_large --n_feats 64 --data_train DIV2K --recurrence 4 \
--load RAFTS_N50_R4_bn_large --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-checkpoint.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, layer normalization) from scratch
# (--reset: no auto-resume) on DIV2K at noise level 50.

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 --reset \
--save RAFTS_N50_R4_ln --n_feats 64 --data_train DIV2K --recurrence 4 --normalization layer
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-gclip-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, layer norm, gradient value clipping
# at 1.2) on DIV2K at noise level 50, auto-resuming
# "RAFTS_N50_R4_ln_clip".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_ln_clip --n_feats 64 --data_train DIV2K --recurrence 4 --normalization layer --gclip 1.2 \
--load RAFTS_N50_R4_ln_clip --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-large-checkpoint.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, layer norm) with a larger 96x96 patch
# size, from scratch (--reset), on DIV2K at noise level 50.

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 96 --reset \
--save RAFTS_N50_R4_ln_large --n_feats 64 --data_train DIV2K --recurrence 4 --normalization layer
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-ln-small-checkpoint.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, layer norm, gclip 1.2) with small
# 24x24 patches and a larger batch (64), from scratch (--reset),
# on DIV2K at noise level 50.

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 64 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 24 --reset \
--save RAFTS_N50_R4_ln_small --n_feats 64 --data_train DIV2K --recurrence 4 --normalization layer --gclip 1.2
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, no normalization) on DIV2K at noise
# level 50, auto-resuming the experiment "RAFTS_N50_R4_nobn".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_nobn --n_feats 64 --data_train DIV2K --recurrence 4 --normalization none \
--load RAFTS_N50_R4_nobn --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-nores-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETSNORES (4 recurrences, no normalization, no residual,
# detached recurrence) on DIV2K at noise level 50, auto-resuming
# "RAFTS_N50_R4_nobn_nores".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETSNORES --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_nobn_nores --n_feats 64 --data_train DIV2K --recurrence 4 --normalization none --detach \
--load RAFTS_N50_R4_nobn_nores --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-nobn-res10-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, no normalization) with a shallower
# backbone (10 residual blocks) on DIV2K at noise level 50,
# auto-resuming "RAFTS_N50_R4_nobn_res10".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 10 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_nobn_res10 --n_feats 64 --data_train DIV2K --recurrence 4 --normalization none \
--load RAFTS_N50_R4_nobn_res10 --resume -1
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu1-small-checkpoint.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Train RAFTNETS (4 recurrences, default normalization) with small
# 24x24 patches, from scratch (--reset), on DIV2K at noise level 50.

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process;
# falls back to GPU 0 when all are busy.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=$lst python main.py --n_GPUs 1 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 24 --reset \
--save RAFTS_N50_R4_bn_small --n_feats 64 --data_train DIV2K --recurrence 4
DN_RGB/code/.ipynb_checkpoints/train-4rec-raft-s-gpu2-nobn-checkpoint.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Two-GPU variant of the nobn run: trains RAFTNETS (4 recurrences,
# no normalization) on DIV2K at noise level 50, auto-resuming
# "RAFTS_N50_R4_nobn".

bash ./prepare.sh

# Pick the first GPU in 0..3 with no running python process.
# NOTE(review): $lst is computed but NOT used below — the launch line
# hard-codes CUDA_VISIBLE_DEVICES=0,3; confirm this is intentional.
lst=0
for i in {0..3};
do
    if nvidia-smi -i $i | grep -q "python"; then
        :
    else
        lst=$i
        break
    fi
done

CUDA_VISIBLE_DEVICES=0,3 python main.py --n_GPUs 2 --lr 1e-4 --batch_size 16 --n_resblocks 20 --save_models \
--epoch 1000 --decay 200-400-600-800 --model RAFTNETS --scale 50 --patch_size 48 \
--save RAFTS_N50_R4_nobn --n_feats 64 --data_train DIV2K --recurrence 4 --normalization none \
--load RAFTS_N50_R4_nobn --resume -1
DN_RGB/code/.ipynb_checkpoints/train-checkpoint.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #!/bin/bash
2
+ bash ./prepare.sh
3
+ CUDA_VISIBLE_DEVICES=0 python main.py --n_GPUs 1 --lr 1e-5 --batch_size 16 --save_models --epoch 100 --model PANET \
4
+ --scale 50 --patch_size 48 --reset --save MDSR_att_N50 --n_feats 64 --data_train DIV2K --chop --dir_data="/data/ssd/public/liuhy/sr/dataset"
DN_RGB/code/.ipynb_checkpoints/trainer-checkpoint.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import math
3
+ from decimal import Decimal
4
+
5
+ import utility
6
+
7
+ import torch
8
+ import torch.nn.utils as utils
9
+ from tqdm import tqdm
10
+
11
+ import torch.cuda.amp as amp
12
+
13
+ from torch.utils.tensorboard import SummaryWriter
14
+ import torchvision
15
+
16
+ import numpy as np
17
+
18
class Trainer():
    """Orchestrate training and evaluation of a denoising model.

    Ties together the parsed options (``args``), the train/test data
    loaders, the model, the loss module and the checkpoint helper
    (``ckp``). Supports mixed-precision training via ``torch.cuda.amp``
    and, for recurrent models (``args.recurrence > 1``), TensorBoard
    logging of per-recurrence losses and sample images.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)

        # When resuming a saved experiment, restore optimizer state too.
        if self.args.load != '':
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8
        # GradScaler is a no-op when AMP is disabled (enabled=False).
        self.scaler=amp.GradScaler(
            enabled=args.amp
        )
        # "writter" (sic) is only created for recurrent models; all other
        # code paths guard on `if self.writter:`.
        self.writter=None
        self.recurrence=args.recurrence
        if args.recurrence>1:
            self.writter=SummaryWriter(f"runs/{args.save}")

    def train(self):
        """Run one training epoch over ``loader_train``."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP
        self.loader_train.dataset.set_scale(0)
        total=len(self.loader_train)
        # Running per-recurrence loss, averaged over the epoch for TensorBoard.
        buffer=[0.0]*self.recurrence
        # torch.autograd.set_detect_anomaly(True)
        # NOTE: `lr` is rebound inside the loop from learning-rate to the
        # low-quality (noisy) input batch.
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            # print(lr.min(),lr.max(), hr.min(),hr.max())
            # exit(0)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            # Forward + loss under autocast; backward/step run outside,
            # through the GradScaler (required AMP ordering).
            with amp.autocast(self.args.amp):
                sr = self.model(lr, 0)
                # Unwrap single-output models that return a one-element list.
                # NOTE(review): if the model returns a bare tensor, len(sr)
                # is the batch size — this assumes a list/tuple; confirm.
                if len(sr)==1:
                    sr=sr[0]
                # loss,buffer_lst=sequence_loss(sr,hr)
                loss = self.loss(sr, hr)
            self.scaler.scale(loss).backward()
            if self.args.gclip > 0:
                # Gradients must be unscaled before clipping raw values.
                self.scaler.unscale_(self.optimizer)
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.scaler.step(self.optimizer)
            self.scaler.update()
            for i in range(self.recurrence):
                buffer[i]+=self.loss.buffer[i]
            # self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()
        # Once per epoch: log the last batch's images and the mean
        # per-recurrence losses (diff-mangled indentation reconstructed;
        # epoch-level placement presumed from the `epoch` step index — confirm).
        if self.writter:
            for i in range(self.recurrence):
                grid=torchvision.utils.make_grid(sr[i])
                self.writter.add_image(f"Output{i}",grid,epoch)
                self.writter.add_scalar(f"Loss{i}",buffer[i]/total,epoch)
            self.writter.add_image("Input",torchvision.utils.make_grid(lr),epoch)
            self.writter.add_image("Target",torchvision.utils.make_grid(hr),epoch)
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set and scale, logging PSNR and
        optionally saving result images via the checkpoint helper."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    with amp.autocast(self.args.amp):
                        sr = self.model(lr, idx_scale)
                    # Recurrent models return a list; keep the final output.
                    if isinstance(sr,list):
                        sr=sr[-1]
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                # Average PSNR over the dataset, then report best-so-far.
                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')
        # torch.cuda.empty_cache()
        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move each tensor to the target device, casting to half
        precision first when ``--precision half`` is set."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop: immediately after a
        test-only evaluation, or once the epoch budget is exhausted."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
+
DN_RGB/code/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2018 Sanghyun Son
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
DN_RGB/code/__init__.py ADDED
File without changes
DN_RGB/code/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (139 Bytes). View file
 
DN_RGB/code/__pycache__/option.cpython-36.pyc ADDED
Binary file (4.64 kB). View file
 
DN_RGB/code/__pycache__/option.cpython-37.pyc ADDED
Binary file (4.94 kB). View file
 
DN_RGB/code/__pycache__/template.cpython-36.pyc ADDED
Binary file (1.01 kB). View file
 
DN_RGB/code/__pycache__/template.cpython-37.pyc ADDED
Binary file (997 Bytes). View file
 
DN_RGB/code/__pycache__/trainer.cpython-36.pyc ADDED
Binary file (3.99 kB). View file
 
DN_RGB/code/__pycache__/trainer.cpython-37.pyc ADDED
Binary file (4.84 kB). View file
 
DN_RGB/code/__pycache__/utility.cpython-36.pyc ADDED
Binary file (9.04 kB). View file
 
DN_RGB/code/__pycache__/utility.cpython-37.pyc ADDED
Binary file (9.11 kB). View file
 
DN_RGB/code/data/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from importlib import import_module
2
+ #from dataloader import MSDataLoader
3
+ from torch.utils.data import dataloader
4
+ from torch.utils.data import ConcatDataset
5
+
6
+ # This is a simple wrapper function for ConcatDataset
7
class MyConcatDataset(ConcatDataset):
    """ConcatDataset that exposes a train flag and forwards scale changes.

    The train/eval flag is mirrored from the first member dataset;
    set_scale is broadcast to every member that supports it.
    """

    def __init__(self, datasets):
        super(MyConcatDataset, self).__init__(datasets)
        # Assume all members share the same mode; mirror the first one.
        self.train = datasets[0].train

    def set_scale(self, idx_scale):
        # Only forward to members that implement multi-scale selection.
        for member in self.datasets:
            if hasattr(member, 'set_scale'):
                member.set_scale(idx_scale)
15
+
16
class Data:
    """Builds the data loaders from CLI arguments.

    Creates one shuffled training loader over the concatenation of all
    args.data_train datasets (skipped in test-only mode), and one
    batch-size-1 loader per dataset named in args.data_test.
    """

    def __init__(self, args):
        self.loader_train = None
        if not args.test_only:
            train_sets = []
            for name in args.data_train:
                # 'DIV2K-Q<q>' names are served by the DIV2KJPEG dataset class.
                module = name if name.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module.lower())
                train_sets.append(getattr(m, module)(args, name=name))

            self.loader_train = dataloader.DataLoader(
                MyConcatDataset(train_sets),
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=not args.cpu,
                num_workers=args.n_threads,
            )

        # Known benchmark sets share a single loader implementation.
        benchmarks = ('CBSD68', 'Kodak24', 'Set5', 'Set14', 'B100', 'Urban100')
        self.loader_test = []
        for name in args.data_test:
            if name in benchmarks:
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=name)
            else:
                module = name if name.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module.lower())
                testset = getattr(m, module)(args, train=False, name=name)

            self.loader_test.append(
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                )
            )
DN_RGB/code/data/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (1.82 kB). View file
 
DN_RGB/code/data/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (1.78 kB). View file
 
DN_RGB/code/data/__pycache__/benchmark.cpython-36.pyc ADDED
Binary file (1.08 kB). View file
 
DN_RGB/code/data/__pycache__/benchmark.cpython-37.pyc ADDED
Binary file (1.06 kB). View file
 
DN_RGB/code/data/__pycache__/common.cpython-36.pyc ADDED
Binary file (2.77 kB). View file
 
DN_RGB/code/data/__pycache__/common.cpython-37.pyc ADDED
Binary file (2.74 kB). View file
 
DN_RGB/code/data/__pycache__/div2k.cpython-36.pyc ADDED
Binary file (1.76 kB). View file
 
DN_RGB/code/data/__pycache__/div2k.cpython-37.pyc ADDED
Binary file (1.75 kB). View file
 
DN_RGB/code/data/__pycache__/srdata.cpython-36.pyc ADDED
Binary file (4.83 kB). View file
 
DN_RGB/code/data/__pycache__/srdata.cpython-37.pyc ADDED
Binary file (4.82 kB). View file
 
DN_RGB/code/data/benchmark.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from data import common
4
+ from data import srdata
5
+
6
+ import numpy as np
7
+
8
+ import torch
9
+ import torch.utils.data as data
10
+
11
class Benchmark(srdata.SRData):
    """Benchmark test set (e.g. CBSD68, Kodak24) under <dir_data>/benchmark/<name>.

    Always constructed with benchmark=True regardless of the argument,
    matching the original behavior.
    """

    def __init__(self, args, name='', train=True, benchmark=True):
        super(Benchmark, self).__init__(
            args, name=name, train=train, benchmark=True
        )

    def _set_filesystem(self, dir_data):
        # HR images live in .../<name>/HR; LR images in LR_bicubic
        # ('L' suffix for pre-upsampled, input-large layouts).
        self.apath = os.path.join(dir_data, 'benchmark', self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        lr_folder = 'LR_bicubicL' if self.input_large else 'LR_bicubic'
        self.dir_lr = os.path.join(self.apath, lr_folder)
        self.ext = ('', '.png')
25
+
DN_RGB/code/data/common.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+
3
+ import numpy as np
4
+ import skimage.color as sc
5
+
6
+ import torch
7
+
8
def get_patch(*args, patch_size=96, scale=1, multi=False, input_large=False):
    """Crop the same random square window from every input image.

    This is the denoising variant of EDSR's get_patch: input and target
    share one resolution, so the original SR bookkeeping had degenerated
    into no-ops (`p = 1 if multi else 1`, `ip = tp // 1`, `tx = 1 * ix`)
    and both the input_large and regular branches computed identical
    values. That dead logic is removed; `scale`, `multi` and
    `input_large` are kept only for call-site compatibility.

    Args:
        *args: HWC numpy images, each at least patch_size in both
            spatial dimensions.
        patch_size: side length of the square crop.
        scale, multi, input_large: ignored (retained for compatibility).

    Returns:
        list: one patch_size x patch_size x C view per input image, all
        taken from the same (random) top-left corner so noisy/clean
        pairs stay pixel-aligned.
    """
    ih, iw = args[0].shape[:2]

    # Draw x first, then y, preserving the original RNG consumption order.
    ix = random.randrange(0, iw - patch_size + 1)
    iy = random.randrange(0, ih - patch_size + 1)

    return [a[iy:iy + patch_size, ix:ix + patch_size, :] for a in args]
33
+
34
def set_channel(*args, n_channels=3):
    """Coerce every image to n_channels channels (HWC layout).

    2-D grayscale arrays gain a trailing channel axis first. RGB inputs
    collapsed to one channel keep only the YCbCr luma plane; one-channel
    inputs expanded to three simply repeat the channel.

    Args:
        *args: numpy images, HW or HWC.
        n_channels: desired channel count (1 or 3).

    Returns:
        list: one HWC image per input with n_channels channels.
    """
    def _coerce(img):
        if img.ndim == 2:
            img = np.expand_dims(img, axis=2)

        c = img.shape[2]
        if n_channels == 1 and c == 3:
            # Keep only the luma (Y) plane of the YCbCr conversion.
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channels == 3 and c == 1:
            img = np.concatenate([img] * n_channels, 2)
        return img

    return [_coerce(a) for a in args]
48
+
49
def np2Tensor(*args, rgb_range=255):
    """Convert HWC numpy images to CHW float tensors scaled to rgb_range.

    Args:
        *args: HWC numpy images with values in the 0-255 source range.
        rgb_range: target maximum value (e.g. 1 or 255).

    Returns:
        list: one float32 CHW torch tensor per input image.
    """
    def _convert(img):
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
        t = torch.from_numpy(chw).float()
        # Rescale from the 0-255 source range to the configured rgb_range.
        t.mul_(rgb_range / 255)
        return t

    return [_convert(a) for a in args]
58
+
59
def augment(*args, hflip=True, rot=True):
    """Apply one random flip/rotation combination to all images at once.

    The three coin flips are drawn a single time, so every image in
    args receives the identical transform and input/target pairs stay
    aligned.

    Args:
        *args: HWC numpy images.
        hflip: allow a horizontal flip.
        rot: allow a vertical flip and a 90-degree transpose.

    Returns:
        list: one (possibly transformed) image per input.
    """
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_apply(a) for a in args]
72
+
DN_RGB/code/data/demo.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from data import common
4
+
5
+ import numpy as np
6
+ import imageio
7
+
8
+ import torch
9
+ import torch.utils.data as data
10
+
11
class Demo(data.Dataset):
    """Dataset over loose .png/.jp(e)g files in args.dir_demo, for inference.

    Each item is (lr_tensor, -1, filename); there is no ground-truth
    image, hence the -1 placeholder.
    """

    def __init__(self, args, name='Demo', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False  # demo data is never used for training
        self.benchmark = benchmark

        # Collect every PNG/JPEG in the demo directory, in sorted order.
        self.filelist = sorted(
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if f.find('.png') >= 0 or f.find('.jp') >= 0
        )

    def __getitem__(self, idx):
        path = self.filelist[idx]
        filename = os.path.splitext(os.path.basename(path))[0]
        lr = imageio.imread(path)
        lr, = common.set_channel(lr, n_channels=self.args.n_colors)
        lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
        return lr_t, -1, filename

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
39
+
DN_RGB/code/data/div2k.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from data import srdata
3
+
4
class DIV2K(srdata.SRData):
    """DIV2K dataset with the train/eval split taken from args.data_range.

    data_range has the form 'a-b' or 'a-b/c-d' (train indices / eval
    indices); a single range is reused for evaluation in test-only mode.
    """

    def __init__(self, args, name='DIV2K', train=True, benchmark=False):
        splits = [r.split('-') for r in args.data_range.split('/')]
        if train:
            chosen = splits[0]
        elif args.test_only and len(splits) == 1:
            # Only one range was given; use it as-is for testing.
            chosen = splits[0]
        else:
            chosen = splits[1]

        self.begin, self.end = (int(v) for v in chosen)
        super(DIV2K, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )

    def _scan(self):
        # Restrict the full file listing to the configured index window.
        names_hr, names_lr = super(DIV2K, self)._scan()
        names_hr = names_hr[self.begin - 1:self.end]
        names_lr = [lst[self.begin - 1:self.end] for lst in names_lr]
        return names_hr, names_lr

    def _set_filesystem(self, dir_data):
        super(DIV2K, self)._set_filesystem(dir_data)
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic')
        if self.input_large:
            self.dir_lr += 'L'
32
+
DN_RGB/code/data/div2kjpeg.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from data import srdata
3
+ from data import div2k
4
+
5
class DIV2KJPEG(div2k.DIV2K):
    """DIV2K with JPEG-compressed LR inputs.

    The JPEG quality factor is parsed from the dataset name, which must
    have the form 'DIV2K-Q<q>' (e.g. 'DIV2K-Q75' -> q_factor=75).
    """

    def __init__(self, args, name='', train=True, benchmark=False):
        self.q_factor = int(name.replace('DIV2K-Q', ''))
        super(DIV2KJPEG, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )

    def _set_filesystem(self, dir_data):
        self.apath = os.path.join(dir_data, 'DIV2K')
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        # LR images for quality q live in DIV2K_Q<q> (plus 'L' when
        # pre-upsampled inputs are used).
        self.dir_lr = os.path.join(
            self.apath, 'DIV2K_Q{}'.format(self.q_factor)
        )
        if self.input_large:
            self.dir_lr += 'L'
        self.ext = ('.png', '.jpg')
20
+