fix-1
- SwinIR/infer.py +93 -0
- SwinIR/main_test_swinir.py +309 -0
- SwinIR/models/network_swinir.py +867 -0
- SwinIR/weight/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth +3 -0
- app.py +31 -100
- images/04011.png +0 -0
- images/04033.png +0 -0
- images/04064.png +0 -0
- images/04132.png +0 -0
- images/04146.png +0 -0
- images/10091.png +0 -0
- sam_diffsr/cache/hub/checkpoints/alexnet-owt-7be5be79.pth +3 -0
- sam_diffsr/infer.py +86 -0
- sam_diffsr/models_sr/diffusion.py +1 -1
- sam_diffsr/models_sr/diffusion_sam.py +1 -1
SwinIR/infer.py
ADDED
@@ -0,0 +1,93 @@
import os

import numpy as np
import torch

from SwinIR.models.network_swinir import SwinIR as net

ROOT_PATH = os.path.dirname(__file__)


class SwinIRDemo:
    def __init__(self):
        self.scale = 4
        self.window_size = 8
        self.tile = 800
        self.tile_overlap = 32
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'  # fall back to CPU when no GPU is available

        model_path = os.path.join(ROOT_PATH, 'weight/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth')
        self.model = self.model_init(model_path)

    def model_init(self, model_path):
        model = net(upscale=self.scale, in_chans=3, img_size=64, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='nearest+conv', resi_connection='1conv')
        param_key_g = 'params_ema'

        pretrained_model = torch.load(model_path, map_location=self.device)
        model.load_state_dict(
            pretrained_model[param_key_g] if param_key_g in pretrained_model.keys() else pretrained_model,
            strict=True)

        model.eval()
        model = model.to(self.device)
        return model

    def img_preprocess(self, img_PIL, device, window_size):
        img_lq = np.asarray(img_PIL)
        img_lq = img_lq / 255.

        img_lq = np.transpose(img_lq[:, :, [0, 1, 2]], (2, 0, 1))  # HWC-RGB to CHW-RGB
        img_lq = torch.from_numpy(img_lq).float().unsqueeze(0).to(device)  # CHW-RGB to NCHW-RGB

        # pad input image to be a multiple of window_size
        _, _, h_old, w_old = img_lq.size()
        h_pad = (h_old // window_size + 1) * window_size - h_old
        w_pad = (w_old // window_size + 1) * window_size - w_old
        img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
        img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]

        return img_lq, h_old, w_old

    def test(self, img_lq):
        # overlapping-tile inference: accumulate tile outputs in E and per-pixel
        # hit counts in W, then average the overlap regions
        b, c, h, w = img_lq.size()
        tile = min(self.tile, h, w)
        assert tile % self.window_size == 0, "tile size should be a multiple of window_size"
        sf = self.scale

        stride = tile - self.tile_overlap
        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
        E = torch.zeros(b, c, h * sf, w * sf).type_as(img_lq)
        W = torch.zeros_like(E)

        for h_idx in h_idx_list:
            for w_idx in w_idx_list:
                in_patch = img_lq[..., h_idx:h_idx + tile, w_idx:w_idx + tile]
                out_patch = self.model(in_patch)
                out_patch_mask = torch.ones_like(out_patch)

                E[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch)
                W[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch_mask)
        output = E.div_(W)

        return output

    def infer(self, img_lq):
        img_lq, h_old, w_old = self.img_preprocess(img_lq, self.device, self.window_size)

        with torch.no_grad():
            output = self.test(img_lq)
            output = output[..., :h_old * self.scale, :w_old * self.scale]

        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        if output.ndim == 3:
            output = np.transpose(output[[0, 1, 2], :, :], (1, 2, 0))  # CHW-RGB to HWC-RGB
        output = (output * 255.0).round().astype(np.uint8)

        return output
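For reference, a minimal usage sketch for `SwinIRDemo` (my sketch, not part of the commit; 'input.png' is a placeholder path and any RGB image works):

from PIL import Image
from SwinIR.infer import SwinIRDemo

demo = SwinIRDemo()                           # loads the x4 real-world SR weights once
img = Image.open('input.png').convert('RGB')  # 'input.png' is a hypothetical file
sr = demo.infer(img)                          # uint8 HWC-RGB array, 4x the input resolution
Image.fromarray(sr).save('input_x4.png')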
SwinIR/main_test_swinir.py
ADDED
@@ -0,0 +1,309 @@
import argparse
import cv2
import glob
import numpy as np
from collections import OrderedDict
import os
import torch
import requests

from models.network_swinir import SwinIR as net
from utils import util_calculate_psnr_ssim as util


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='color_dn', help='classical_sr, lightweight_sr, real_sr, '
                                                                     'gray_dn, color_dn, jpeg_car, color_jpeg_car')
    parser.add_argument('--scale', type=int, default=1, help='scale factor: 1, 2, 3, 4, 8')  # 1 for dn and jpeg car
    parser.add_argument('--noise', type=int, default=15, help='noise level: 15, 25, 50')
    parser.add_argument('--jpeg', type=int, default=40, help='JPEG quality factor: 10, 20, 30, 40')
    parser.add_argument('--training_patch_size', type=int, default=128, help='patch size used in training SwinIR. '
                                       'Just used to differentiate two different settings in Table 2 of the paper. '
                                       'Images are NOT tested patch by patch.')
    parser.add_argument('--large_model', action='store_true', help='use large model, only provided for real image sr')
    parser.add_argument('--model_path', type=str,
                        default='model_zoo/swinir/001_classicalSR_DIV2K_s48w8_SwinIR-M_x2.pth')
    parser.add_argument('--folder_lq', type=str, default=None, help='input low-quality test image folder')
    parser.add_argument('--folder_gt', type=str, default=None, help='input ground-truth test image folder')
    parser.add_argument('--tile', type=int, default=None, help='Tile size, None for no tile during testing (testing as a whole)')
    parser.add_argument('--tile_overlap', type=int, default=32, help='Overlapping of different tiles')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # set up model
    if os.path.exists(args.model_path):
        print(f'loading model from {args.model_path}')
    else:
        os.makedirs(os.path.dirname(args.model_path), exist_ok=True)
        url = 'https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}'.format(os.path.basename(args.model_path))
        r = requests.get(url, allow_redirects=True)
        print(f'downloading model {args.model_path}')
        open(args.model_path, 'wb').write(r.content)

    model = define_model(args)
    model.eval()
    model = model.to(device)

    # setup folder and path
    folder, save_dir, border, window_size = setup(args)
    os.makedirs(save_dir, exist_ok=True)
    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []
    test_results['psnrb'] = []
    test_results['psnrb_y'] = []
    psnr, ssim, psnr_y, ssim_y, psnrb, psnrb_y = 0, 0, 0, 0, 0, 0

    for idx, path in enumerate(sorted(glob.glob(os.path.join(folder, '*')))):
        # read image
        imgname, img_lq, img_gt = get_image_pair(args, path)  # image to HWC-BGR, float32
        img_lq = np.transpose(img_lq if img_lq.shape[2] == 1 else img_lq[:, :, [2, 1, 0]], (2, 0, 1))  # HWC-BGR to CHW-RGB
        img_lq = torch.from_numpy(img_lq).float().unsqueeze(0).to(device)  # CHW-RGB to NCHW-RGB

        # inference
        with torch.no_grad():
            # pad input image to be a multiple of window_size
            _, _, h_old, w_old = img_lq.size()
            h_pad = (h_old // window_size + 1) * window_size - h_old
            w_pad = (w_old // window_size + 1) * window_size - w_old
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]
            output = test(img_lq, model, args, window_size)
            output = output[..., :h_old * args.scale, :w_old * args.scale]

        # save image
        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        if output.ndim == 3:
            output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))  # CHW-RGB to HWC-BGR
        output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
        cv2.imwrite(f'{save_dir}/{imgname}_SwinIR.png', output)

        # evaluate psnr/ssim/psnr_b
        if img_gt is not None:
            img_gt = (img_gt * 255.0).round().astype(np.uint8)  # float32 to uint8
            img_gt = img_gt[:h_old * args.scale, :w_old * args.scale, ...]  # crop gt
            img_gt = np.squeeze(img_gt)

            psnr = util.calculate_psnr(output, img_gt, crop_border=border)
            ssim = util.calculate_ssim(output, img_gt, crop_border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            if img_gt.ndim == 3:  # RGB image
                psnr_y = util.calculate_psnr(output, img_gt, crop_border=border, test_y_channel=True)
                ssim_y = util.calculate_ssim(output, img_gt, crop_border=border, test_y_channel=True)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)
            if args.task in ['jpeg_car', 'color_jpeg_car']:
                psnrb = util.calculate_psnrb(output, img_gt, crop_border=border, test_y_channel=False)
                test_results['psnrb'].append(psnrb)
                if args.task in ['color_jpeg_car']:
                    psnrb_y = util.calculate_psnrb(output, img_gt, crop_border=border, test_y_channel=True)
                    test_results['psnrb_y'].append(psnrb_y)
            print('Testing {:d} {:20s} - PSNR: {:.2f} dB; SSIM: {:.4f}; PSNRB: {:.2f} dB; '
                  'PSNR_Y: {:.2f} dB; SSIM_Y: {:.4f}; PSNRB_Y: {:.2f} dB.'.
                  format(idx, imgname, psnr, ssim, psnrb, psnr_y, ssim_y, psnrb_y))
        else:
            print('Testing {:d} {:20s}'.format(idx, imgname))

    # summarize psnr/ssim
    if img_gt is not None:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        print('\n{} \n-- Average PSNR/SSIM(RGB): {:.2f} dB; {:.4f}'.format(save_dir, ave_psnr, ave_ssim))
        if img_gt.ndim == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
            print('-- Average PSNR_Y/SSIM_Y: {:.2f} dB; {:.4f}'.format(ave_psnr_y, ave_ssim_y))
        if args.task in ['jpeg_car', 'color_jpeg_car']:
            ave_psnrb = sum(test_results['psnrb']) / len(test_results['psnrb'])
            print('-- Average PSNRB: {:.2f} dB'.format(ave_psnrb))
            if args.task in ['color_jpeg_car']:
                ave_psnrb_y = sum(test_results['psnrb_y']) / len(test_results['psnrb_y'])
                print('-- Average PSNRB_Y: {:.2f} dB'.format(ave_psnrb_y))


def define_model(args):
    # 001 classical image sr
    if args.task == 'classical_sr':
        model = net(upscale=args.scale, in_chans=3, img_size=args.training_patch_size, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='pixelshuffle', resi_connection='1conv')
        param_key_g = 'params'

    # 002 lightweight image sr
    # use 'pixelshuffledirect' to save parameters
    elif args.task == 'lightweight_sr':
        model = net(upscale=args.scale, in_chans=3, img_size=64, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6], embed_dim=60, num_heads=[6, 6, 6, 6],
                    mlp_ratio=2, upsampler='pixelshuffledirect', resi_connection='1conv')
        param_key_g = 'params'

    # 003 real-world image sr
    elif args.task == 'real_sr':
        if not args.large_model:
            # use 'nearest+conv' to avoid block artifacts
            model = net(upscale=args.scale, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='1conv')
        else:
            # larger model size; use '3conv' to save parameters and memory; use ema for GAN training
            model = net(upscale=args.scale, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
                        num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='3conv')
        param_key_g = 'params_ema'

    # 004 grayscale image denoising
    elif args.task == 'gray_dn':
        model = net(upscale=1, in_chans=1, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'

    # 005 color image denoising
    elif args.task == 'color_dn':
        model = net(upscale=1, in_chans=3, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'

    # 006 grayscale JPEG compression artifact reduction
    # use window_size=7 because JPEG encoding uses 8x8; use img_range=255 because it's slightly better than 1
    elif args.task == 'jpeg_car':
        model = net(upscale=1, in_chans=1, img_size=126, window_size=7,
                    img_range=255., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'

    # 006 color JPEG compression artifact reduction
    # use window_size=7 because JPEG encoding uses 8x8; use img_range=255 because it's slightly better than 1
    elif args.task == 'color_jpeg_car':
        model = net(upscale=1, in_chans=3, img_size=126, window_size=7,
                    img_range=255., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'

    pretrained_model = torch.load(args.model_path)
    model.load_state_dict(pretrained_model[param_key_g] if param_key_g in pretrained_model.keys() else pretrained_model, strict=True)

    return model


def setup(args):
    # 001 classical image sr/ 002 lightweight image sr
    if args.task in ['classical_sr', 'lightweight_sr']:
        save_dir = f'results/swinir_{args.task}_x{args.scale}'
        folder = args.folder_gt
        border = args.scale
        window_size = 8

    # 003 real-world image sr
    elif args.task in ['real_sr']:
        save_dir = f'results/swinir_{args.task}_x{args.scale}'
        if args.large_model:
            save_dir += '_large'
        folder = args.folder_lq
        border = 0
        window_size = 8

    # 004 grayscale image denoising/ 005 color image denoising
    elif args.task in ['gray_dn', 'color_dn']:
        save_dir = f'results/swinir_{args.task}_noise{args.noise}'
        folder = args.folder_gt
        border = 0
        window_size = 8

    # 006 JPEG compression artifact reduction
    elif args.task in ['jpeg_car', 'color_jpeg_car']:
        save_dir = f'results/swinir_{args.task}_jpeg{args.jpeg}'
        folder = args.folder_gt
        border = 0
        window_size = 7

    return folder, save_dir, border, window_size


def get_image_pair(args, path):
    (imgname, imgext) = os.path.splitext(os.path.basename(path))

    # 001 classical image sr/ 002 lightweight image sr (load lq-gt image pairs)
    if args.task in ['classical_sr', 'lightweight_sr']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        img_lq = cv2.imread(f'{args.folder_lq}/{imgname}x{args.scale}{imgext}', cv2.IMREAD_COLOR).astype(
            np.float32) / 255.

    # 003 real-world image sr (load lq image only)
    elif args.task in ['real_sr']:
        img_gt = None
        img_lq = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.

    # 004 grayscale image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['gray_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
        np.random.seed(seed=0)
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)
        img_gt = np.expand_dims(img_gt, axis=2)
        img_lq = np.expand_dims(img_lq, axis=2)

    # 005 color image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['color_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        np.random.seed(seed=0)
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)

    # 006 grayscale JPEG compression artifact reduction (load gt image and generate lq image on-the-fly)
    elif args.task in ['jpeg_car']:
        img_gt = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img_gt.ndim != 2:
            img_gt = util.bgr2ycbcr(img_gt, y_only=True)
        result, encimg = cv2.imencode('.jpg', img_gt, [int(cv2.IMWRITE_JPEG_QUALITY), args.jpeg])
        img_lq = cv2.imdecode(encimg, 0)
        img_gt = np.expand_dims(img_gt, axis=2).astype(np.float32) / 255.
        img_lq = np.expand_dims(img_lq, axis=2).astype(np.float32) / 255.

    # 006 color JPEG compression artifact reduction (load gt image and generate lq image on-the-fly)
    elif args.task in ['color_jpeg_car']:
        img_gt = cv2.imread(path)
        result, encimg = cv2.imencode('.jpg', img_gt, [int(cv2.IMWRITE_JPEG_QUALITY), args.jpeg])
        img_lq = cv2.imdecode(encimg, 1)
        img_gt = img_gt.astype(np.float32) / 255.
        img_lq = img_lq.astype(np.float32) / 255.

    return imgname, img_lq, img_gt


def test(img_lq, model, args, window_size):
    if args.tile is None:
        # test the image as a whole
        output = model(img_lq)
    else:
        # test the image tile by tile
        b, c, h, w = img_lq.size()
        tile = min(args.tile, h, w)
        assert tile % window_size == 0, "tile size should be a multiple of window_size"
        tile_overlap = args.tile_overlap
        sf = args.scale

        stride = tile - tile_overlap
        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
        E = torch.zeros(b, c, h * sf, w * sf).type_as(img_lq)
        W = torch.zeros_like(E)

        for h_idx in h_idx_list:
            for w_idx in w_idx_list:
                in_patch = img_lq[..., h_idx:h_idx + tile, w_idx:w_idx + tile]
                out_patch = model(in_patch)
                out_patch_mask = torch.ones_like(out_patch)

                E[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch)
                W[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch_mask)
        output = E.div_(W)

    return output


if __name__ == '__main__':
    main()
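The tile-by-tile branch of `test` accumulates overlapping patch outputs in `E` and per-pixel hit counts in `W`, so `E.div_(W)` averages every overlap region. A self-contained sketch of that scheme with an identity stand-in for the model (my toy example, assuming scale factor 1) shows the stitched result matches whole-image processing:

import torch

def tiled(x, model, tile=8, overlap=4, sf=1):
    b, c, h, w = x.size()
    stride = tile - overlap
    E = torch.zeros(b, c, h * sf, w * sf)  # accumulated outputs
    W = torch.zeros_like(E)                # per-pixel hit counts
    for hi in list(range(0, h - tile, stride)) + [h - tile]:
        for wi in list(range(0, w - tile, stride)) + [w - tile]:
            E[..., hi * sf:(hi + tile) * sf, wi * sf:(wi + tile) * sf] += model(x[..., hi:hi + tile, wi:wi + tile])
            W[..., hi * sf:(hi + tile) * sf, wi * sf:(wi + tile) * sf] += 1
    return E.div_(W)                       # average the overlaps

x = torch.rand(1, 3, 20, 20)
assert torch.allclose(tiled(x, lambda t: t), x)  # identity model stitches back exactly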
SwinIR/models/network_swinir.py
ADDED
@@ -0,0 +1,867 @@
# -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally Written by Ze Liu, Modified by Jingyun Liang.
# -----------------------------------------------------------------------------------

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
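`window_partition` and `window_reverse` are exact inverses whenever H and W are multiples of the window size; a quick standalone check (my sketch, calling the two functions above):

import torch

x = torch.rand(2, 16, 16, 3)                               # B, H, W, C with H, W divisible by 8
windows = window_partition(x, 8)                           # 2 images x 4 windows = (8, 8, 8, 3)
assert windows.shape == (8, 8, 8, 3)
assert torch.equal(window_reverse(windows, 8, 16, 16), x)  # exact round trip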


class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)

        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops


class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x, x_size):
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA (recompute the mask when the test image size differs from the training resolution)
        if self.input_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        else:
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
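Consecutive blocks alternate shift_size=0 (W-MSA) and window_size//2 (SW-MSA); the cyclic shift is a plain `torch.roll` that is undone after attention, so each block maps (B, H*W, C) tokens to the same shape. A shape check (my sketch, instantiating the block above):

import torch

blk = SwinTransformerBlock(dim=96, input_resolution=(16, 16), num_heads=6,
                           window_size=8, shift_size=4)  # the SW-MSA variant
x = torch.rand(1, 16 * 16, 96)                           # tokens flattened to (B, H*W, C)
assert blk(x, (16, 16)).shape == x.shape                 # attention + MLP preserve the layout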


class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."

        x = x.view(B, H, W, C)

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops


class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, x_size):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops


class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv'):
        super(RSTB, self).__init__()

        self.dim = dim
        self.input_resolution = input_resolution

        self.residual_group = BasicLayer(dim=dim,
                                         input_resolution=input_resolution,
                                         depth=depth,
                                         num_heads=num_heads,
                                         window_size=window_size,
                                         mlp_ratio=mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop, attn_drop=attn_drop,
                                         drop_path=drop_path,
                                         norm_layer=norm_layer,
                                         downsample=downsample,
                                         use_checkpoint=use_checkpoint)

        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                                      nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim, 3, 1, 1))

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x

    def flops(self):
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()

        return flops
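`RSTB.forward` un-embeds the tokens to a (B, C, H, W) feature map, applies the conv block, re-embeds, and adds the identity skip (the trailing `+ x`), so token count and channel dim are unchanged. A shape sketch (mine; it relies on the PatchEmbed/PatchUnEmbed classes defined just below):

import torch

rstb = RSTB(dim=60, input_resolution=(16, 16), depth=2, num_heads=6,
            window_size=8, img_size=16, patch_size=1)
x = torch.rand(1, 16 * 16, 60)
assert rstb(x, (16, 16)).shape == x.shape  # residual group + conv keep the token shape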


class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x

    def flops(self):
        flops = 0
        H, W = self.img_size
        if self.norm is not None:
            flops += H * W * self.embed_dim
        return flops


class PatchUnEmbed(nn.Module):
    r""" Image to Patch Unembedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        B, HW, C = x.shape
        x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
        return x

    def flops(self):
        flops = 0
        return flops


class Upsample(nn.Sequential):
    """Upsample module.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                m.append(nn.PixelShuffle(2))
        elif scale == 3:
            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            m.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)
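Each 2x stage of `Upsample` is a 3x3 conv that expands channels 4x followed by `nn.PixelShuffle(2)`, which trades those channels for a 2x larger feature map. A quick shape check (my sketch, using the class above):

import torch

up = Upsample(scale=4, num_feat=64)    # two conv + PixelShuffle(2) stages
x = torch.rand(1, 64, 16, 16)
assert up(x).shape == (1, 64, 64, 64)  # channels preserved, H and W each x4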
592 |
+
|
593 |
+
|
594 |
+
class UpsampleOneStep(nn.Sequential):
|
595 |
+
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
|
596 |
+
Used in lightweight SR to save parameters.
|
597 |
+
|
598 |
+
Args:
|
599 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
600 |
+
num_feat (int): Channel number of intermediate features.
|
601 |
+
|
602 |
+
"""
|
603 |
+
|
604 |
+
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
|
605 |
+
self.num_feat = num_feat
|
606 |
+
self.input_resolution = input_resolution
|
607 |
+
m = []
|
608 |
+
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
|
609 |
+
m.append(nn.PixelShuffle(scale))
|
610 |
+
super(UpsampleOneStep, self).__init__(*m)
|
611 |
+
|
612 |
+
def flops(self):
|
613 |
+
H, W = self.input_resolution
|
614 |
+
flops = H * W * self.num_feat * 3 * 9
|
615 |
+
return flops
|
616 |
+
|
617 |
+
|
618 |
+
class SwinIR(nn.Module):
|
619 |
+
r""" SwinIR
|
620 |
+
A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
|
621 |
+
|
622 |
+
Args:
|
623 |
+
img_size (int | tuple(int)): Input image size. Default 64
|
624 |
+
patch_size (int | tuple(int)): Patch size. Default: 1
|
625 |
+
in_chans (int): Number of input image channels. Default: 3
|
626 |
+
embed_dim (int): Patch embedding dimension. Default: 96
|
627 |
+
depths (tuple(int)): Depth of each Swin Transformer layer.
|
628 |
+
num_heads (tuple(int)): Number of attention heads in different layers.
|
629 |
+
window_size (int): Window size. Default: 7
|
630 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
631 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
632 |
+
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
|
633 |
+
drop_rate (float): Dropout rate. Default: 0
|
634 |
+
attn_drop_rate (float): Attention dropout rate. Default: 0
|
635 |
+
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
636 |
+
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
637 |
+
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
638 |
+
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
639 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
640 |
+
upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
|
641 |
+
img_range: Image range. 1. or 255.
|
642 |
+
upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
|
643 |
+
resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
|
644 |
+
"""
|
645 |
+
|
    def __init__(self, img_size=64, patch_size=1, in_chans=3,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size

        #####################################################################################################
        ################################### 1, shallow feature extraction ###################################
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)

        #####################################################################################################
        ################################### 2, deep feature extraction ######################################
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         norm_layer=norm_layer,
                         downsample=None,
                         use_checkpoint=use_checkpoint,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection
                         )
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)

        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))

        #####################################################################################################
        ################################ 3, high quality image reconstruction ################################
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            if self.upscale == 4:
                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x, x_size)

        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)

        return x

    def forward(self, x):
        H, W = x.shape[2:]
        x = self.check_image_size(x)

        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range

        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            if self.upscale == 4:
                x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)

        x = x / self.img_range + self.mean

        return x[:, :, :H * self.upscale, :W * self.upscale]

    def flops(self):
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()
        return flops


if __name__ == '__main__':
    upscale = 4
    window_size = 8
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    model = SwinIR(upscale=2, img_size=(height, width),
                   window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
                   embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
    print(model)
    print(height, width, model.flops() / 1e9)

    x = torch.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
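Note: `check_image_size` reflect-pads the input so that both spatial dimensions become multiples of `window_size`, and `forward` crops the output back to H * upscale by W * upscale, so the model accepts images of arbitrary size. A standalone sketch of the mod-pad arithmetic (illustrative only, not part of the repository):

# Sketch: the padding rule used by SwinIR.check_image_size, with window_size = 8.
window_size = 8
for h in (57, 64, 65):
    mod_pad_h = (window_size - h % window_size) % window_size
    assert (h + mod_pad_h) % window_size == 0
    print(h, '->', h + mod_pad_h)  # 57 -> 64, 64 -> 64, 65 -> 72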
SwinIR/weight/003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9afb61e65e04eb7f8aba5095d070bbe9af28df76acd0c9405aeb33b814bcfc6
size 67129861
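Note: the checkpoint is stored with Git LFS, so the commit contains only this three-line pointer (spec version, content hash, byte size) rather than the ~67 MB weight file itself.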
app.py
CHANGED
@@ -1,109 +1,40 @@
-import numpy as np
-from collections import OrderedDict
-from pathlib import Path
-
+import gradio
 import gradio as gr
 import os
 
-
-import torch
-from PIL import Image
-from torchvision import transforms
-
-from sam_diffsr.utils_sr.hparams import set_hparams, hparams
-from sam_diffsr.utils_sr.matlab_resize import imresize
+from SwinIR.infer import SwinIRDemo
+from sam_diffsr.infer import sam_diffsr_demo
 
 
-def get_img_data(img_PIL, hparams, sr_scale=4):
-    img_lr = img_PIL.convert('RGB')
-    img_lr = np.uint8(np.asarray(img_lr))
-
-    h, w, c = img_lr.shape
-    h, w = h * sr_scale, w * sr_scale
-    h = h - h % (sr_scale * 2)
-    w = w - w % (sr_scale * 2)
-    h_l = h // sr_scale
-    w_l = w // sr_scale
-
-    img_lr = img_lr[:h_l, :w_l]
-
-    to_tensor_norm = transforms.Compose([
-        transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
-    ])
-
-    img_lr_up = imresize(img_lr / 256, hparams['sr_scale'])  # np.float [H, W, C]
-    img_lr, img_lr_up = [to_tensor_norm(x).float() for x in [img_lr, img_lr_up]]
-
-    img_lr = torch.unsqueeze(img_lr, dim=0)
-    img_lr_up = torch.unsqueeze(img_lr_up, dim=0)
-
-    return img_lr, img_lr_up
-
-
-def load_checkpoint(model, ckpt_path):
-    checkpoint = torch.load(ckpt_path, map_location='cpu')
-    print(f'loading checkpoint from: {ckpt_path}')
-    stat_dict = checkpoint['state_dict']['model']
-
-    new_state_dict = OrderedDict()
-    for k, v in stat_dict.items():
-        if k[:7] == 'module.':
-            k = k[7:]  # strip the 'module.' prefix
-        new_state_dict[k] = v
-
-    model.load_state_dict(new_state_dict)
-    model.cuda()
-    del checkpoint
-    torch.cuda.empty_cache()
-
-
-def model_init(ckpt_path):
-    set_hparams()
-
-    from sam_diffsr.tasks.srdiff_df2k_sam import SRDiffDf2k_sam as trainer
-
-    trainer = trainer()
-
-    trainer.build_model()
-    load_checkpoint(trainer.model, ckpt_path)
-
-    torch.backends.cudnn.benchmark = False
-
-    return trainer
-
-
-def image_infer(img_PIL):
-    with torch.no_grad():
-        trainer.model.eval()
-        img_lr, img_lr_up = get_img_data(img_PIL, hparams, sr_scale=4)
-
-        img_lr = img_lr.to('cuda')
-        img_lr_up = img_lr_up.to('cuda')
-
-        img_sr, _ = trainer.model.sample(img_lr, img_lr_up, img_lr_up.shape)
-
-        img_sr = img_sr.clamp(-1, 1)
-        img_sr = trainer.tensor2img(img_sr)[0]
-        img_sr = Image.fromarray(img_sr)
-
-        return img_sr
-
 
-root_path = os.path.dirname(__file__)
 
-
-
 
-ckpt_path = os.path.join(root_path, 'sam_diffsr/weight/model_ckpt_steps_400000.ckpt')
-trainer = model_init(ckpt_path)
-demo = gr.Interface(image_infer, gr.Image(type="pil", value=cheetah), "image",
-                    # flagging_options=["blurry", "incorrect", "other"],
-                    examples=[
-                        os.path.join(root_path, "images/0801x4.png"),
-                        os.path.join(root_path, "images/0804x4.png"),
-                        os.path.join(root_path, "images/0809x4.png"),
-                    ]
-                    )
+def image_infer(img_PIL, progress=gr.Progress(track_tqdm=True)):
+    sam_diffsr_img = sam_diffsr_infer.infer(img_PIL)
+    swin_ir_img = swin_ir_infer.infer(img_PIL)
+    return sam_diffsr_img, swin_ir_img
 
 
 if __name__ == "__main__":
+    sam_diffsr_infer = sam_diffsr_demo()
+    swin_ir_infer = SwinIRDemo()
+
+    root_path = os.path.dirname(__file__)
+    cheetah = os.path.join(root_path, "images/04011.png")
+
+    demo = gr.Interface(image_infer, gr.Image(type="pil", value=cheetah),
+                        [
+                            gradio.Image(label='SAM-DiffSR', show_label=True),
+                            gradio.Image(label='SwinIR', show_label=True)
+                        ],
+                        # flagging_options=["blurry", "incorrect", "other"],
+                        examples=[
+                            os.path.join(root_path, "images/04011.png"),
+                            os.path.join(root_path, "images/04033.png"),
+                            os.path.join(root_path, "images/04064.png"),
+                            os.path.join(root_path, "images/04146.png"),
+                            # os.path.join(root_path, "images/10091.png"),
+                            os.path.join(root_path, "images/0801x4.png"),
+                            os.path.join(root_path, "images/0804x4.png"),
+                            os.path.join(root_path, "images/0809x4.png"),
+                        ]
+                        )
+
     demo.launch()
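The refactored app.py now only wires up the UI: each model sits behind its own wrapper class, and image_infer returns a pair of images that gr.Interface renders side by side under the 'SAM-DiffSR' and 'SwinIR' labels. A sketch of calling it directly, assuming the two wrapper globals have been constructed as in the __main__ block above (illustrative only):

# Sketch: direct invocation of the comparison function, outside Gradio.
from PIL import Image

img = Image.open('images/04011.png')
sam_diffsr_img, swin_ir_img = image_infer(img)  # two 4x-upscaled PIL images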
images/04011.png
ADDED
images/04033.png
ADDED
images/04064.png
ADDED
images/04132.png
ADDED
images/04146.png
ADDED
images/10091.png
ADDED
sam_diffsr/cache/hub/checkpoints/alexnet-owt-7be5be79.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7be5be791159472b1fbf3c69796f7cb30dca7ad8466c2df70058c37116cdee02
size 244408911
sam_diffsr/infer.py
ADDED
@@ -0,0 +1,86 @@
import os
from collections import OrderedDict

import numpy as np
import torch
from PIL import Image
from torchvision.transforms import transforms

from sam_diffsr.utils_sr.hparams import set_hparams, hparams
from sam_diffsr.utils_sr.matlab_resize import imresize
from sam_diffsr.tasks.srdiff_df2k_sam import SRDiffDf2k_sam as trainer_ori


ROOT_PATH = os.path.dirname(__file__)


class sam_diffsr_demo:
    def __init__(self):
        set_hparams()
        ckpt_path = os.path.join(ROOT_PATH, 'weight/model_ckpt_steps_400000.ckpt')
        self.model_init(ckpt_path)

    def get_img_data(self, img_PIL, hparams, sr_scale=4):
        img_lr = img_PIL.convert('RGB')
        img_lr = np.uint8(np.asarray(img_lr))

        h, w, c = img_lr.shape
        h, w = h * sr_scale, w * sr_scale
        h = h - h % (sr_scale * 2)
        w = w - w % (sr_scale * 2)
        h_l = h // sr_scale
        w_l = w // sr_scale

        img_lr = img_lr[:h_l, :w_l]

        to_tensor_norm = transforms.Compose([
            transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        img_lr_up = imresize(img_lr / 256, hparams['sr_scale'])  # np.float [H, W, C]
        img_lr, img_lr_up = [to_tensor_norm(x).float() for x in [img_lr, img_lr_up]]

        img_lr = torch.unsqueeze(img_lr, dim=0)
        img_lr_up = torch.unsqueeze(img_lr_up, dim=0)

        return img_lr, img_lr_up

    def load_checkpoint(self, ckpt_path):
        checkpoint = torch.load(ckpt_path, map_location='cpu')
        print(f'loading checkpoint from: {ckpt_path}')
        stat_dict = checkpoint['state_dict']['model']

        new_state_dict = OrderedDict()
        for k, v in stat_dict.items():
            if k[:7] == 'module.':
                k = k[7:]  # strip the 'module.' prefix
            new_state_dict[k] = v

        self.model.model.load_state_dict(new_state_dict)
        self.model.model.cuda()
        del checkpoint
        torch.cuda.empty_cache()

    def model_init(self, ckpt_path):
        self.model = trainer_ori()

        self.model.build_model()
        self.load_checkpoint(ckpt_path)

        torch.backends.cudnn.benchmark = False

    def infer(self, img_PIL):
        with torch.no_grad():
            self.model.model.eval()
            img_lr, img_lr_up = self.get_img_data(img_PIL, hparams, sr_scale=4)

            img_lr = img_lr.to('cuda')
            img_lr_up = img_lr_up.to('cuda')

            img_sr, _ = self.model.model.sample(img_lr, img_lr_up, img_lr_up.shape)

            img_sr = img_sr.clamp(-1, 1)
            img_sr = self.model.tensor2img(img_sr)[0]
            img_sr = Image.fromarray(img_sr)

            return img_sr
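Usage sketch for the new wrapper, assuming the 400k-step checkpoint is present under sam_diffsr/weight/ and a CUDA device is available (illustrative only):

# Sketch: one-off inference with the sam_diffsr_demo wrapper.
from PIL import Image
from sam_diffsr.infer import sam_diffsr_demo

demo = sam_diffsr_demo()  # builds the trainer and loads the checkpoint onto the GPU
img_sr = demo.infer(Image.open('images/04011.png'))  # returns a 4x-upscaled PIL image
img_sr.save('04011_x4.png')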
sam_diffsr/models_sr/diffusion.py
CHANGED
@@ -7,7 +7,7 @@ from tqdm import tqdm
 
 from sam_diffsr.utils_sr.plt_img import plt_tensor_img
 from .module_util import default
-from sam_diffsr.utils_sr.sr_utils import SSIM
+from sam_diffsr.utils_sr.sr_utils import SSIM
 from sam_diffsr.utils_sr.hparams import hparams
 
 
sam_diffsr/models_sr/diffusion_sam.py
CHANGED
@@ -52,7 +52,7 @@ class GaussianDiffusion_sam(GaussianDiffusion):
         return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0_pred
 
     @torch.no_grad()
-    def sample(self, img_lr, img_lr_up, shape, sam_mask=None, save_intermediate=False):
+    def sample(self, img_lr, img_lr_up, shape, sam_mask=None, save_intermediate=False, progress=None):
         device = self.betas.device
         b = shape[0]
 
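The only functional change here is the new progress=None keyword on sample, added so the Gradio front end can surface sampling progress. The method body is not shown in this diff; a hypothetical sketch of how such a handle could be consumed (sketch only, not the actual implementation):

# Hypothetical sketch: threading a Gradio progress handle through the
# reverse-diffusion timestep loop. gr.Progress(track_tqdm=True) already
# tracks plain tqdm loops, and gr.Progress also exposes a tqdm-compatible
# wrapper that can be used explicitly.
def iter_timesteps(num_timesteps, progress=None):
    steps = range(num_timesteps - 1, -1, -1)
    return progress.tqdm(steps, desc='sampling') if progress is not None else steps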