import gradio as gr
import numpy as np
import time
import datetime
from pathlib import Path

import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from bigdl.nano.pytorch.trainer import Trainer

from data import write_image_tensor, PatchDataModule, prepare_data, image2tensor, tensor2image

# Load the original FP32 generator and run everything on CPU.
device = 'cpu'
dtype = torch.float32
generator = torch.load("models/generator.pt")
generator.eval()
generator.to(device, dtype)

# DataLoader settings: one image per batch, no worker processes.
params = {'batch_size': 1, 'num_workers': 0}


class ImageDataset(Dataset):
    """Wraps a single input image as a one-item dataset."""

    def __init__(self, img):
        self.imgs = [image2tensor(img)]

    def __getitem__(self, idx: int) -> dict:
        return self.imgs[idx]

    def __len__(self) -> int:
        return len(self.imgs)


# Quantize the generator with BigDL-Nano, using the training patches
# as the calibration dataloader.
data_path = Path('data/webcam')
train_image_dd = prepare_data(data_path)
dm = PatchDataModule(train_image_dd, patch_size=2**6,
                     batch_size=2**3, patch_num=2**6)
train_loader = dm.train_dataloader()
train_loader_iter = iter(train_loader)
quantized_model = Trainer.quantize(generator, accelerator=None,
                                   calib_dataloader=train_loader)


def original_transfer(input_img):
    """Run style transfer with the original FP32 generator."""
    w, h, _ = input_img.shape
    print(datetime.datetime.now())
    print("input size: ", w, h)
    # resize too large image
    if w > 3000 or h > 3000:
        ratio = min(3000 / w, 3000 / h)
        w = int(w * ratio)
        h = int(h * ratio)
    # force both dimensions to multiples of 4
    if w % 4 != 0 or h % 4 != 0:
        NW = int((w // 4) * 4)
        NH = int((h // 4) * 4)
        input_img = np.resize(input_img, (NW, NH, 3))
    st = time.perf_counter()
    dataset = ImageDataset(input_img)
    loader = DataLoader(dataset, **params)
    with torch.no_grad():
        for inputs in tqdm(loader):
            inputs = inputs.to(device, dtype)
            st = time.perf_counter()
            outputs = generator(inputs)
            ori_time = time.perf_counter() - st
            ori_time = "{:.3f}s".format(ori_time)
            ori_image = np.array(tensor2image(outputs[0]))
            del inputs
            del outputs
            return ori_image, ori_time


def nano_transfer(input_img):
    """Run style transfer with the BigDL-Nano quantized generator."""
    w, h, _ = input_img.shape
    print(datetime.datetime.now())
    print("input size: ", w, h)
    # resize too large image
    if w > 3000 or h > 3000:
        ratio = min(3000 / w, 3000 / h)
        w = int(w * ratio)
        h = int(h * ratio)
    # force both dimensions to multiples of 4
    if w % 4 != 0 or h % 4 != 0:
        NW = int((w // 4) * 4)
        NH = int((h // 4) * 4)
        input_img = np.resize(input_img, (NW, NH, 3))
    st = time.perf_counter()
    dataset = ImageDataset(input_img)
    loader = DataLoader(dataset, **params)
    with torch.no_grad():
        for inputs in tqdm(loader):
            inputs = inputs.to(device, dtype)
            st = time.perf_counter()
            outputs = quantized_model(inputs)
            nano_time = time.perf_counter() - st
            nano_time = "{:.3f}s".format(nano_time)
            nano_image = np.array(tensor2image(outputs[0]))
            del inputs
            del outputs
            return nano_image, nano_time


def clear():
    """Reset all four Gradio components."""
    return None, None, None, None


demo = gr.Blocks()

with demo:
    gr.Markdown("