abreza committed on
Commit 37bff96
1 Parent(s): b9005a3
Files changed (1)
  1. app.py +124 -0
app.py ADDED
@@ -0,0 +1,124 @@
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
from PIL import Image
import gradio as gr
from featup.util import norm, unnorm, pca, remove_axes
from pytorch_lightning import seed_everything
import os
import requests
import csv
import spaces


def plot_feats(image, lr, hr):
    assert len(image.shape) == len(lr.shape) == len(hr.shape) == 3
    seed_everything(0)
    [lr_feats_pca, hr_feats_pca], _ = pca(
        [lr.unsqueeze(0), hr.unsqueeze(0)], dim=9)
    fig, ax = plt.subplots(3, 3, figsize=(15, 15))
    ax[0, 0].imshow(image.permute(1, 2, 0).detach().cpu())
    ax[1, 0].imshow(image.permute(1, 2, 0).detach().cpu())
    ax[2, 0].imshow(image.permute(1, 2, 0).detach().cpu())

    ax[0, 0].set_title("Image", fontsize=22)
    ax[0, 1].set_title("Original", fontsize=22)
    ax[0, 2].set_title("Upsampled Features", fontsize=22)

    ax[0, 1].imshow(lr_feats_pca[0, :3].permute(1, 2, 0).detach().cpu())
    ax[0, 0].set_ylabel("PCA Components 1-3", fontsize=22)
    ax[0, 2].imshow(hr_feats_pca[0, :3].permute(1, 2, 0).detach().cpu())

    ax[1, 1].imshow(lr_feats_pca[0, 3:6].permute(1, 2, 0).detach().cpu())
    ax[1, 0].set_ylabel("PCA Components 4-6", fontsize=22)
    ax[1, 2].imshow(hr_feats_pca[0, 3:6].permute(1, 2, 0).detach().cpu())

    ax[2, 1].imshow(lr_feats_pca[0, 6:9].permute(1, 2, 0).detach().cpu())
    ax[2, 0].set_ylabel("PCA Components 7-9", fontsize=22)
    ax[2, 2].imshow(hr_feats_pca[0, 6:9].permute(1, 2, 0).detach().cpu())

    remove_axes(ax)
    plt.tight_layout()
    plt.close(fig)  # Close plt to avoid additional empty plots
    return fig


def download_image(url, save_path):
    response = requests.get(url)
    with open(save_path, 'wb') as file:
        file.write(response.content)


base_url = "https://marhamilresearch4.blob.core.windows.net/feature-upsampling-public/sample_images/"
sample_images_urls = {
    "skate.jpg": base_url + "skate.jpg",
    "car.jpg": base_url + "car.jpg",
    "plant.png": base_url + "plant.png",
}

sample_images_dir = "/tmp/sample_images"

# Ensure the directory for sample images exists
os.makedirs(sample_images_dir, exist_ok=True)

# Download each sample image
for filename, url in sample_images_urls.items():
    save_path = os.path.join(sample_images_dir, filename)
    # Download the image if it doesn't already exist
    if not os.path.exists(save_path):
        print(f"Downloading {filename}...")
        download_image(url, save_path)
    else:
        print(f"{filename} already exists. Skipping download.")

os.environ['TORCH_HOME'] = '/tmp/.cache'
os.environ['GRADIO_EXAMPLES_CACHE'] = '/tmp/gradio_cache'
csv.field_size_limit(100000000)
options = ['dino16', 'vit', 'dinov2', 'clip', 'resnet50']

image_input = gr.Image(label="Choose an image to featurize",
                       height=480,
                       type="pil",
                       image_mode='RGB',
                       sources=['upload', 'webcam', 'clipboard']
                       )
model_option = gr.Radio(options, value="dino16",
                        label='Choose a backbone to upsample')

models = {o: torch.hub.load("mhamilton723/FeatUp", o) for o in options}


@spaces.GPU
def upsample_features(image, model_option):
    # Image preprocessing
    input_size = 224
    transform = T.Compose([
        T.Resize(input_size),
        T.CenterCrop((input_size, input_size)),
        T.ToTensor(),
        norm
    ])
    image_tensor = transform(image).unsqueeze(0).cuda()

    # Load the selected model
    upsampler = models[model_option].cuda()
    hr_feats = upsampler(image_tensor)
    lr_feats = upsampler.model(image_tensor)
    upsampler.cpu()

    return plot_feats(unnorm(image_tensor)[0], lr_feats[0], hr_feats[0])


demo = gr.Interface(fn=upsample_features,
                    inputs=[image_input, model_option],
                    outputs="plot",
                    title="Feature Upsampling Demo",
                    description="This demo allows you to upsample features of an image using selected models.",
                    examples=[
                        ["/tmp/sample_images/skate.jpg", "dino16"],
                        ["/tmp/sample_images/car.jpg", "dinov2"],
                        ["/tmp/sample_images/plant.png", "dino16"],
                    ]
                    )

demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)