yanranxiaoxi committed
Commit 00fc29f
Parent(s): a37be5f
First commit
This view is limited to 50 files because the commit contains too many changes.
- .gitattributes +35 -35
- BOOXEL/__init__.py +0 -0
- BOOXEL/models/BOOXEL_model.py +195 -0
- BOOXEL/models/__init__.py +0 -0
- BOOXEL/modules/BOOXEL_v0.py +716 -0
- BOOXEL/modules/__init__.py +11 -0
- BOOXEL/util.py +179 -0
- BOOXEL/utils/__init__.py +0 -0
- BOOXEL/utils/colorfix.py +120 -0
- BOOXEL/utils/devices.py +137 -0
- BOOXEL/utils/face_restoration_helper.py +515 -0
- BOOXEL/utils/file.py +79 -0
- BOOXEL/utils/tilevae.py +971 -0
- CKPT_PTH.py +2 -0
- Examples/Example1.png +0 -0
- Examples/Example2.jpeg +0 -0
- Examples/Example3.webp +0 -0
- README.md +12 -12
- app.py +854 -0
- llava/__init__.py +1 -0
- llava/constants.py +12 -0
- llava/conversation.py +381 -0
- llava/eval/eval_gpt_review.py +113 -0
- llava/eval/eval_gpt_review_bench.py +121 -0
- llava/eval/eval_gpt_review_visual.py +118 -0
- llava/eval/eval_pope.py +81 -0
- llava/eval/eval_science_qa.py +114 -0
- llava/eval/eval_science_qa_gpt4.py +104 -0
- llava/eval/eval_science_qa_gpt4_requery.py +149 -0
- llava/eval/eval_textvqa.py +65 -0
- llava/eval/generate_webpage_data_from_table.py +111 -0
- llava/eval/m4c_evaluator.py +334 -0
- llava/eval/model_qa.py +85 -0
- llava/eval/model_vqa.py +125 -0
- llava/eval/model_vqa_loader.py +144 -0
- llava/eval/model_vqa_mmbench.py +170 -0
- llava/eval/model_vqa_science.py +147 -0
- llava/eval/qa_baseline_gpt35.py +74 -0
- llava/eval/run_llava.py +97 -0
- llava/eval/summarize_gpt_review.py +60 -0
- llava/eval/table/answer/answer_alpaca-13b.jsonl +80 -0
- llava/eval/table/answer/answer_bard.jsonl +0 -0
- llava/eval/table/answer/answer_gpt35.jsonl +0 -0
- llava/eval/table/answer/answer_llama-13b.jsonl +80 -0
- llava/eval/table/answer/answer_vicuna-13b.jsonl +0 -0
- llava/eval/table/caps_boxes_coco2014_val_80.jsonl +80 -0
- llava/eval/table/model.jsonl +5 -0
- llava/eval/table/prompt.jsonl +4 -0
- llava/eval/table/question.jsonl +80 -0
- llava/eval/table/results/test_sqa_llava_13b_v0.json +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
BOOXEL/__init__.py
ADDED
File without changes
BOOXEL/models/BOOXEL_model.py
ADDED
@@ -0,0 +1,195 @@
import torch
from sgm.models.diffusion import DiffusionEngine
from sgm.util import instantiate_from_config
import copy
from sgm.modules.distributions.distributions import DiagonalGaussianDistribution
import random
from BOOXEL.utils.colorfix import wavelet_reconstruction, adaptive_instance_normalization
from pytorch_lightning import seed_everything
from torch.nn.functional import interpolate
from BOOXEL.utils.tilevae import VAEHook

class BOOXELModel(DiffusionEngine):
    def __init__(self, control_stage_config, ae_dtype='fp32', diffusion_dtype='fp32', p_p='', n_p='', *args, **kwargs):
        super().__init__(*args, **kwargs)
        control_model = instantiate_from_config(control_stage_config)
        self.model.load_control_model(control_model)
        self.first_stage_model.denoise_encoder = copy.deepcopy(self.first_stage_model.encoder)
        self.sampler_config = kwargs['sampler_config']

        assert (ae_dtype in ['fp32', 'fp16', 'bf16']) and (diffusion_dtype in ['fp32', 'fp16', 'bf16'])
        if ae_dtype == 'fp32':
            ae_dtype = torch.float32
        elif ae_dtype == 'fp16':
            raise RuntimeError('fp16 causes NaN in the AE')
        elif ae_dtype == 'bf16':
            ae_dtype = torch.bfloat16

        if diffusion_dtype == 'fp32':
            diffusion_dtype = torch.float32
        elif diffusion_dtype == 'fp16':
            diffusion_dtype = torch.float16
        elif diffusion_dtype == 'bf16':
            diffusion_dtype = torch.bfloat16

        self.ae_dtype = ae_dtype
        self.model.dtype = diffusion_dtype

        self.p_p = p_p
        self.n_p = n_p

    @torch.no_grad()
    def encode_first_stage(self, x):
        with torch.autocast("cuda", dtype=self.ae_dtype):
            z = self.first_stage_model.encode(x)
        z = self.scale_factor * z
        return z

    @torch.no_grad()
    def encode_first_stage_with_denoise(self, x, use_sample=True, is_stage1=False):
        with torch.autocast("cuda", dtype=self.ae_dtype):
            if is_stage1:
                h = self.first_stage_model.denoise_encoder_s1(x)
            else:
                h = self.first_stage_model.denoise_encoder(x)
            moments = self.first_stage_model.quant_conv(h)
            posterior = DiagonalGaussianDistribution(moments)
            if use_sample:
                z = posterior.sample()
            else:
                z = posterior.mode()
        z = self.scale_factor * z
        return z

    @torch.no_grad()
    def decode_first_stage(self, z):
        z = 1.0 / self.scale_factor * z
        with torch.autocast("cuda", dtype=self.ae_dtype):
            out = self.first_stage_model.decode(z)
        return out.float()

    @torch.no_grad()
    def batchify_denoise(self, x, is_stage1=False):
        '''
        [N, C, H, W], [-1, 1], RGB
        '''
        x = self.encode_first_stage_with_denoise(x, use_sample=False, is_stage1=is_stage1)
        return self.decode_first_stage(x)

    @torch.no_grad()
    def batchify_sample(self, x, p, p_p='default', n_p='default', num_steps=100, restoration_scale=4.0, s_churn=0, s_noise=1.003, cfg_scale=4.0, seed=-1,
                        num_samples=1, control_scale=1, color_fix_type='None', use_linear_CFG=False, use_linear_control_scale=False,
                        cfg_scale_start=1.0, control_scale_start=0.0, **kwargs):
        '''
        [N, C], [-1, 1], RGB
        '''
        assert len(x) == len(p)
        assert color_fix_type in ['Wavelet', 'AdaIn', 'None']

        N = len(x)
        if num_samples > 1:
            assert N == 1
            N = num_samples
            x = x.repeat(N, 1, 1, 1)
            p = p * N

        if p_p == 'default':
            p_p = self.p_p
        if n_p == 'default':
            n_p = self.n_p

        self.sampler_config.params.num_steps = num_steps
        if use_linear_CFG:
            self.sampler_config.params.guider_config.params.scale_min = cfg_scale
            self.sampler_config.params.guider_config.params.scale = cfg_scale_start
        else:
            self.sampler_config.params.guider_config.params.scale_min = cfg_scale
            self.sampler_config.params.guider_config.params.scale = cfg_scale
        self.sampler_config.params.restore_cfg = restoration_scale
        self.sampler_config.params.s_churn = s_churn
        self.sampler_config.params.s_noise = s_noise
        self.sampler = instantiate_from_config(self.sampler_config)

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        _z = self.encode_first_stage_with_denoise(x, use_sample=False)
        x_stage1 = self.decode_first_stage(_z)
        z_stage1 = self.encode_first_stage(x_stage1)

        c, uc = self.prepare_condition(_z, p, p_p, n_p, N)

        denoiser = lambda input, sigma, c, control_scale: self.denoiser(
            self.model, input, sigma, c, control_scale, **kwargs
        )

        noised_z = torch.randn_like(_z).to(_z.device)

        _samples = self.sampler(denoiser, noised_z, cond=c, uc=uc, x_center=z_stage1, control_scale=control_scale,
                                use_linear_control_scale=use_linear_control_scale, control_scale_start=control_scale_start)
        samples = self.decode_first_stage(_samples)
        if color_fix_type == 'Wavelet':
            samples = wavelet_reconstruction(samples, x_stage1)
        elif color_fix_type == 'AdaIn':
            samples = adaptive_instance_normalization(samples, x_stage1)
        return samples

    def init_tile_vae(self, encoder_tile_size=512, decoder_tile_size=64):
        self.first_stage_model.denoise_encoder.original_forward = self.first_stage_model.denoise_encoder.forward
        self.first_stage_model.encoder.original_forward = self.first_stage_model.encoder.forward
        self.first_stage_model.decoder.original_forward = self.first_stage_model.decoder.forward
        self.first_stage_model.denoise_encoder.forward = VAEHook(
            self.first_stage_model.denoise_encoder, encoder_tile_size, is_decoder=False, fast_decoder=False,
            fast_encoder=False, color_fix=False, to_gpu=True)
        self.first_stage_model.encoder.forward = VAEHook(
            self.first_stage_model.encoder, encoder_tile_size, is_decoder=False, fast_decoder=False,
            fast_encoder=False, color_fix=False, to_gpu=True)
        self.first_stage_model.decoder.forward = VAEHook(
            self.first_stage_model.decoder, decoder_tile_size, is_decoder=True, fast_decoder=False,
            fast_encoder=False, color_fix=False, to_gpu=True)

    def prepare_condition(self, _z, p, p_p, n_p, N):
        batch = {}
        batch['original_size_as_tuple'] = torch.tensor([1024, 1024]).repeat(N, 1).to(_z.device)
        batch['crop_coords_top_left'] = torch.tensor([0, 0]).repeat(N, 1).to(_z.device)
        batch['target_size_as_tuple'] = torch.tensor([1024, 1024]).repeat(N, 1).to(_z.device)
        batch['aesthetic_score'] = torch.tensor([9.0]).repeat(N, 1).to(_z.device)
        batch['control'] = _z

        batch_uc = copy.deepcopy(batch)
        batch_uc['txt'] = [n_p for _ in p]

        if not isinstance(p[0], list):
            batch['txt'] = [''.join([_p, p_p]) for _p in p]
            with torch.cuda.amp.autocast(dtype=self.ae_dtype):
                c, uc = self.conditioner.get_unconditional_conditioning(batch, batch_uc)
        else:
            assert len(p) == 1, 'Only bs=1 is supported for local prompt conditioning.'
            p_tiles = p[0]
            c = []
            for i, p_tile in enumerate(p_tiles):
                batch['txt'] = [''.join([p_tile, p_p])]
                with torch.cuda.amp.autocast(dtype=self.ae_dtype):
                    if i == 0:
                        _c, uc = self.conditioner.get_unconditional_conditioning(batch, batch_uc)
                    else:
                        _c, _ = self.conditioner.get_unconditional_conditioning(batch, None)
                c.append(_c)
        return c, uc


if __name__ == '__main__':
    from BOOXEL.util import create_model, load_state_dict

    model = create_model('../../options/dev/BOOXEL_paper_version.yaml')

    SDXL_CKPT = '/opt/data/private/AIGC_pretrain/SDXL_cache/sd_xl_base_1.0_0.9vae.safetensors'
    BOOXEL_CKPT = '/opt/data/private/AIGC_pretrain/BOOXEL_cache/BOOXEL-paper.ckpt'
    model.load_state_dict(load_state_dict(SDXL_CKPT), strict=False)
    model.load_state_dict(load_state_dict(BOOXEL_CKPT), strict=False)
    model = model.cuda()

    x = torch.randn(1, 3, 512, 512).cuda()
    p = ['a professional, detailed, high-quality photo']
    samples = model.batchify_sample(x, p, num_steps=50, restoration_scale=4.0, s_churn=0, cfg_scale=4.0, seed=-1, num_samples=1)
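The __main__ block above exercises the model on random tensors. For a real image, this model is combined with the helpers added elsewhere in this commit (BOOXEL/util.py and the tiled-VAE hook) roughly as follows. This is a usage sketch only, not part of the committed files; the YAML path is a placeholder for an options file whose SDXL_CKPT / BOOXEL_CKPT_Q entries point at downloaded checkpoints:

import torch
from PIL import Image
from BOOXEL.util import create_BOOXEL_model, PIL2Tensor, Tensor2PIL

# Placeholder config path; the real options file must define the checkpoint locations.
model = create_BOOXEL_model('options/BOOXEL_v0.yaml', BOOXEL_sign='Q').cuda()
model.init_tile_vae(encoder_tile_size=512, decoder_tile_size=64)  # bound VRAM by tiling the VAE

lq = Image.open('Examples/Example1.png').convert('RGB')
x, h0, w0 = PIL2Tensor(lq, upsacle=2, min_size=1024)   # CHW tensor in [-1, 1]
x = x.unsqueeze(0).cuda()

with torch.no_grad():
    samples = model.batchify_sample(
        x, ['a detailed, high-quality photo'],
        num_steps=50, cfg_scale=4.0, color_fix_type='Wavelet', seed=42)

Tensor2PIL(samples[0], h0, w0).save('out.png')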
BOOXEL/models/__init__.py
ADDED
File without changes
BOOXEL/modules/BOOXEL_v0.py
ADDED
@@ -0,0 +1,716 @@
# from einops._torch_specific import allow_ops_in_compiled_graph
# allow_ops_in_compiled_graph()
import einops
import torch
import torch as th
import torch.nn as nn
from einops import rearrange, repeat

from sgm.modules.diffusionmodules.util import (
    avg_pool_nd,
    checkpoint,
    conv_nd,
    linear,
    normalization,
    timestep_embedding,
    zero_module,
)

from sgm.modules.diffusionmodules.openaimodel import Downsample, Upsample, UNetModel, Timestep, \
    TimestepEmbedSequential, ResBlock, AttentionBlock, TimestepBlock
from sgm.modules.attention import SpatialTransformer, MemoryEfficientCrossAttention, CrossAttention
from sgm.util import default, log_txt_as_img, exists, instantiate_from_config
import re
import torch
from functools import partial


try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
    XFORMERS_IS_AVAILBLE = False


# Dummy replacements
def convert_module_to_f16(x):
    pass


def convert_module_to_f32(x):
    pass


class ZeroConv(nn.Module):
    def __init__(self, label_nc, norm_nc, mask=False):
        super().__init__()
        self.zero_conv = zero_module(conv_nd(2, label_nc, norm_nc, 1, 1, 0))
        self.mask = mask

    def forward(self, c, h, h_ori=None):
        # with torch.cuda.amp.autocast(enabled=False, dtype=torch.float32):
        if not self.mask:
            h = h + self.zero_conv(c)
        else:
            h = h + self.zero_conv(c) * torch.zeros_like(h)
        if h_ori is not None:
            h = th.cat([h_ori, h], dim=1)
        return h


class ZeroSFT(nn.Module):
    def __init__(self, label_nc, norm_nc, concat_channels=0, norm=True, mask=False):
        super().__init__()

        # param_free_norm_type = str(parsed.group(1))
        ks = 3
        pw = ks // 2

        self.norm = norm
        if self.norm:
            self.param_free_norm = normalization(norm_nc + concat_channels)
        else:
            self.param_free_norm = nn.Identity()

        nhidden = 128

        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.SiLU()
        )
        self.zero_mul = zero_module(nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw))
        self.zero_add = zero_module(nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw))
        # self.zero_mul = nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw)
        # self.zero_add = nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw)

        self.zero_conv = zero_module(conv_nd(2, label_nc, norm_nc, 1, 1, 0))
        self.pre_concat = bool(concat_channels != 0)
        self.mask = mask

    def forward(self, c, h, h_ori=None, control_scale=1):
        assert self.mask is False
        if h_ori is not None and self.pre_concat:
            h_raw = th.cat([h_ori, h], dim=1)
        else:
            h_raw = h

        if self.mask:
            h = h + self.zero_conv(c) * torch.zeros_like(h)
        else:
            h = h + self.zero_conv(c)
        if h_ori is not None and self.pre_concat:
            h = th.cat([h_ori, h], dim=1)
        actv = self.mlp_shared(c)
        gamma = self.zero_mul(actv)
        beta = self.zero_add(actv)
        if self.mask:
            gamma = gamma * torch.zeros_like(gamma)
            beta = beta * torch.zeros_like(beta)
        h = self.param_free_norm(h) * (gamma + 1) + beta
        if h_ori is not None and not self.pre_concat:
            h = th.cat([h_ori, h], dim=1)
        return h * control_scale + h_raw * (1 - control_scale)


class ZeroCrossAttn(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        "softmax-xformers": MemoryEfficientCrossAttention
    }

    def __init__(self, context_dim, query_dim, zero_out=True, mask=False):
        super().__init__()
        attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.attn = attn_cls(query_dim=query_dim, context_dim=context_dim, heads=query_dim//64, dim_head=64)
        self.norm1 = normalization(query_dim)
        self.norm2 = normalization(context_dim)

        self.mask = mask

        # if zero_out:
        #     # for p in self.attn.to_out.parameters():
        #     #     p.detach().zero_()
        #     self.attn.to_out = zero_module(self.attn.to_out)

    def forward(self, context, x, control_scale=1):
        assert self.mask is False
        x_in = x
        x = self.norm1(x)
        context = self.norm2(context)
        b, c, h, w = x.shape
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        context = rearrange(context, 'b c h w -> b (h w) c').contiguous()
        x = self.attn(x, context)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if self.mask:
            x = x * torch.zeros_like(x)
        x = x_in + x * control_scale

        return x


class GLVControl(nn.Module):
    def __init__(
        self,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,  # custom transformer support
        transformer_depth=1,  # custom transformer support
        context_dim=None,  # custom transformer support
        n_embed=None,  # custom support for predicting discrete ids into the codebook of the first-stage VQ model
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
        spatial_transformer_attn_type="softmax",
        adm_in_channels=None,
        use_fairscale_checkpoint=False,
        offload_to_cpu=False,
        transformer_depth_middle=None,
        input_upscale=1,
    ):
        super().__init__()
        from omegaconf.listconfig import ListConfig

        if use_spatial_transformer:
            assert (
                context_dim is not None
            ), "Fool! You forgot to include the dimension of your cross-attention conditioning..."

        if context_dim is not None:
            assert (
                use_spatial_transformer
            ), "Fool! You forgot to use the spatial transformer for your cross-attention conditioning..."
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert (
                num_head_channels != -1
            ), "Either num_heads or num_head_channels has to be set."

        if num_head_channels == -1:
            assert (
                num_heads != -1
            ), "Either num_heads or num_head_channels has to be set."

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(transformer_depth, int):
            transformer_depth = len(channel_mult) * [transformer_depth]
        elif isinstance(transformer_depth, ListConfig):
            transformer_depth = list(transformer_depth)
        transformer_depth_middle = default(
            transformer_depth_middle, transformer_depth[-1]
        )

        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError(
                    "provide num_res_blocks either as an int (globally constant) or as a list/tuple (per level) with the same length as channel_mult"
                )
            self.num_res_blocks = num_res_blocks
        # self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans indicating whether to disable self-attention in the TransformerBlocks
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(
                map(
                    lambda i: self.num_res_blocks[i] >= num_attention_blocks[i],
                    range(len(num_attention_blocks)),
                )
            )
            print(
                f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                f"This option has lower priority than attention_resolutions {attention_resolutions}, "
                f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i is not in attention_resolutions, attention will still not be set."
            )  # todo: convert to warning

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        if use_fp16:
            print("WARNING: use_fp16 was dropped and has no effect anymore.")
        # self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        assert use_fairscale_checkpoint != use_checkpoint or not (
            use_checkpoint or use_fairscale_checkpoint
        )

        self.use_fairscale_checkpoint = False
        checkpoint_wrapper_fn = (
            partial(checkpoint_wrapper, offload_to_cpu=offload_to_cpu)
            if self.use_fairscale_checkpoint
            else lambda x: x
        )

        time_embed_dim = model_channels * 4
        self.time_embed = checkpoint_wrapper_fn(
            nn.Sequential(
                linear(model_channels, time_embed_dim),
                nn.SiLU(),
                linear(time_embed_dim, time_embed_dim),
            )
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "timestep":
                self.label_emb = checkpoint_wrapper_fn(
                    nn.Sequential(
                        Timestep(model_channels),
                        nn.Sequential(
                            linear(model_channels, time_embed_dim),
                            nn.SiLU(),
                            linear(time_embed_dim, time_embed_dim),
                        ),
                    )
                )
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        linear(adm_in_channels, time_embed_dim),
                        nn.SiLU(),
                        linear(time_embed_dim, time_embed_dim),
                    )
                )
            else:
                raise ValueError()

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    checkpoint_wrapper_fn(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=mult * model_channels,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                        )
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        # num_heads = 1
                        dim_head = (
                            ch // num_heads
                            if use_spatial_transformer
                            else num_head_channels
                        )
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if (
                        not exists(num_attention_blocks)
                        or nr < num_attention_blocks[level]
                    ):
                        layers.append(
                            checkpoint_wrapper_fn(
                                AttentionBlock(
                                    ch,
                                    use_checkpoint=use_checkpoint,
                                    num_heads=num_heads,
                                    num_head_channels=dim_head,
                                    use_new_attention_order=use_new_attention_order,
                                )
                            )
                            if not use_spatial_transformer
                            else checkpoint_wrapper_fn(
                                SpatialTransformer(
                                    ch,
                                    num_heads,
                                    dim_head,
                                    depth=transformer_depth[level],
                                    context_dim=context_dim,
                                    disable_self_attn=disabled_sa,
                                    use_linear=use_linear_in_transformer,
                                    attn_type=spatial_transformer_attn_type,
                                    use_checkpoint=use_checkpoint,
                                )
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        checkpoint_wrapper_fn(
                            ResBlock(
                                ch,
                                time_embed_dim,
                                dropout,
                                out_channels=out_ch,
                                dims=dims,
                                use_checkpoint=use_checkpoint,
                                use_scale_shift_norm=use_scale_shift_norm,
                                down=True,
                            )
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            # num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            checkpoint_wrapper_fn(
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                )
            ),
            checkpoint_wrapper_fn(
                AttentionBlock(
                    ch,
                    use_checkpoint=use_checkpoint,
                    num_heads=num_heads,
                    num_head_channels=dim_head,
                    use_new_attention_order=use_new_attention_order,
                )
            )
            if not use_spatial_transformer
            else checkpoint_wrapper_fn(
                SpatialTransformer(  # always uses the built-in one
                    ch,
                    num_heads,
                    dim_head,
                    depth=transformer_depth_middle,
                    context_dim=context_dim,
                    disable_self_attn=disable_middle_self_attn,
                    use_linear=use_linear_in_transformer,
                    attn_type=spatial_transformer_attn_type,
                    use_checkpoint=use_checkpoint,
                )
            ),
            checkpoint_wrapper_fn(
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                )
            ),
        )

        self.input_upscale = input_upscale
        self.input_hint_block = TimestepEmbedSequential(
            zero_module(conv_nd(dims, in_channels, model_channels, 3, padding=1))
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps, xt, context=None, y=None, **kwargs):
        # with torch.cuda.amp.autocast(enabled=False, dtype=torch.float32):
        #     x = x.to(torch.float32)
        #     timesteps = timesteps.to(torch.float32)
        #     xt = xt.to(torch.float32)
        #     context = context.to(torch.float32)
        #     y = y.to(torch.float32)
        # print(x.dtype)
        xt, context, y = xt.to(x.dtype), context.to(x.dtype), y.to(x.dtype)

        if self.input_upscale != 1:
            x = nn.functional.interpolate(x, scale_factor=self.input_upscale, mode='bilinear', antialias=True)
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
        # import pdb
        # pdb.set_trace()
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape[0] == xt.shape[0]
            emb = emb + self.label_emb(y)

        guided_hint = self.input_hint_block(x, emb, context)

        # h = x.type(self.dtype)
        h = xt
        for module in self.input_blocks:
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            hs.append(h)
            # print(module)
            # print(h.shape)
        h = self.middle_block(h, emb, context)
        hs.append(h)
        return hs


class LightGLVUNet(UNetModel):
    def __init__(self, mode='', project_type='ZeroSFT', project_channel_scale=1,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        if mode == 'XL-base':
            cond_output_channels = [320] * 4 + [640] * 3 + [1280] * 3
            project_channels = [160] * 4 + [320] * 3 + [640] * 3
            concat_channels = [320] * 2 + [640] * 3 + [1280] * 4 + [0]
            cross_attn_insert_idx = [6, 3]
            self.progressive_mask_nums = [0, 3, 7, 11]
        elif mode == 'XL-refine':
            cond_output_channels = [384] * 4 + [768] * 3 + [1536] * 6
            project_channels = [192] * 4 + [384] * 3 + [768] * 6
            concat_channels = [384] * 2 + [768] * 3 + [1536] * 7 + [0]
            cross_attn_insert_idx = [9, 6, 3]
            self.progressive_mask_nums = [0, 3, 6, 10, 14]
        else:
            raise NotImplementedError

        project_channels = [int(c * project_channel_scale) for c in project_channels]

        self.project_modules = nn.ModuleList()
        for i in range(len(cond_output_channels)):
            # if i == len(cond_output_channels) - 1:
            #     _project_type = 'ZeroCrossAttn'
            # else:
            #     _project_type = project_type
            _project_type = project_type
            if _project_type == 'ZeroSFT':
                self.project_modules.append(ZeroSFT(project_channels[i], cond_output_channels[i],
                                                    concat_channels=concat_channels[i]))
            elif _project_type == 'ZeroCrossAttn':
                self.project_modules.append(ZeroCrossAttn(cond_output_channels[i], project_channels[i]))
            else:
                raise NotImplementedError

        for i in cross_attn_insert_idx:
            self.project_modules.insert(i, ZeroCrossAttn(cond_output_channels[i], concat_channels[i]))
            # print(self.project_modules[i])

    def step_progressive_mask(self):
        if len(self.progressive_mask_nums) > 0:
            mask_num = self.progressive_mask_nums.pop()
            for i in range(len(self.project_modules)):
                if i < mask_num:
                    self.project_modules[i].mask = True
                else:
                    self.project_modules[i].mask = False
            return
            # print(f'step_progressive_mask, current masked layers: {mask_num}')
        else:
            return
            # print('step_progressive_mask, no more masked layers')
        # for i in range(len(self.project_modules)):
        #     print(self.project_modules[i].mask)

    def forward(self, x, timesteps=None, context=None, y=None, control=None, control_scale=1, **kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []

        _dtype = control[0].dtype
        x, context, y = x.to(_dtype), context.to(_dtype), y.to(_dtype)

        with torch.no_grad():
            t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
            emb = self.time_embed(t_emb)

            if self.num_classes is not None:
                assert y.shape[0] == x.shape[0]
                emb = emb + self.label_emb(y)

            # h = x.type(self.dtype)
            h = x
            for module in self.input_blocks:
                h = module(h, emb, context)
                hs.append(h)

        adapter_idx = len(self.project_modules) - 1
        control_idx = len(control) - 1
        h = self.middle_block(h, emb, context)
        h = self.project_modules[adapter_idx](control[control_idx], h, control_scale=control_scale)
        adapter_idx -= 1
        control_idx -= 1

        for i, module in enumerate(self.output_blocks):
            _h = hs.pop()
            h = self.project_modules[adapter_idx](control[control_idx], _h, h, control_scale=control_scale)
            adapter_idx -= 1
            # h = th.cat([h, _h], dim=1)
            if len(module) == 3:
                assert isinstance(module[2], Upsample)
                for layer in module[:2]:
                    if isinstance(layer, TimestepBlock):
                        h = layer(h, emb)
                    elif isinstance(layer, SpatialTransformer):
                        h = layer(h, context)
                    else:
                        h = layer(h)
                # print('cross_attn_here')
                h = self.project_modules[adapter_idx](control[control_idx], h, control_scale=control_scale)
                adapter_idx -= 1
                h = module[2](h)
            else:
                h = module(h, emb, context)
            control_idx -= 1
            # print(module)
            # print(h.shape)

        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            assert False, "not supported anymore, what on earth are you doing?"
        else:
            return self.out(h)

if __name__ == '__main__':
    from omegaconf import OmegaConf

    # refiner
    # opt = OmegaConf.load('../../options/train/debug_p2_xl.yaml')
    #
    # model = instantiate_from_config(opt.model.params.control_stage_config)
    # hint = model(torch.randn([1, 4, 64, 64]), torch.randn([1]), torch.randn([1, 4, 64, 64]))
    # hint = [h.cuda() for h in hint]
    # print(sum(map(lambda hint: hint.numel(), model.parameters())))
    #
    # unet = instantiate_from_config(opt.model.params.network_config)
    # unet = unet.cuda()
    #
    # _output = unet(torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1]).cuda(), torch.randn([1, 77, 1280]).cuda(),
    #                torch.randn([1, 2560]).cuda(), hint)
    # print(sum(map(lambda _output: _output.numel(), unet.parameters())))

    # base model
    with torch.no_grad():
        opt = OmegaConf.load('../../options/dev/BOOXEL_tmp.yaml')

        model = instantiate_from_config(opt.model.params.control_stage_config)
        model = model.cuda()

        hint = model(torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1]).cuda(), torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1, 77, 2048]).cuda(),
                     torch.randn([1, 2816]).cuda())

        for h in hint:
            print(h.shape)
        #
        unet = instantiate_from_config(opt.model.params.network_config)
        unet = unet.cuda()
        _output = unet(torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1]).cuda(), torch.randn([1, 77, 2048]).cuda(),
                       torch.randn([1, 2816]).cuda(), hint)


        # model = instantiate_from_config(opt.model.params.control_stage_config)
        # model = model.cuda()
        # # hint = model(torch.randn([1, 4, 64, 64]), torch.randn([1]), torch.randn([1, 4, 64, 64]))
        # hint = model(torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1]).cuda(), torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1, 77, 1280]).cuda(),
        #              torch.randn([1, 2560]).cuda())
        # # hint = [h.cuda() for h in hint]
        #
        # for h in hint:
        #     print(h.shape)
        #
        # unet = instantiate_from_config(opt.model.params.network_config)
        # unet = unet.cuda()
        # _output = unet(torch.randn([1, 4, 64, 64]).cuda(), torch.randn([1]).cuda(), torch.randn([1, 77, 1280]).cuda(),
        #                torch.randn([1, 2560]).cuda(), hint)
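ZeroSFT and ZeroCrossAttn are the projection modules LightGLVUNet uses to inject GLVControl features into its decoder. A minimal shape sketch, not part of the committed files; the channel count 320 is an arbitrary multiple of 32 so the GroupNorm inside normalization() divides evenly, and the attention path uses xformers only when it is installed:

import torch
from BOOXEL.modules.BOOXEL_v0 import ZeroSFT, ZeroCrossAttn

c = torch.randn(1, 320, 32, 32)   # control-branch feature map
h = torch.randn(1, 320, 32, 32)   # UNet decoder feature map at the same resolution

proj = ZeroSFT(label_nc=320, norm_nc=320, concat_channels=0)
print(proj(c, h, control_scale=1.0).shape)   # torch.Size([1, 320, 32, 32])

# ZeroCrossAttn flattens both maps into token sequences and attends from the
# UNet feature (query) to the control feature (context).
xattn = ZeroCrossAttn(context_dim=320, query_dim=320)
print(xattn(c, h).shape)                     # torch.Size([1, 320, 32, 32])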
BOOXEL/modules/__init__.py
ADDED
@@ -0,0 +1,11 @@
SDXL_BASE_CHANNEL_DICT = {
    'cond_output_channels': [320] * 4 + [640] * 3 + [1280] * 3,
    'project_channels': [160] * 4 + [320] * 3 + [640] * 3,
    'concat_channels': [320] * 2 + [640] * 3 + [1280] * 4 + [0]
}

SDXL_REFINE_CHANNEL_DICT = {
    'cond_output_channels': [384] * 4 + [768] * 3 + [1536] * 6,
    'project_channels': [192] * 4 + [384] * 3 + [768] * 6,
    'concat_channels': [384] * 2 + [768] * 3 + [1536] * 7 + [0]
}
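These dictionaries mirror the channel lists hard-coded in LightGLVUNet for the 'XL-base' and 'XL-refine' modes. A quick consistency sketch, included only as an illustration and not part of the commit:

from BOOXEL.modules import SDXL_BASE_CHANNEL_DICT, SDXL_REFINE_CHANNEL_DICT

for d in (SDXL_BASE_CHANNEL_DICT, SDXL_REFINE_CHANNEL_DICT):
    # one projection entry per conditioning feature map, with matching concat widths
    assert len(d['cond_output_channels']) == len(d['project_channels']) == len(d['concat_channels'])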
BOOXEL/util.py
ADDED
@@ -0,0 +1,179 @@
import os
import torch
import numpy as np
import cv2
from PIL import Image
from torch.nn.functional import interpolate
from omegaconf import OmegaConf
from sgm.util import instantiate_from_config


def get_state_dict(d):
    return d.get('state_dict', d)


def load_state_dict(ckpt_path, location='cpu'):
    _, extension = os.path.splitext(ckpt_path)
    if extension.lower() == ".safetensors":
        import safetensors.torch
        state_dict = safetensors.torch.load_file(ckpt_path, device=location)
    else:
        state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
    state_dict = get_state_dict(state_dict)
    print(f'Loaded state_dict from [{ckpt_path}]')
    return state_dict


def create_model(config_path):
    config = OmegaConf.load(config_path)
    model = instantiate_from_config(config.model).cpu()
    print(f'Loaded model config from [{config_path}]')
    return model


def create_BOOXEL_model(config_path, BOOXEL_sign=None, load_default_setting=False):
    config = OmegaConf.load(config_path)
    model = instantiate_from_config(config.model).cpu()
    print(f'Loaded model config from [{config_path}]')
    if config.SDXL_CKPT is not None:
        model.load_state_dict(load_state_dict(config.SDXL_CKPT), strict=False)
    if config.BOOXEL_CKPT is not None:
        model.load_state_dict(load_state_dict(config.BOOXEL_CKPT), strict=False)
    if BOOXEL_sign is not None:
        assert BOOXEL_sign in ['F', 'Q']
        if BOOXEL_sign == 'F':
            model.load_state_dict(load_state_dict(config.BOOXEL_CKPT_F), strict=False)
        elif BOOXEL_sign == 'Q':
            model.load_state_dict(load_state_dict(config.BOOXEL_CKPT_Q), strict=False)
    if load_default_setting:
        default_setting = config.default_setting
        return model, default_setting
    return model

def load_QF_ckpt(config_path):
    config = OmegaConf.load(config_path)
    ckpt_F = torch.load(config.BOOXEL_CKPT_F, map_location='cpu')
    ckpt_Q = torch.load(config.BOOXEL_CKPT_Q, map_location='cpu')
    return ckpt_Q, ckpt_F


def PIL2Tensor(img, upsacle=1, min_size=1024, fix_resize=None):
    '''
    PIL.Image -> Tensor[C, H, W], RGB, [-1, 1]
    '''
    # size
    w, h = img.size
    w *= upsacle
    h *= upsacle
    w0, h0 = round(w), round(h)
    if min(w, h) < min_size:
        _upsacle = min_size / min(w, h)
        w *= _upsacle
        h *= _upsacle
    if fix_resize is not None:
        _upsacle = fix_resize / min(w, h)
        w *= _upsacle
        h *= _upsacle
        w0, h0 = round(w), round(h)
    w = int(np.round(w / 64.0)) * 64
    h = int(np.round(h / 64.0)) * 64
    x = img.resize((w, h), Image.BICUBIC)
    x = np.array(x).round().clip(0, 255).astype(np.uint8)
    x = x / 255 * 2 - 1
    x = torch.tensor(x, dtype=torch.float32).permute(2, 0, 1)
    return x, h0, w0


def Tensor2PIL(x, h0, w0):
    '''
    Tensor[C, H, W], RGB, [-1, 1] -> PIL.Image
    '''
    x = x.unsqueeze(0)
    x = interpolate(x, size=(h0, w0), mode='bicubic')
    x = (x.squeeze(0).permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
    return Image.fromarray(x)


def HWC3(x):
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C == 1 or C == 3 or C == 4
    if C == 3:
        return x
    if C == 1:
        return np.concatenate([x, x, x], axis=2)
    if C == 4:
        color = x[:, :, 0:3].astype(np.float32)
        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
        y = color * alpha + 255.0 * (1.0 - alpha)
        y = y.clip(0, 255).astype(np.uint8)
        return y


def upscale_image(input_image, upscale, min_size=None, unit_resolution=64):
    H, W, C = input_image.shape
    H = float(H)
    W = float(W)
    H *= upscale
    W *= upscale
    if min_size is not None:
        if min(H, W) < min_size:
            _upsacle = min_size / min(W, H)
            W *= _upsacle
            H *= _upsacle
    H = int(np.round(H / unit_resolution)) * unit_resolution
    W = int(np.round(W / unit_resolution)) * unit_resolution
    img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if upscale > 1 else cv2.INTER_AREA)
    img = img.round().clip(0, 255).astype(np.uint8)
    return img


def fix_resize(input_image, size=512, unit_resolution=64):
    H, W, C = input_image.shape
    H = float(H)
    W = float(W)
    upscale = size / min(H, W)
    H *= upscale
    W *= upscale
    H = int(np.round(H / unit_resolution)) * unit_resolution
    W = int(np.round(W / unit_resolution)) * unit_resolution
    img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if upscale > 1 else cv2.INTER_AREA)
    img = img.round().clip(0, 255).astype(np.uint8)
    return img


def Numpy2Tensor(img):
    '''
    np.array[H, W, C] [0, 255] -> Tensor[C, H, W], RGB, [-1, 1]
    '''
    # size
    img = np.array(img) / 255 * 2 - 1
    img = torch.tensor(img, dtype=torch.float32).permute(2, 0, 1)
    return img


def Tensor2Numpy(x, h0=None, w0=None):
    '''
    Tensor[C, H, W], RGB, [-1, 1] -> np.array[H, W, C] [0, 255]
    '''
    if h0 is not None and w0 is not None:
        x = x.unsqueeze(0)
        x = interpolate(x, size=(h0, w0), mode='bicubic')
        x = x.squeeze(0)
    x = (x.permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
    return x


def convert_dtype(dtype_str):
    if dtype_str == 'fp32':
        return torch.float32
    elif dtype_str == 'fp16':
        return torch.float16
    elif dtype_str == 'bf16':
        return torch.bfloat16
    else:
        raise NotImplementedError
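The helpers above round the working resolution to a multiple of 64 (the SDXL latent stride) and undo the resize afterwards. A short round-trip sketch, not part of the committed files:

import numpy as np
from PIL import Image
from BOOXEL.util import PIL2Tensor, Tensor2PIL, HWC3, upscale_image

img = Image.open('Examples/Example2.jpeg').convert('RGB')
x, h0, w0 = PIL2Tensor(img, upsacle=2)      # tensor in [-1, 1], H/W rounded to multiples of 64
back = Tensor2PIL(x, h0, w0)                # resized back to the pre-rounding target size

arr = HWC3(np.array(img))                   # guarantee a 3-channel uint8 array
big = upscale_image(arr, upscale=2, min_size=1024)
print(x.shape, back.size, big.shape)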
BOOXEL/utils/__init__.py
ADDED
File without changes
BOOXEL/utils/colorfix.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
# --------------------------------------------------------------------------------
|
3 |
+
# 来自于 Li Yi (https://github.com/pkuliyi2015/sd-webui-stablesr/blob/master/srmodule/colorfix.py) 的颜色修复脚本
|
4 |
+
# --------------------------------------------------------------------------------
|
5 |
+
'''
|
6 |
+
|
7 |
+
import torch
|
8 |
+
from PIL import Image
|
9 |
+
from torch import Tensor
|
10 |
+
from torch.nn import functional as F
|
11 |
+
|
12 |
+
from torchvision.transforms import ToTensor, ToPILImage
|
13 |
+
|
14 |
+
def adain_color_fix(target: Image, source: Image):
|
15 |
+
# Convert images to tensors
|
16 |
+
to_tensor = ToTensor()
|
17 |
+
target_tensor = to_tensor(target).unsqueeze(0)
|
18 |
+
source_tensor = to_tensor(source).unsqueeze(0)
|
19 |
+
|
20 |
+
# Apply adaptive instance normalization
|
21 |
+
result_tensor = adaptive_instance_normalization(target_tensor, source_tensor)
|
22 |
+
|
23 |
+
# Convert tensor back to image
|
24 |
+
to_image = ToPILImage()
|
25 |
+
result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))
|
26 |
+
|
27 |
+
return result_image
|
28 |
+
|
29 |
+
def wavelet_color_fix(target: Image, source: Image):
|
30 |
+
# Convert images to tensors
|
31 |
+
to_tensor = ToTensor()
|
32 |
+
target_tensor = to_tensor(target).unsqueeze(0)
|
33 |
+
source_tensor = to_tensor(source).unsqueeze(0)
|
34 |
+
|
35 |
+
# Apply wavelet reconstruction
|
36 |
+
result_tensor = wavelet_reconstruction(target_tensor, source_tensor)
|
37 |
+
|
38 |
+
# Convert tensor back to image
|
39 |
+
to_image = ToPILImage()
|
40 |
+
result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))
|
41 |
+
|
42 |
+
return result_image
|
43 |
+
|
44 |
+
def calc_mean_std(feat: Tensor, eps=1e-5):
|
45 |
+
"""Calculate mean and std for adaptive_instance_normalization.
|
46 |
+
Args:
|
47 |
+
feat (Tensor): 4D tensor.
|
48 |
+
eps (float): A small value added to the variance to avoid
|
49 |
+
divide-by-zero. Default: 1e-5.
|
50 |
+
"""
|
51 |
+
size = feat.size()
|
52 |
+
assert len(size) == 4, 'The input feature should be 4D tensor.'
|
53 |
+
b, c = size[:2]
|
54 |
+
feat_var = feat.reshape(b, c, -1).var(dim=2) + eps
|
55 |
+
feat_std = feat_var.sqrt().reshape(b, c, 1, 1)
|
56 |
+
feat_mean = feat.reshape(b, c, -1).mean(dim=2).reshape(b, c, 1, 1)
|
57 |
+
return feat_mean, feat_std
|
58 |
+
|
59 |
+
def adaptive_instance_normalization(content_feat:Tensor, style_feat:Tensor):
|
60 |
+
"""Adaptive instance normalization.
|
61 |
+
Adjust the reference features to have the similar color and illuminations
|
62 |
+
as those in the degradate features.
|
63 |
+
Args:
|
64 |
+
content_feat (Tensor): The reference feature.
|
65 |
+
style_feat (Tensor): The degradate features.
|
66 |
+
"""
|
67 |
+
size = content_feat.size()
|
68 |
+
style_mean, style_std = calc_mean_std(style_feat)
|
69 |
+
content_mean, content_std = calc_mean_std(content_feat)
|
70 |
+
normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
|
71 |
+
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
|
72 |
+
|
73 |
+
def wavelet_blur(image: Tensor, radius: int):
|
74 |
+
"""
|
75 |
+
Apply wavelet blur to the input tensor.
|
76 |
+
"""
|
77 |
+
# input shape: (1, 3, H, W)
|
78 |
+
# convolution kernel
|
79 |
+
kernel_vals = [
|
80 |
+
[0.0625, 0.125, 0.0625],
|
81 |
+
[0.125, 0.25, 0.125],
|
82 |
+
[0.0625, 0.125, 0.0625],
|
83 |
+
]
|
84 |
+
kernel = torch.tensor(kernel_vals, dtype=image.dtype, device=image.device)
|
85 |
+
# add channel dimensions to the kernel to make it a 4D tensor
|
86 |
+
kernel = kernel[None, None]
|
87 |
+
# repeat the kernel across all input channels
|
88 |
+
kernel = kernel.repeat(3, 1, 1, 1)
|
89 |
+
image = F.pad(image, (radius, radius, radius, radius), mode='replicate')
|
90 |
+
# apply convolution
|
91 |
+
output = F.conv2d(image, kernel, groups=3, dilation=radius)
|
92 |
+
return output
|
93 |
+
|
94 |
+
def wavelet_decomposition(image: Tensor, levels=5):
|
95 |
+
"""
|
96 |
+
Apply wavelet decomposition to the input tensor.
|
97 |
+
This function only returns the low frequency & the high frequency.
|
98 |
+
"""
|
99 |
+
high_freq = torch.zeros_like(image)
|
100 |
+
for i in range(levels):
|
101 |
+
radius = 2 ** i
|
102 |
+
low_freq = wavelet_blur(image, radius)
|
103 |
+
high_freq += (image - low_freq)
|
104 |
+
image = low_freq
|
105 |
+
|
106 |
+
return high_freq, low_freq
|
107 |
+
|
108 |
+
def wavelet_reconstruction(content_feat:Tensor, style_feat:Tensor):
|
109 |
+
"""
|
110 |
+
Apply wavelet reconstruction, so that the content will have the same color as the style.
|
111 |
+
"""
|
112 |
+
# calculate the wavelet decomposition of the content feature
|
113 |
+
content_high_freq, content_low_freq = wavelet_decomposition(content_feat)
|
114 |
+
del content_low_freq
|
115 |
+
# calculate the wavelet decomposition of the style feature
|
116 |
+
style_high_freq, style_low_freq = wavelet_decomposition(style_feat)
|
117 |
+
del style_high_freq
|
118 |
+
# reconstruct the content feature with the style's high frequency
|
119 |
+
return content_high_freq + style_low_freq
|
120 |
+
|
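
For reference, a minimal usage sketch of the two color-fix helpers defined above. It assumes a pair of equally sized RGB PIL images; the file names are placeholders rather than assets shipped with this Space.

    from PIL import Image
    from BOOXEL.utils.colorfix import adain_color_fix, wavelet_color_fix

    # target: the restored / upscaled image whose colors have drifted
    # source: the reference image whose color statistics we want to keep
    target = Image.open('restored.png').convert('RGB')
    source = Image.open('reference.png').convert('RGB')

    # AdaIN variant: matches per-channel mean/std of the target to the source
    fixed_adain = adain_color_fix(target, source)

    # Wavelet variant: keeps the target's high frequencies and the source's low frequencies
    fixed_wavelet = wavelet_color_fix(target, source)
    fixed_wavelet.save('restored_colorfix.png')
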
BOOXEL/utils/devices.py
ADDED
@@ -0,0 +1,137 @@
1 |
+
import sys
|
2 |
+
import contextlib
|
3 |
+
from functools import lru_cache
|
4 |
+
|
5 |
+
import torch
|
6 |
+
#from modules import errors
|
7 |
+
|
8 |
+
if sys.platform == "darwin":
|
9 |
+
from modules import mac_specific
|
10 |
+
|
11 |
+
|
12 |
+
def has_mps() -> bool:
|
13 |
+
if sys.platform != "darwin":
|
14 |
+
return False
|
15 |
+
else:
|
16 |
+
return mac_specific.has_mps
|
17 |
+
|
18 |
+
|
19 |
+
def get_cuda_device_string():
|
20 |
+
return "cuda"
|
21 |
+
|
22 |
+
|
23 |
+
def get_optimal_device_name():
|
24 |
+
if torch.cuda.is_available():
|
25 |
+
return get_cuda_device_string()
|
26 |
+
|
27 |
+
if has_mps():
|
28 |
+
return "mps"
|
29 |
+
|
30 |
+
return "cpu"
|
31 |
+
|
32 |
+
|
33 |
+
def get_optimal_device():
|
34 |
+
return torch.device(get_optimal_device_name())
|
35 |
+
|
36 |
+
|
37 |
+
def get_device_for(task):
|
38 |
+
return get_optimal_device()
|
39 |
+
|
40 |
+
|
41 |
+
def torch_gc():
|
42 |
+
|
43 |
+
if torch.cuda.is_available():
|
44 |
+
with torch.cuda.device(get_cuda_device_string()):
|
45 |
+
torch.cuda.empty_cache()
|
46 |
+
torch.cuda.ipc_collect()
|
47 |
+
|
48 |
+
if has_mps():
|
49 |
+
mac_specific.torch_mps_gc()
|
50 |
+
|
51 |
+
|
52 |
+
def enable_tf32():
|
53 |
+
if torch.cuda.is_available():
|
54 |
+
|
55 |
+
# enabling the benchmark option seems to enable a range of cards to use fp16 when they otherwise can't work with fp16
|
56 |
+
# see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
|
57 |
+
if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
|
58 |
+
torch.backends.cudnn.benchmark = True
|
59 |
+
|
60 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
61 |
+
torch.backends.cudnn.allow_tf32 = True
|
62 |
+
|
63 |
+
|
64 |
+
enable_tf32()
|
65 |
+
#errors.run(enable_tf32, "Enabling TF32")
|
66 |
+
|
67 |
+
cpu = torch.device("cpu")
|
68 |
+
device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = torch.device("cuda")
|
69 |
+
dtype = torch.float16
|
70 |
+
dtype_vae = torch.float16
|
71 |
+
dtype_unet = torch.float16
|
72 |
+
unet_needs_upcast = False
|
73 |
+
|
74 |
+
|
75 |
+
def cond_cast_unet(input):
|
76 |
+
return input.to(dtype_unet) if unet_needs_upcast else input
|
77 |
+
|
78 |
+
|
79 |
+
def cond_cast_float(input):
|
80 |
+
return input.float() if unet_needs_upcast else input
|
81 |
+
|
82 |
+
|
83 |
+
def randn(seed, shape):
|
84 |
+
torch.manual_seed(seed)
|
85 |
+
return torch.randn(shape, device=device)
|
86 |
+
|
87 |
+
|
88 |
+
def randn_without_seed(shape):
|
89 |
+
return torch.randn(shape, device=device)
|
90 |
+
|
91 |
+
|
92 |
+
def autocast(disable=False):
|
93 |
+
if disable:
|
94 |
+
return contextlib.nullcontext()
|
95 |
+
|
96 |
+
return torch.autocast("cuda")
|
97 |
+
|
98 |
+
|
99 |
+
def without_autocast(disable=False):
|
100 |
+
return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
|
101 |
+
|
102 |
+
|
103 |
+
class NansException(Exception):
|
104 |
+
pass
|
105 |
+
|
106 |
+
|
107 |
+
def test_for_nans(x, where):
|
108 |
+
if not torch.all(torch.isnan(x)).item():
|
109 |
+
return
|
110 |
+
|
111 |
+
if where == "unet":
|
112 |
+
message = "在 Unet 中生成了一个包含所有 NaNs 的张量。"
|
113 |
+
|
114 |
+
elif where == "vae":
|
115 |
+
message = "在 VAE 中生成了一个包含所有 NaN 的张量。"
|
116 |
+
|
117 |
+
else:
|
118 |
+
message = "产生了一个包含所有 NaN 的张量。"
|
119 |
+
|
120 |
+
message += " 使用 --disable-nan-check 命令行参数禁用此检查。"
|
121 |
+
|
122 |
+
raise NansException(message)
|
123 |
+
|
124 |
+
|
125 |
+
@lru_cache
|
126 |
+
def first_time_calculation():
|
127 |
+
"""
|
128 |
+
The first calculation done with any pytorch layer allocates about 700MB of memory and takes roughly 2.7 seconds, at least on NVIDIA.
|
129 |
+
"""
|
130 |
+
|
131 |
+
x = torch.zeros((1, 1)).to(device, dtype)
|
132 |
+
linear = torch.nn.Linear(1, 1).to(device, dtype)
|
133 |
+
linear(x)
|
134 |
+
|
135 |
+
x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
|
136 |
+
conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
|
137 |
+
conv2d(x)
|
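
A minimal sketch of how this device helper is typically consumed by the rest of the code base (illustrative only: it assumes a CUDA or CPU machine, since the darwin branch expects a `modules.mac_specific` helper that this repository does not ship).

    import torch
    import BOOXEL.utils.devices as devices

    # pick cuda / mps / cpu automatically
    dev = devices.get_optimal_device()

    # run a small model step under autocast, then release cached VRAM
    model = torch.nn.Linear(8, 8).to(dev)
    x = torch.randn(1, 8, device=dev)
    with devices.autocast():
        y = model(x)
    devices.torch_gc()
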
BOOXEL/utils/face_restoration_helper.py
ADDED
@@ -0,0 +1,515 @@
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import os
|
4 |
+
import torch
|
5 |
+
from torchvision.transforms.functional import normalize
|
6 |
+
|
7 |
+
from facexlib.detection import init_detection_model
|
8 |
+
from facexlib.parsing import init_parsing_model
|
9 |
+
from facexlib.utils.misc import img2tensor, imwrite
|
10 |
+
|
11 |
+
from .file import load_file_from_url
|
12 |
+
|
13 |
+
|
14 |
+
def get_largest_face(det_faces, h, w):
|
15 |
+
def get_location(val, length):
|
16 |
+
if val < 0:
|
17 |
+
return 0
|
18 |
+
elif val > length:
|
19 |
+
return length
|
20 |
+
else:
|
21 |
+
return val
|
22 |
+
|
23 |
+
face_areas = []
|
24 |
+
for det_face in det_faces:
|
25 |
+
left = get_location(det_face[0], w)
|
26 |
+
right = get_location(det_face[2], w)
|
27 |
+
top = get_location(det_face[1], h)
|
28 |
+
bottom = get_location(det_face[3], h)
|
29 |
+
face_area = (right - left) * (bottom - top)
|
30 |
+
face_areas.append(face_area)
|
31 |
+
largest_idx = face_areas.index(max(face_areas))
|
32 |
+
return det_faces[largest_idx], largest_idx
|
33 |
+
|
34 |
+
|
35 |
+
def get_center_face(det_faces, h=0, w=0, center=None):
|
36 |
+
if center is not None:
|
37 |
+
center = np.array(center)
|
38 |
+
else:
|
39 |
+
center = np.array([w / 2, h / 2])
|
40 |
+
center_dist = []
|
41 |
+
for det_face in det_faces:
|
42 |
+
face_center = np.array([(det_face[0] + det_face[2]) / 2, (det_face[1] + det_face[3]) / 2])
|
43 |
+
dist = np.linalg.norm(face_center - center)
|
44 |
+
center_dist.append(dist)
|
45 |
+
center_idx = center_dist.index(min(center_dist))
|
46 |
+
return det_faces[center_idx], center_idx
|
47 |
+
|
48 |
+
|
49 |
+
class FaceRestoreHelper(object):
|
50 |
+
"""面部修复管道的辅助工具(基类)。"""
|
51 |
+
|
52 |
+
def __init__(self,
|
53 |
+
upscale_factor,
|
54 |
+
face_size=512,
|
55 |
+
crop_ratio=(1, 1),
|
56 |
+
det_model='retinaface_resnet50',
|
57 |
+
save_ext='png',
|
58 |
+
template_3points=False,
|
59 |
+
pad_blur=False,
|
60 |
+
use_parse=False,
|
61 |
+
device=None):
|
62 |
+
self.template_3points = template_3points # improve robustness
|
63 |
+
self.upscale_factor = int(upscale_factor)
|
64 |
+
# the cropped face ratio based on the square face
|
65 |
+
self.crop_ratio = crop_ratio # (h, w)
|
66 |
+
assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ratio only supports >=1'
|
67 |
+
self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))
|
68 |
+
self.det_model = det_model
|
69 |
+
|
70 |
+
if self.det_model == 'dlib':
|
71 |
+
# standard 5 landmarks for FFHQ faces at 1024 x 1024
|
72 |
+
self.face_template = np.array([[686.77227723, 488.62376238], [586.77227723, 493.59405941],
|
73 |
+
[337.91089109, 488.38613861], [437.95049505, 493.51485149],
|
74 |
+
[513.58415842, 678.5049505]])
|
75 |
+
self.face_template = self.face_template / (1024 // face_size)
|
76 |
+
elif self.template_3points:
|
77 |
+
self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
|
78 |
+
else:
|
79 |
+
# standard 5 landmarks for FFHQ faces at 512 x 512
|
80 |
+
# facexlib
|
81 |
+
self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
|
82 |
+
[201.26117, 371.41043], [313.08905, 371.15118]])
|
83 |
+
|
84 |
+
# dlib: left_eye: 36:41 right_eye: 42:47 nose: 30,32,33,34 left mouth corner: 48 right mouth corner: 54
|
85 |
+
# self.face_template = np.array([[193.65928, 242.98541], [318.32558, 243.06108], [255.67984, 328.82894],
|
86 |
+
# [198.22603, 372.82502], [313.91018, 372.75659]])
|
87 |
+
|
88 |
+
self.face_template = self.face_template * (face_size / 512.0)
|
89 |
+
if self.crop_ratio[0] > 1:
|
90 |
+
self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
|
91 |
+
if self.crop_ratio[1] > 1:
|
92 |
+
self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
|
93 |
+
self.save_ext = save_ext
|
94 |
+
self.pad_blur = pad_blur
|
95 |
+
if self.pad_blur is True:
|
96 |
+
self.template_3points = False
|
97 |
+
|
98 |
+
self.all_landmarks_5 = []
|
99 |
+
self.det_faces = []
|
100 |
+
self.affine_matrices = []
|
101 |
+
self.inverse_affine_matrices = []
|
102 |
+
self.cropped_faces = []
|
103 |
+
self.restored_faces = []
|
104 |
+
self.pad_input_imgs = []
|
105 |
+
|
106 |
+
if device is None:
|
107 |
+
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
108 |
+
# self.device = get_device()
|
109 |
+
else:
|
110 |
+
self.device = device
|
111 |
+
|
112 |
+
# init face detection model
|
113 |
+
self.face_detector = init_detection_model(det_model, half=False, device=self.device)
|
114 |
+
|
115 |
+
# init face parsing model
|
116 |
+
self.use_parse = use_parse
|
117 |
+
self.face_parse = init_parsing_model(model_name='parsenet', device=self.device)
|
118 |
+
|
119 |
+
def set_upscale_factor(self, upscale_factor):
|
120 |
+
self.upscale_factor = upscale_factor
|
121 |
+
|
122 |
+
def read_image(self, img):
|
123 |
+
"""img can be image path or cv2 loaded image."""
|
124 |
+
# self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255]
|
125 |
+
if isinstance(img, str):
|
126 |
+
img = cv2.imread(img)
|
127 |
+
|
128 |
+
if np.max(img) > 256: # 16-bit image
|
129 |
+
img = img / 65535 * 255
|
130 |
+
if len(img.shape) == 2: # gray image
|
131 |
+
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
132 |
+
elif img.shape[2] == 4: # BGRA image with alpha channel
|
133 |
+
img = img[:, :, 0:3]
|
134 |
+
|
135 |
+
self.input_img = img
|
136 |
+
# self.is_gray = is_gray(img, threshold=10)
|
137 |
+
# if self.is_gray:
|
138 |
+
# print('Grayscale input: True')
|
139 |
+
|
140 |
+
if min(self.input_img.shape[:2]) < 512:
|
141 |
+
f = 512.0 / min(self.input_img.shape[:2])
|
142 |
+
self.input_img = cv2.resize(self.input_img, (0, 0), fx=f, fy=f, interpolation=cv2.INTER_LINEAR)
|
143 |
+
|
144 |
+
def init_dlib(self, detection_path, landmark5_path):
|
145 |
+
"""初始化 dlib 检测器和预测器。"""
|
146 |
+
try:
|
147 |
+
import dlib
|
148 |
+
except ImportError:
|
149 |
+
print('Please install dlib by running: ' 'conda install -c conda-forge dlib')
|
150 |
+
detection_path = load_file_from_url(url=detection_path, model_dir='weights/dlib', progress=True, file_name=None)
|
151 |
+
landmark5_path = load_file_from_url(url=landmark5_path, model_dir='weights/dlib', progress=True, file_name=None)
|
152 |
+
face_detector = dlib.cnn_face_detection_model_v1(detection_path)
|
153 |
+
shape_predictor_5 = dlib.shape_predictor(landmark5_path)
|
154 |
+
return face_detector, shape_predictor_5
|
155 |
+
|
156 |
+
def get_face_landmarks_5_dlib(self,
|
157 |
+
only_keep_largest=False,
|
158 |
+
scale=1):
|
159 |
+
det_faces = self.face_detector(self.input_img, scale)
|
160 |
+
|
161 |
+
if len(det_faces) == 0:
|
162 |
+
print('No face detected. Try to increase upsample_num_times.')
|
163 |
+
return 0
|
164 |
+
else:
|
165 |
+
if only_keep_largest:
|
166 |
+
print('Detected several faces; only keeping the largest one.')
|
167 |
+
face_areas = []
|
168 |
+
for i in range(len(det_faces)):
|
169 |
+
face_area = (det_faces[i].rect.right() - det_faces[i].rect.left()) * (
|
170 |
+
det_faces[i].rect.bottom() - det_faces[i].rect.top())
|
171 |
+
face_areas.append(face_area)
|
172 |
+
largest_idx = face_areas.index(max(face_areas))
|
173 |
+
self.det_faces = [det_faces[largest_idx]]
|
174 |
+
else:
|
175 |
+
self.det_faces = det_faces
|
176 |
+
|
177 |
+
if len(self.det_faces) == 0:
|
178 |
+
return 0
|
179 |
+
|
180 |
+
for face in self.det_faces:
|
181 |
+
shape = self.shape_predictor_5(self.input_img, face.rect)
|
182 |
+
landmark = np.array([[part.x, part.y] for part in shape.parts()])
|
183 |
+
self.all_landmarks_5.append(landmark)
|
184 |
+
|
185 |
+
return len(self.all_landmarks_5)
|
186 |
+
|
187 |
+
def get_face_landmarks_5(self,
|
188 |
+
only_keep_largest=False,
|
189 |
+
only_center_face=False,
|
190 |
+
resize=None,
|
191 |
+
blur_ratio=0.01,
|
192 |
+
eye_dist_threshold=None):
|
193 |
+
if self.det_model == 'dlib':
|
194 |
+
return self.get_face_landmarks_5_dlib(only_keep_largest)
|
195 |
+
|
196 |
+
if resize is None:
|
197 |
+
scale = 1
|
198 |
+
input_img = self.input_img
|
199 |
+
else:
|
200 |
+
h, w = self.input_img.shape[0:2]
|
201 |
+
scale = resize / min(h, w)
|
202 |
+
scale = max(1, scale) # always scale up
|
203 |
+
h, w = int(h * scale), int(w * scale)
|
204 |
+
interp = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
|
205 |
+
input_img = cv2.resize(self.input_img, (w, h), interpolation=interp)
|
206 |
+
|
207 |
+
with torch.no_grad():
|
208 |
+
bboxes = self.face_detector.detect_faces(input_img)
|
209 |
+
|
210 |
+
if bboxes is None or bboxes.shape[0] == 0:
|
211 |
+
return 0
|
212 |
+
else:
|
213 |
+
bboxes = bboxes / scale
|
214 |
+
|
215 |
+
for bbox in bboxes:
|
216 |
+
# remove faces with too small eye distance: side faces or too small faces
|
217 |
+
eye_dist = np.linalg.norm([bbox[6] - bbox[8], bbox[7] - bbox[9]])
|
218 |
+
if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
|
219 |
+
continue
|
220 |
+
|
221 |
+
if self.template_3points:
|
222 |
+
landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
|
223 |
+
else:
|
224 |
+
landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
|
225 |
+
self.all_landmarks_5.append(landmark)
|
226 |
+
self.det_faces.append(bbox[0:5])
|
227 |
+
|
228 |
+
if len(self.det_faces) == 0:
|
229 |
+
return 0
|
230 |
+
if only_keep_largest:
|
231 |
+
h, w, _ = self.input_img.shape
|
232 |
+
self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
|
233 |
+
self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
|
234 |
+
elif only_center_face:
|
235 |
+
h, w, _ = self.input_img.shape
|
236 |
+
self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
|
237 |
+
self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]
|
238 |
+
|
239 |
+
# pad and blur images
|
240 |
+
if self.pad_blur:
|
241 |
+
self.pad_input_imgs = []
|
242 |
+
for landmarks in self.all_landmarks_5:
|
243 |
+
# get landmarks
|
244 |
+
eye_left = landmarks[0, :]
|
245 |
+
eye_right = landmarks[1, :]
|
246 |
+
eye_avg = (eye_left + eye_right) * 0.5
|
247 |
+
mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
|
248 |
+
eye_to_eye = eye_right - eye_left
|
249 |
+
eye_to_mouth = mouth_avg - eye_avg
|
250 |
+
|
251 |
+
# get the oriented crop rectangle
|
252 |
+
# x: half width of the oriented crop rectangle
|
253 |
+
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
|
254 |
+
# - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 degrees clockwise
|
255 |
+
# norm with the hypotenuse: get the direction
|
256 |
+
x /= np.hypot(*x) # get the hypotenuse of the right triangle
|
257 |
+
rect_scale = 1.5
|
258 |
+
x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
|
259 |
+
# y: half height of the oriented crop rectangle
|
260 |
+
y = np.flipud(x) * [-1, 1]
|
261 |
+
|
262 |
+
# c: center
|
263 |
+
c = eye_avg + eye_to_mouth * 0.1
|
264 |
+
# quad: (left_top, left_bottom, right_bottom, right_top)
|
265 |
+
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
|
266 |
+
# qsize: side length of the square
|
267 |
+
qsize = np.hypot(*x) * 2
|
268 |
+
border = max(int(np.rint(qsize * 0.1)), 3)
|
269 |
+
|
270 |
+
# get pad
|
271 |
+
# pad: (width_left, height_top, width_right, height_bottom)
|
272 |
+
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
|
273 |
+
int(np.ceil(max(quad[:, 1]))))
|
274 |
+
pad = [
|
275 |
+
max(-pad[0] + border, 1),
|
276 |
+
max(-pad[1] + border, 1),
|
277 |
+
max(pad[2] - self.input_img.shape[0] + border, 1),
|
278 |
+
max(pad[3] - self.input_img.shape[1] + border, 1)
|
279 |
+
]
|
280 |
+
|
281 |
+
if max(pad) > 1:
|
282 |
+
# pad image
|
283 |
+
pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
|
284 |
+
# modify the landmark coords
|
285 |
+
landmarks[:, 0] += pad[0]
|
286 |
+
landmarks[:, 1] += pad[1]
|
287 |
+
# blur pad image
|
288 |
+
h, w, _ = pad_img.shape
|
289 |
+
y, x, _ = np.ogrid[:h, :w, :1]
|
290 |
+
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
|
291 |
+
np.float32(w - 1 - x) / pad[2]),
|
292 |
+
1.0 - np.minimum(np.float32(y) / pad[1],
|
293 |
+
np.float32(h - 1 - y) / pad[3]))
|
294 |
+
blur = int(qsize * blur_ratio)
|
295 |
+
if blur % 2 == 0:
|
296 |
+
blur += 1
|
297 |
+
blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
|
298 |
+
# blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)
|
299 |
+
|
300 |
+
pad_img = pad_img.astype('float32')
|
301 |
+
pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
|
302 |
+
pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
|
303 |
+
pad_img = np.clip(pad_img, 0, 255) # float32, [0, 255]
|
304 |
+
self.pad_input_imgs.append(pad_img)
|
305 |
+
else:
|
306 |
+
self.pad_input_imgs.append(np.copy(self.input_img))
|
307 |
+
|
308 |
+
return len(self.all_landmarks_5)
|
309 |
+
|
310 |
+
def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
|
311 |
+
"""
|
312 |
+
Align and warp faces with the face template.
|
313 |
+
"""
|
314 |
+
if self.pad_blur:
|
315 |
+
assert len(self.pad_input_imgs) == len(
|
316 |
+
self.all_landmarks_5), f'Mismatched lengths: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
|
317 |
+
for idx, landmark in enumerate(self.all_landmarks_5):
|
318 |
+
# use 5 landmarks to get the affine matrix
|
319 |
+
# use the cv2.LMEDS method for equivalence to the skimage transform
|
320 |
+
# ref: https://blog.csdn.net/yichxi/article/details/115827338
|
321 |
+
affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
|
322 |
+
self.affine_matrices.append(affine_matrix)
|
323 |
+
# warp and crop faces
|
324 |
+
if border_mode == 'constant':
|
325 |
+
border_mode = cv2.BORDER_CONSTANT
|
326 |
+
elif border_mode == 'reflect101':
|
327 |
+
border_mode = cv2.BORDER_REFLECT101
|
328 |
+
elif border_mode == 'reflect':
|
329 |
+
border_mode = cv2.BORDER_REFLECT
|
330 |
+
if self.pad_blur:
|
331 |
+
input_img = self.pad_input_imgs[idx]
|
332 |
+
else:
|
333 |
+
input_img = self.input_img
|
334 |
+
cropped_face = cv2.warpAffine(
|
335 |
+
input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132)) # gray
|
336 |
+
self.cropped_faces.append(cropped_face)
|
337 |
+
# save the cropped face
|
338 |
+
if save_cropped_path is not None:
|
339 |
+
path = os.path.splitext(save_cropped_path)[0]
|
340 |
+
save_path = f'{path}_{idx:02d}.{self.save_ext}'
|
341 |
+
imwrite(cropped_face, save_path)
|
342 |
+
|
343 |
+
def get_inverse_affine(self, save_inverse_affine_path=None):
|
344 |
+
"""获取反仿射矩阵"""
|
345 |
+
for idx, affine_matrix in enumerate(self.affine_matrices):
|
346 |
+
inverse_affine = cv2.invertAffineTransform(affine_matrix)
|
347 |
+
inverse_affine *= self.upscale_factor
|
348 |
+
self.inverse_affine_matrices.append(inverse_affine)
|
349 |
+
# save inverse affine matrices
|
350 |
+
if save_inverse_affine_path is not None:
|
351 |
+
path, _ = os.path.splitext(save_inverse_affine_path)
|
352 |
+
save_path = f'{path}_{idx:02d}.pth'
|
353 |
+
torch.save(inverse_affine, save_path)
|
354 |
+
|
355 |
+
def add_restored_face(self, restored_face, input_face=None):
|
356 |
+
# if self.is_gray:
|
357 |
+
# restored_face = bgr2gray(restored_face) # convert img into grayscale
|
358 |
+
# if input_face is not None:
|
359 |
+
# restored_face = adain_npy(restored_face, input_face) # transfer the color
|
360 |
+
self.restored_faces.append(restored_face)
|
361 |
+
|
362 |
+
def paste_faces_to_input_image(self, save_path=None, upsample_img=None, draw_box=False, face_upsampler=None):
|
363 |
+
h, w, _ = self.input_img.shape
|
364 |
+
h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)
|
365 |
+
|
366 |
+
if upsample_img is None:
|
367 |
+
# simply resize the background
|
368 |
+
# upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
|
369 |
+
upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LINEAR)
|
370 |
+
else:
|
371 |
+
upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
|
372 |
+
|
373 |
+
assert len(self.restored_faces) == len(
|
374 |
+
self.inverse_affine_matrices), ('lengths of restored_faces and affine_matrices are different.')
|
375 |
+
|
376 |
+
inv_mask_borders = []
|
377 |
+
for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
|
378 |
+
if face_upsampler is not None:
|
379 |
+
restored_face = face_upsampler.enhance(restored_face, outscale=self.upscale_factor)[0]
|
380 |
+
inverse_affine /= self.upscale_factor
|
381 |
+
inverse_affine[:, 2] *= self.upscale_factor
|
382 |
+
face_size = (self.face_size[0] * self.upscale_factor, self.face_size[1] * self.upscale_factor)
|
383 |
+
else:
|
384 |
+
# add an offset to the inverse affine matrix for more precise back alignment
|
385 |
+
if self.upscale_factor > 1:
|
386 |
+
extra_offset = 0.5 * self.upscale_factor
|
387 |
+
else:
|
388 |
+
extra_offset = 0
|
389 |
+
inverse_affine[:, 2] += extra_offset
|
390 |
+
face_size = self.face_size
|
391 |
+
inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))
|
392 |
+
|
393 |
+
# if draw_box or not self.use_parse: # use square parse maps
|
394 |
+
# mask = np.ones(face_size, dtype=np.float32)
|
395 |
+
# inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
|
396 |
+
# # remove the black borders
|
397 |
+
# inv_mask_erosion = cv2.erode(
|
398 |
+
# inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
|
399 |
+
# pasted_face = inv_mask_erosion[:, :, None] * inv_restored
|
400 |
+
# total_face_area = np.sum(inv_mask_erosion) # // 3
|
401 |
+
# # add border
|
402 |
+
# if draw_box:
|
403 |
+
# h, w = face_size
|
404 |
+
# mask_border = np.ones((h, w, 3), dtype=np.float32)
|
405 |
+
# border = int(1400/np.sqrt(total_face_area))
|
406 |
+
# mask_border[border:h-border, border:w-border,:] = 0
|
407 |
+
# inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
|
408 |
+
# inv_mask_borders.append(inv_mask_border)
|
409 |
+
# if not self.use_parse:
|
410 |
+
# # compute the fusion edge based on the face area
|
411 |
+
# w_edge = int(total_face_area**0.5) // 20
|
412 |
+
# erosion_radius = w_edge * 2
|
413 |
+
# inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
|
414 |
+
# blur_size = w_edge * 2
|
415 |
+
# inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
|
416 |
+
# if len(upsample_img.shape) == 2: # upsample_img is a gray image
|
417 |
+
# upsample_img = upsample_img[:, :, None]
|
418 |
+
# inv_soft_mask = inv_soft_mask[:, :, None]
|
419 |
+
|
420 |
+
# always use a square mask
|
421 |
+
mask = np.ones(face_size, dtype=np.float32)
|
422 |
+
inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
|
423 |
+
# remove the black borders
|
424 |
+
inv_mask_erosion = cv2.erode(
|
425 |
+
inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
|
426 |
+
pasted_face = inv_mask_erosion[:, :, None] * inv_restored
|
427 |
+
total_face_area = np.sum(inv_mask_erosion) # // 3
|
428 |
+
# add border
|
429 |
+
if draw_box:
|
430 |
+
h, w = face_size
|
431 |
+
mask_border = np.ones((h, w, 3), dtype=np.float32)
|
432 |
+
border = int(1400 / np.sqrt(total_face_area))
|
433 |
+
mask_border[border:h - border, border:w - border, :] = 0
|
434 |
+
inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
|
435 |
+
inv_mask_borders.append(inv_mask_border)
|
436 |
+
# compute the fusion edge based on the face area
|
437 |
+
w_edge = int(total_face_area ** 0.5) // 20
|
438 |
+
erosion_radius = w_edge * 2
|
439 |
+
inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
|
440 |
+
blur_size = w_edge * 2
|
441 |
+
inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
|
442 |
+
if len(upsample_img.shape) == 2: # upsample_img is a gray image
|
443 |
+
upsample_img = upsample_img[:, :, None]
|
444 |
+
inv_soft_mask = inv_soft_mask[:, :, None]
|
445 |
+
|
446 |
+
# parse mask
|
447 |
+
if self.use_parse:
|
448 |
+
# inference
|
449 |
+
face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
|
450 |
+
face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
|
451 |
+
normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
|
452 |
+
face_input = torch.unsqueeze(face_input, 0).to(self.device)
|
453 |
+
with torch.no_grad():
|
454 |
+
out = self.face_parse(face_input)[0]
|
455 |
+
out = out.argmax(dim=1).squeeze().cpu().numpy()
|
456 |
+
|
457 |
+
parse_mask = np.zeros(out.shape)
|
458 |
+
MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
|
459 |
+
for idx, color in enumerate(MASK_COLORMAP):
|
460 |
+
parse_mask[out == idx] = color
|
461 |
+
# blur the mask
|
462 |
+
parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
|
463 |
+
parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
|
464 |
+
# remove the black borders
|
465 |
+
thres = 10
|
466 |
+
parse_mask[:thres, :] = 0
|
467 |
+
parse_mask[-thres:, :] = 0
|
468 |
+
parse_mask[:, :thres] = 0
|
469 |
+
parse_mask[:, -thres:] = 0
|
470 |
+
parse_mask = parse_mask / 255.
|
471 |
+
|
472 |
+
parse_mask = cv2.resize(parse_mask, face_size)
|
473 |
+
parse_mask = cv2.warpAffine(parse_mask, inverse_affine, (w_up, h_up), flags=3)
|
474 |
+
inv_soft_parse_mask = parse_mask[:, :, None]
|
475 |
+
# pasted_face = inv_restored
|
476 |
+
fuse_mask = (inv_soft_parse_mask < inv_soft_mask).astype('int')
|
477 |
+
inv_soft_mask = inv_soft_parse_mask * fuse_mask + inv_soft_mask * (1 - fuse_mask)
|
478 |
+
|
479 |
+
if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4: # alpha channel
|
480 |
+
alpha = upsample_img[:, :, 3:]
|
481 |
+
upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
|
482 |
+
upsample_img = np.concatenate((upsample_img, alpha), axis=2)
|
483 |
+
else:
|
484 |
+
upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img
|
485 |
+
|
486 |
+
if np.max(upsample_img) > 256: # 16-bit image
|
487 |
+
upsample_img = upsample_img.astype(np.uint16)
|
488 |
+
else:
|
489 |
+
upsample_img = upsample_img.astype(np.uint8)
|
490 |
+
|
491 |
+
# draw bounding box
|
492 |
+
if draw_box:
|
493 |
+
# upsample_input_img = cv2.resize(input_img, (w_up, h_up))
|
494 |
+
img_color = np.ones([*upsample_img.shape], dtype=np.float32)
|
495 |
+
img_color[:, :, 0] = 0
|
496 |
+
img_color[:, :, 1] = 255
|
497 |
+
img_color[:, :, 2] = 0
|
498 |
+
for inv_mask_border in inv_mask_borders:
|
499 |
+
upsample_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_img
|
500 |
+
# upsample_input_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_input_img
|
501 |
+
|
502 |
+
if save_path is not None:
|
503 |
+
path = os.path.splitext(save_path)[0]
|
504 |
+
save_path = f'{path}.{self.save_ext}'
|
505 |
+
imwrite(upsample_img, save_path)
|
506 |
+
return upsample_img
|
507 |
+
|
508 |
+
def clean_all(self):
|
509 |
+
self.all_landmarks_5 = []
|
510 |
+
self.restored_faces = []
|
511 |
+
self.affine_matrices = []
|
512 |
+
self.cropped_faces = []
|
513 |
+
self.inverse_affine_matrices = []
|
514 |
+
self.det_faces = []
|
515 |
+
self.pad_input_imgs = []
|
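
For orientation, a minimal sketch of the detect → crop → restore → paste-back cycle this helper implements. The facexlib detection and parsing weights are fetched on first use; `restore_fn` is a placeholder for whatever face restorer the app plugs in, and the exact arguments used in app.py may differ.

    import cv2
    from BOOXEL.utils.face_restoration_helper import FaceRestoreHelper

    helper = FaceRestoreHelper(upscale_factor=2, face_size=512, use_parse=True)
    helper.read_image('input.jpg')                       # path or BGR numpy array
    n_faces = helper.get_face_landmarks_5(only_center_face=False, eye_dist_threshold=5)
    helper.align_warp_face()                             # fills helper.cropped_faces

    for cropped_face in helper.cropped_faces:
        restored = restore_fn(cropped_face)              # placeholder: any 512x512 BGR face restorer
        helper.add_restored_face(restored, cropped_face)

    helper.get_inverse_affine()
    result = helper.paste_faces_to_input_image()         # BGR image, upscale_factor times larger
    cv2.imwrite('output.png', result)
    helper.clean_all()
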
BOOXEL/utils/file.py
ADDED
@@ -0,0 +1,79 @@
1 |
+
import os
|
2 |
+
from typing import List, Tuple
|
3 |
+
|
4 |
+
from urllib.parse import urlparse
|
5 |
+
from torch.hub import download_url_to_file, get_dir
|
6 |
+
|
7 |
+
|
8 |
+
def load_file_list(file_list_path: str) -> List[str]:
|
9 |
+
files = []
|
10 |
+
# each line in the file list contains a path to an image
|
11 |
+
with open(file_list_path, "r") as fin:
|
12 |
+
for line in fin:
|
13 |
+
path = line.strip()
|
14 |
+
if path:
|
15 |
+
files.append(path)
|
16 |
+
return files
|
17 |
+
|
18 |
+
|
19 |
+
def list_image_files(
|
20 |
+
img_dir: str,
|
21 |
+
exts: Tuple[str]=(".jpg", ".png", ".jpeg"),
|
22 |
+
follow_links: bool=False,
|
23 |
+
log_progress: bool=False,
|
24 |
+
log_every_n_files: int=10000,
|
25 |
+
max_size: int=-1
|
26 |
+
) -> List[str]:
|
27 |
+
files = []
|
28 |
+
for dir_path, _, file_names in os.walk(img_dir, followlinks=follow_links):
|
29 |
+
early_stop = False
|
30 |
+
for file_name in file_names:
|
31 |
+
if os.path.splitext(file_name)[1].lower() in exts:
|
32 |
+
if max_size >= 0 and len(files) >= max_size:
|
33 |
+
early_stop = True
|
34 |
+
break
|
35 |
+
files.append(os.path.join(dir_path, file_name))
|
36 |
+
if log_progress and len(files) % log_every_n_files == 0:
|
37 |
+
print(f"文件 {len(files)} 图像在 {img_dir}")
|
38 |
+
if early_stop:
|
39 |
+
break
|
40 |
+
return files
|
41 |
+
|
42 |
+
|
43 |
+
def get_file_name_parts(file_path: str) -> Tuple[str, str, str]:
|
44 |
+
parent_path, file_name = os.path.split(file_path)
|
45 |
+
stem, ext = os.path.splitext(file_name)
|
46 |
+
return parent_path, stem, ext
|
47 |
+
|
48 |
+
|
49 |
+
# https://github.com/XPixelGroup/BasicSR/blob/master/basicsr/utils/download_util.py/
|
50 |
+
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
|
51 |
+
"""通过 http 网址加载文件,必要时下载模型。
|
52 |
+
|
53 |
+
Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
|
54 |
+
|
55 |
+
Args:
|
56 |
+
url (str): URL to be downloaded.
|
57 |
+
model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir.
|
58 |
+
Default: None.
|
59 |
+
progress (bool): Whether to show the download progress. Default: True.
|
60 |
+
file_name (str): The downloaded file name. If None, use the file name in the url. Default: None.
|
61 |
+
|
62 |
+
Returns:
|
63 |
+
str: The path to the downloaded file.
|
64 |
+
"""
|
65 |
+
if model_dir is None: # use the pytorch hub_dir
|
66 |
+
hub_dir = get_dir()
|
67 |
+
model_dir = os.path.join(hub_dir, 'checkpoints')
|
68 |
+
|
69 |
+
os.makedirs(model_dir, exist_ok=True)
|
70 |
+
|
71 |
+
parts = urlparse(url)
|
72 |
+
filename = os.path.basename(parts.path)
|
73 |
+
if file_name is not None:
|
74 |
+
filename = file_name
|
75 |
+
cached_file = os.path.abspath(os.path.join(model_dir, filename))
|
76 |
+
if not os.path.exists(cached_file):
|
77 |
+
print(f'Downloading: "{url}" to {cached_file}\n')
|
78 |
+
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
|
79 |
+
return cached_file
|
BOOXEL/utils/tilevae.py
ADDED
@@ -0,0 +1,971 @@
1 |
+
# ------------------------------------------------------------------------
|
2 |
+
#
|
3 |
+
# Ultimate VAE Tile Optimization
|
4 |
+
#
|
5 |
+
# Introducing a revolutionary new optimization designed to make
|
6 |
+
# the VAE work with giant images on limited VRAM!
|
7 |
+
# Say goodbye to the frustration of OOM and hello to seamless output!
|
8 |
+
#
|
9 |
+
# ------------------------------------------------------------------------
|
10 |
+
#
|
11 |
+
# This script is a wild hack that splits the image into tiles,
|
12 |
+
# encodes each tile separately, and merges the result back together.
|
13 |
+
#
|
14 |
+
# Advantages:
|
15 |
+
# - The VAE can now work with giant images on limited VRAM
|
16 |
+
# (~10 GB for 8K images!)
|
17 |
+
# - The merged output is completely seamless without any post-processing.
|
18 |
+
#
|
19 |
+
# Drawbacks:
|
20 |
+
# - Giant RAM needed. To store the intermediate results for a 4096x4096
|
21 |
+
# image, you need a 32 GB RAM machine (it consumes ~20 GB); for 8192x8192
|
22 |
+
# you need a 128 GB RAM machine (it consumes ~100 GB)
|
23 |
+
# - NaNs always appear for 8k images when you use the fp16 (half) VAE.
|
24 |
+
# You must use --no-half-vae to disable half VAE for that giant image.
|
25 |
+
# - Slow speed. With default tile size, it takes around 50/200 seconds
|
26 |
+
# to encode/decode a 4096x4096 image; and 200/900 seconds to encode/decode
|
27 |
+
# a 8192x8192 image. (The speed is limited by both the GPU and the CPU.)
|
28 |
+
# - The gradient calculation is not compatible with this hack. It
|
29 |
+
# will break any backward() or torch.autograd.grad() that passes VAE.
|
30 |
+
# (But you can still use the VAE to generate training data.)
|
31 |
+
#
|
32 |
+
# How it works:
|
33 |
+
# 1) The image is split into tiles.
|
34 |
+
# - To ensure perfect results, each tile is padded with 32 pixels
|
35 |
+
# on each side.
|
36 |
+
# - Then the conv2d/silu/upsample/downsample can produce identical
|
37 |
+
# results to the original image without splitting.
|
38 |
+
# 2) The original forward is decomposed into a task queue and a task worker.
|
39 |
+
# - The task queue is a list of functions that will be executed in order.
|
40 |
+
# - The task worker is a loop that executes the tasks in the queue.
|
41 |
+
# 3) The task queue is executed for each tile.
|
42 |
+
# - Current tile is sent to GPU.
|
43 |
+
# - local operations are directly executed.
|
44 |
+
# - Group norm calculation is temporarily suspended until the mean
|
45 |
+
# and var of all tiles are calculated.
|
46 |
+
# - The residual is pre-calculated, stored, and added back later.
|
47 |
+
# - When moving to the next tile, the current tile is sent to the CPU.
|
48 |
+
# 4) After all tiles are processed, tiles are merged on cpu and return.
|
49 |
+
#
|
50 |
+
# Enjoy!
|
51 |
+
#
|
52 |
+
# @author: LI YI @ Nanyang Technological University - Singapore
|
53 |
+
# @date: 2023-03-02
|
54 |
+
# @license: MIT License
|
55 |
+
#
|
56 |
+
# Please give me a star if you like this project!
|
57 |
+
#
|
58 |
+
# -------------------------------------------------------------------------
|
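
As an aside to the "How it works" notes above, the core pad-process-merge idea can be illustrated with a standalone sketch. This is not part of tilevae.py and deliberately omits the task queue and the group-norm synchronization the real module performs; `fn` stands for any size-preserving local operation.

    import torch
    import torch.nn.functional as F

    def process_in_tiles(x, fn, tile=64, pad=32):
        # x: (1, C, H, W) tensor; fn: any size-preserving local op
        _, _, H, W = x.shape
        out = torch.zeros_like(x)
        for top in range(0, H, tile):
            for left in range(0, W, tile):
                bottom, right = min(top + tile, H), min(left + tile, W)
                # crop with a halo of `pad` pixels so the op sees real context
                t0, l0 = max(top - pad, 0), max(left - pad, 0)
                b0, r0 = min(bottom + pad, H), min(right + pad, W)
                tile_out = fn(x[:, :, t0:b0, l0:r0])
                # keep only the un-padded center of the processed tile
                out[:, :, top:bottom, left:right] = tile_out[
                    :, :, top - t0:top - t0 + (bottom - top),
                    left - l0:left - l0 + (right - left)]
        return out

    # example: a 3x3 replicate-padded average blur is size-preserving
    blur = lambda t: F.avg_pool2d(F.pad(t, (1, 1, 1, 1), mode='replicate'), 3, stride=1)
    y = process_in_tiles(torch.randn(1, 3, 256, 256), blur)
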
59 |
+
|
60 |
+
import gc
|
61 |
+
from time import time
|
62 |
+
import math
|
63 |
+
from tqdm import tqdm
|
64 |
+
|
65 |
+
import torch
|
66 |
+
import torch.version
|
67 |
+
import torch.nn.functional as F
|
68 |
+
from einops import rearrange
|
69 |
+
from diffusers.utils.import_utils import is_xformers_available
|
70 |
+
|
71 |
+
import BOOXEL.utils.devices as devices
|
72 |
+
|
73 |
+
try:
|
74 |
+
import xformers
|
75 |
+
import xformers.ops
|
76 |
+
except ImportError:
|
77 |
+
pass
|
78 |
+
|
79 |
+
sd_flag = True
|
80 |
+
|
81 |
+
def get_recommend_encoder_tile_size():
|
82 |
+
if torch.cuda.is_available():
|
83 |
+
total_memory = torch.cuda.get_device_properties(
|
84 |
+
devices.device).total_memory // 2**20
|
85 |
+
if total_memory > 16*1000:
|
86 |
+
ENCODER_TILE_SIZE = 3072
|
87 |
+
elif total_memory > 12*1000:
|
88 |
+
ENCODER_TILE_SIZE = 2048
|
89 |
+
elif total_memory > 8*1000:
|
90 |
+
ENCODER_TILE_SIZE = 1536
|
91 |
+
else:
|
92 |
+
ENCODER_TILE_SIZE = 960
|
93 |
+
else:
|
94 |
+
ENCODER_TILE_SIZE = 512
|
95 |
+
return ENCODER_TILE_SIZE
|
96 |
+
|
97 |
+
|
98 |
+
def get_recommend_decoder_tile_size():
|
99 |
+
if torch.cuda.is_available():
|
100 |
+
total_memory = torch.cuda.get_device_properties(
|
101 |
+
devices.device).total_memory // 2**20
|
102 |
+
if total_memory > 30*1000:
|
103 |
+
DECODER_TILE_SIZE = 256
|
104 |
+
elif total_memory > 16*1000:
|
105 |
+
DECODER_TILE_SIZE = 192
|
106 |
+
elif total_memory > 12*1000:
|
107 |
+
DECODER_TILE_SIZE = 128
|
108 |
+
elif total_memory > 8*1000:
|
109 |
+
DECODER_TILE_SIZE = 96
|
110 |
+
else:
|
111 |
+
DECODER_TILE_SIZE = 64
|
112 |
+
else:
|
113 |
+
DECODER_TILE_SIZE = 64
|
114 |
+
return DECODER_TILE_SIZE
|
115 |
+
|
116 |
+
|
117 |
+
if 'global const':
|
118 |
+
DEFAULT_ENABLED = False
|
119 |
+
DEFAULT_MOVE_TO_GPU = False
|
120 |
+
DEFAULT_FAST_ENCODER = True
|
121 |
+
DEFAULT_FAST_DECODER = True
|
122 |
+
DEFAULT_COLOR_FIX = 0
|
123 |
+
DEFAULT_ENCODER_TILE_SIZE = get_recommend_encoder_tile_size()
|
124 |
+
DEFAULT_DECODER_TILE_SIZE = get_recommend_decoder_tile_size()
|
125 |
+
|
126 |
+
|
127 |
+
# inplace version of silu
|
128 |
+
def inplace_nonlinearity(x):
|
129 |
+
# Test: fix for Nans
|
130 |
+
return F.silu(x, inplace=True)
|
131 |
+
|
132 |
+
# extracted from ldm.modules.diffusionmodules.model
|
133 |
+
|
134 |
+
# from diffusers lib
|
135 |
+
def attn_forward_new(self, h_):
|
136 |
+
batch_size, channel, height, width = h_.shape
|
137 |
+
hidden_states = h_.view(batch_size, channel, height * width).transpose(1, 2)
|
138 |
+
|
139 |
+
attention_mask = None
|
140 |
+
encoder_hidden_states = None
|
141 |
+
batch_size, sequence_length, _ = hidden_states.shape
|
142 |
+
attention_mask = self.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
143 |
+
|
144 |
+
query = self.to_q(hidden_states)
|
145 |
+
|
146 |
+
if encoder_hidden_states is None:
|
147 |
+
encoder_hidden_states = hidden_states
|
148 |
+
elif self.norm_cross:
|
149 |
+
encoder_hidden_states = self.norm_encoder_hidden_states(encoder_hidden_states)
|
150 |
+
|
151 |
+
key = self.to_k(encoder_hidden_states)
|
152 |
+
value = self.to_v(encoder_hidden_states)
|
153 |
+
|
154 |
+
query = self.head_to_batch_dim(query)
|
155 |
+
key = self.head_to_batch_dim(key)
|
156 |
+
value = self.head_to_batch_dim(value)
|
157 |
+
|
158 |
+
attention_probs = self.get_attention_scores(query, key, attention_mask)
|
159 |
+
hidden_states = torch.bmm(attention_probs, value)
|
160 |
+
hidden_states = self.batch_to_head_dim(hidden_states)
|
161 |
+
|
162 |
+
# linear proj
|
163 |
+
hidden_states = self.to_out[0](hidden_states)
|
164 |
+
# dropout
|
165 |
+
hidden_states = self.to_out[1](hidden_states)
|
166 |
+
|
167 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
168 |
+
|
169 |
+
return hidden_states
|
170 |
+
|
171 |
+
def attn_forward_new_pt2_0(self, hidden_states,):
|
172 |
+
scale = 1
|
173 |
+
attention_mask = None
|
174 |
+
encoder_hidden_states = None
|
175 |
+
|
176 |
+
input_ndim = hidden_states.ndim
|
177 |
+
|
178 |
+
if input_ndim == 4:
|
179 |
+
batch_size, channel, height, width = hidden_states.shape
|
180 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
181 |
+
|
182 |
+
batch_size, sequence_length, _ = (
|
183 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
184 |
+
)
|
185 |
+
|
186 |
+
if attention_mask is not None:
|
187 |
+
attention_mask = self.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
188 |
+
# scaled_dot_product_attention expects attention_mask shape to be
|
189 |
+
# (batch, heads, source_length, target_length)
|
190 |
+
attention_mask = attention_mask.view(batch_size, self.heads, -1, attention_mask.shape[-1])
|
191 |
+
|
192 |
+
if self.group_norm is not None:
|
193 |
+
hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
194 |
+
|
195 |
+
query = self.to_q(hidden_states, scale=scale)
|
196 |
+
|
197 |
+
if encoder_hidden_states is None:
|
198 |
+
encoder_hidden_states = hidden_states
|
199 |
+
elif self.norm_cross:
|
200 |
+
encoder_hidden_states = self.norm_encoder_hidden_states(encoder_hidden_states)
|
201 |
+
|
202 |
+
key = self.to_k(encoder_hidden_states, scale=scale)
|
203 |
+
value = self.to_v(encoder_hidden_states, scale=scale)
|
204 |
+
|
205 |
+
inner_dim = key.shape[-1]
|
206 |
+
head_dim = inner_dim // self.heads
|
207 |
+
|
208 |
+
query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
|
209 |
+
|
210 |
+
key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
|
211 |
+
value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
|
212 |
+
|
213 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
214 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
215 |
+
hidden_states = F.scaled_dot_product_attention(
|
216 |
+
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
|
217 |
+
)
|
218 |
+
|
219 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.heads * head_dim)
|
220 |
+
hidden_states = hidden_states.to(query.dtype)
|
221 |
+
|
222 |
+
# linear proj
|
223 |
+
hidden_states = self.to_out[0](hidden_states, scale=scale)
|
224 |
+
# dropout
|
225 |
+
hidden_states = self.to_out[1](hidden_states)
|
226 |
+
|
227 |
+
if input_ndim == 4:
|
228 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
229 |
+
|
230 |
+
return hidden_states
|
231 |
+
|
232 |
+
def attn_forward_new_xformers(self, hidden_states):
|
233 |
+
scale = 1
|
234 |
+
attention_op = None
|
235 |
+
attention_mask = None
|
236 |
+
encoder_hidden_states = None
|
237 |
+
|
238 |
+
input_ndim = hidden_states.ndim
|
239 |
+
|
240 |
+
if input_ndim == 4:
|
241 |
+
batch_size, channel, height, width = hidden_states.shape
|
242 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
243 |
+
|
244 |
+
batch_size, key_tokens, _ = (
|
245 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
246 |
+
)
|
247 |
+
|
248 |
+
attention_mask = self.prepare_attention_mask(attention_mask, key_tokens, batch_size)
|
249 |
+
if attention_mask is not None:
|
250 |
+
# expand our mask's singleton query_tokens dimension:
|
251 |
+
# [batch*heads, 1, key_tokens] ->
|
252 |
+
# [batch*heads, query_tokens, key_tokens]
|
253 |
+
# so that it can be added as a bias onto the attention scores that xformers computes:
|
254 |
+
# [batch*heads, query_tokens, key_tokens]
|
255 |
+
# we do this explicitly because xformers doesn't broadcast the singleton dimension for us.
|
256 |
+
_, query_tokens, _ = hidden_states.shape
|
257 |
+
attention_mask = attention_mask.expand(-1, query_tokens, -1)
|
258 |
+
|
259 |
+
if self.group_norm is not None:
|
260 |
+
hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
261 |
+
|
262 |
+
query = self.to_q(hidden_states, scale=scale)
|
263 |
+
|
264 |
+
if encoder_hidden_states is None:
|
265 |
+
encoder_hidden_states = hidden_states
|
266 |
+
elif self.norm_cross:
|
267 |
+
encoder_hidden_states = self.norm_encoder_hidden_states(encoder_hidden_states)
|
268 |
+
|
269 |
+
key = self.to_k(encoder_hidden_states, scale=scale)
|
270 |
+
value = self.to_v(encoder_hidden_states, scale=scale)
|
271 |
+
|
272 |
+
query = self.head_to_batch_dim(query).contiguous()
|
273 |
+
key = self.head_to_batch_dim(key).contiguous()
|
274 |
+
value = self.head_to_batch_dim(value).contiguous()
|
275 |
+
|
276 |
+
hidden_states = xformers.ops.memory_efficient_attention(
|
277 |
+
query, key, value, attn_bias=attention_mask, op=attention_op#, scale=scale
|
278 |
+
)
|
279 |
+
hidden_states = hidden_states.to(query.dtype)
|
280 |
+
hidden_states = self.batch_to_head_dim(hidden_states)
|
281 |
+
|
282 |
+
# linear proj
|
283 |
+
hidden_states = self.to_out[0](hidden_states, scale=scale)
|
284 |
+
# dropout
|
285 |
+
hidden_states = self.to_out[1](hidden_states)
|
286 |
+
|
287 |
+
if input_ndim == 4:
|
288 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
289 |
+
|
290 |
+
return hidden_states
|
291 |
+
|
292 |
+
def attn_forward(self, h_):
|
293 |
+
q = self.q(h_)
|
294 |
+
k = self.k(h_)
|
295 |
+
v = self.v(h_)
|
296 |
+
|
297 |
+
# compute attention
|
298 |
+
b, c, h, w = q.shape
|
299 |
+
q = q.reshape(b, c, h*w)
|
300 |
+
q = q.permute(0, 2, 1) # b,hw,c
|
301 |
+
k = k.reshape(b, c, h*w) # b,c,hw
|
302 |
+
w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
|
303 |
+
w_ = w_ * (int(c)**(-0.5))
|
304 |
+
w_ = torch.nn.functional.softmax(w_, dim=2)
|
305 |
+
|
306 |
+
# attend to values
|
307 |
+
v = v.reshape(b, c, h*w)
|
308 |
+
w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
|
309 |
+
# b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
|
310 |
+
h_ = torch.bmm(v, w_)
|
311 |
+
h_ = h_.reshape(b, c, h, w)
|
312 |
+
|
313 |
+
h_ = self.proj_out(h_)
|
314 |
+
|
315 |
+
return h_
|
316 |
+
|
317 |
+
|
318 |
+
def xformer_attn_forward(self, h_):
|
319 |
+
q = self.q(h_)
|
320 |
+
k = self.k(h_)
|
321 |
+
v = self.v(h_)
|
322 |
+
|
323 |
+
# compute attention
|
324 |
+
B, C, H, W = q.shape
|
325 |
+
q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
|
326 |
+
|
327 |
+
q, k, v = map(
|
328 |
+
lambda t: t.unsqueeze(3)
|
329 |
+
.reshape(B, t.shape[1], 1, C)
|
330 |
+
.permute(0, 2, 1, 3)
|
331 |
+
.reshape(B * 1, t.shape[1], C)
|
332 |
+
.contiguous(),
|
333 |
+
(q, k, v),
|
334 |
+
)
|
335 |
+
out = xformers.ops.memory_efficient_attention(
|
336 |
+
q, k, v, attn_bias=None, op=self.attention_op)
|
337 |
+
|
338 |
+
out = (
|
339 |
+
out.unsqueeze(0)
|
340 |
+
.reshape(B, 1, out.shape[1], C)
|
341 |
+
.permute(0, 2, 1, 3)
|
342 |
+
.reshape(B, out.shape[1], C)
|
343 |
+
)
|
344 |
+
out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
|
345 |
+
out = self.proj_out(out)
|
346 |
+
return out
|
347 |
+
|
348 |
+
|
349 |
+
def attn2task(task_queue, net):
|
350 |
+
if False: #isinstance(net, AttnBlock):
|
351 |
+
task_queue.append(('store_res', lambda x: x))
|
352 |
+
task_queue.append(('pre_norm', net.norm))
|
353 |
+
task_queue.append(('attn', lambda x, net=net: attn_forward(net, x)))
|
354 |
+
task_queue.append(['add_res', None])
|
355 |
+
elif False: #isinstance(net, MemoryEfficientAttnBlock):
|
356 |
+
task_queue.append(('store_res', lambda x: x))
|
357 |
+
task_queue.append(('pre_norm', net.norm))
|
358 |
+
task_queue.append(
|
359 |
+
('attn', lambda x, net=net: xformer_attn_forward(net, x)))
|
360 |
+
task_queue.append(['add_res', None])
|
361 |
+
else:
|
362 |
+
task_queue.append(('store_res', lambda x: x))
|
363 |
+
task_queue.append(('pre_norm', net.norm))
|
364 |
+
if is_xformers_available:
|
365 |
+
# task_queue.append(('attn', lambda x, net=net: attn_forward_new_xformers(net, x)))
|
366 |
+
task_queue.append(
|
367 |
+
('attn', lambda x, net=net: xformer_attn_forward(net, x)))
|
368 |
+
elif hasattr(F, "scaled_dot_product_attention"):
|
369 |
+
task_queue.append(('attn', lambda x, net=net: attn_forward_new_pt2_0(net, x)))
|
370 |
+
else:
|
371 |
+
task_queue.append(('attn', lambda x, net=net: attn_forward_new(net, x)))
|
372 |
+
task_queue.append(['add_res', None])
|
373 |
+
|
374 |
+
def resblock2task(queue, block):
|
375 |
+
"""
|
376 |
+
Turn a ResNetBlock into a sequence of tasks and append to the task queue
|
377 |
+
|
378 |
+
@param queue: the target task queue
|
379 |
+
@param block: ResNetBlock
|
380 |
+
|
381 |
+
"""
|
382 |
+
if block.in_channels != block.out_channels:
|
383 |
+
if sd_flag:
|
384 |
+
if block.use_conv_shortcut:
|
385 |
+
queue.append(('store_res', block.conv_shortcut))
|
386 |
+
else:
|
387 |
+
queue.append(('store_res', block.nin_shortcut))
|
388 |
+
else:
|
389 |
+
if block.use_in_shortcut:
|
390 |
+
queue.append(('store_res', block.conv_shortcut))
|
391 |
+
else:
|
392 |
+
queue.append(('store_res', block.nin_shortcut))
|
393 |
+
|
394 |
+
else:
|
395 |
+
queue.append(('store_res', lambda x: x))
|
396 |
+
queue.append(('pre_norm', block.norm1))
|
397 |
+
queue.append(('silu', inplace_nonlinearity))
|
398 |
+
queue.append(('conv1', block.conv1))
|
399 |
+
queue.append(('pre_norm', block.norm2))
|
400 |
+
queue.append(('silu', inplace_nonlinearity))
|
401 |
+
queue.append(('conv2', block.conv2))
|
402 |
+
queue.append(['add_res', None])
|
403 |
+
|
404 |
+
|
405 |
+
def build_sampling(task_queue, net, is_decoder):
|
406 |
+
"""
|
407 |
+
Build the sampling part of a task queue
|
408 |
+
@param task_queue: the target task queue
|
409 |
+
@param net: the network
|
410 |
+
@param is_decoder: currently building decoder or encoder
|
411 |
+
"""
|
412 |
+
if is_decoder:
|
413 |
+
if sd_flag:
|
414 |
+
resblock2task(task_queue, net.mid.block_1)
|
415 |
+
attn2task(task_queue, net.mid.attn_1)
|
416 |
+
print(task_queue)
|
417 |
+
resblock2task(task_queue, net.mid.block_2)
|
418 |
+
resolution_iter = reversed(range(net.num_resolutions))
|
419 |
+
block_ids = net.num_res_blocks + 1
|
420 |
+
condition = 0
|
421 |
+
module = net.up
|
422 |
+
func_name = 'upsample'
|
423 |
+
else:
|
424 |
+
resblock2task(task_queue, net.mid_block.resnets[0])
|
425 |
+
attn2task(task_queue, net.mid_block.attentions[0])
|
426 |
+
resblock2task(task_queue, net.mid_block.resnets[1])
|
427 |
+
resolution_iter = (range(len(net.up_blocks))) # net.num_resolutions = 3
|
428 |
+
block_ids = 2 + 1
|
429 |
+
condition = len(net.up_blocks) - 1
|
430 |
+
module = net.up_blocks
|
431 |
+
func_name = 'upsamplers'
|
432 |
+
else:
|
433 |
+
if sd_flag:
|
434 |
+
resolution_iter = range(net.num_resolutions)
|
435 |
+
block_ids = net.num_res_blocks
|
436 |
+
condition = net.num_resolutions - 1
|
437 |
+
module = net.down
|
438 |
+
func_name = 'downsample'
|
439 |
+
else:
|
440 |
+
resolution_iter = range(len(net.down_blocks))
|
441 |
+
block_ids = 2
|
442 |
+
condition = len(net.down_blocks) - 1
|
443 |
+
module = net.down_blocks
|
444 |
+
func_name = 'downsamplers'
|
445 |
+
|
446 |
+
for i_level in resolution_iter:
|
447 |
+
for i_block in range(block_ids):
|
448 |
+
if sd_flag:
|
449 |
+
resblock2task(task_queue, module[i_level].block[i_block])
|
450 |
+
else:
|
451 |
+
resblock2task(task_queue, module[i_level].resnets[i_block])
|
452 |
+
if i_level != condition:
|
453 |
+
if sd_flag:
|
454 |
+
task_queue.append((func_name, getattr(module[i_level], func_name)))
|
455 |
+
else:
|
456 |
+
if is_decoder:
|
457 |
+
task_queue.append((func_name, module[i_level].upsamplers[0]))
|
458 |
+
else:
|
459 |
+
task_queue.append((func_name, module[i_level].downsamplers[0]))
|
460 |
+
|
461 |
+
if not is_decoder:
|
462 |
+
if sd_flag:
|
463 |
+
resblock2task(task_queue, net.mid.block_1)
|
464 |
+
attn2task(task_queue, net.mid.attn_1)
|
465 |
+
resblock2task(task_queue, net.mid.block_2)
|
466 |
+
else:
|
467 |
+
resblock2task(task_queue, net.mid_block.resnets[0])
|
468 |
+
attn2task(task_queue, net.mid_block.attentions[0])
|
469 |
+
resblock2task(task_queue, net.mid_block.resnets[1])
|
470 |
+
|
471 |
+
|
472 |
+
def build_task_queue(net, is_decoder):
|
473 |
+
"""
|
474 |
+
Build a single task queue for the encoder or decoder
|
475 |
+
@param net: the VAE decoder or encoder network
|
476 |
+
@param is_decoder: True when building the decoder, False when building the encoder
|
477 |
+
@return: the task queue
|
478 |
+
"""
|
479 |
+
task_queue = []
|
480 |
+
task_queue.append(('conv_in', net.conv_in))
|
481 |
+
|
482 |
+
# construct the sampling part of the task queue
|
483 |
+
# because encoder and decoder share the same architecture, we extract the sampling part
|
484 |
+
build_sampling(task_queue, net, is_decoder)
|
485 |
+
if is_decoder and not sd_flag:
|
486 |
+
net.give_pre_end = False
|
487 |
+
net.tanh_out = False
|
488 |
+
|
489 |
+
if not is_decoder or not net.give_pre_end:
|
490 |
+
if sd_flag:
|
491 |
+
task_queue.append(('pre_norm', net.norm_out))
|
492 |
+
else:
|
493 |
+
task_queue.append(('pre_norm', net.conv_norm_out))
|
494 |
+
task_queue.append(('silu', inplace_nonlinearity))
|
495 |
+
task_queue.append(('conv_out', net.conv_out))
|
496 |
+
if is_decoder and net.tanh_out:
|
497 |
+
task_queue.append(('tanh', torch.tanh))
|
498 |
+
|
499 |
+
return task_queue
|
500 |
+
|
501 |
+
|
502 |
+
def clone_task_queue(task_queue):
|
503 |
+
"""
|
504 |
+
Clone a task queue
|
505 |
+
@param task_queue: the task queue to be cloned
|
506 |
+
@return: the cloned task queue
|
507 |
+
"""
|
508 |
+
return [[item for item in task] for task in task_queue]
|
509 |
+
|
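# A simplified, hypothetical driver showing how a queue produced by build_task_queue()
# is meant to be consumed on a single tile: 'store_res' saves the shortcut branch,
# 'add_res' closes the residual connection, every other entry just applies its module.
# The real driver is VAEHook.vae_tile_forward() further below, which in addition moves
# tiles between CPU and GPU and replaces the 'pre_norm' entries with tile-aggregated
# group-norm statistics.
def run_task_queue_on_single_tile(task_queue, tile):
    stored_res = None
    for task_name, func in task_queue:
        if task_name == 'store_res':
            stored_res = func(tile)
        elif task_name == 'add_res':
            tile = tile + stored_res
            stored_res = None
        else:
            tile = func(tile)
    return tile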
510 |
+
|
511 |
+
def get_var_mean(input, num_groups, eps=1e-6):
|
512 |
+
"""
|
513 |
+
Get mean and var for group norm
|
514 |
+
"""
|
515 |
+
b, c = input.size(0), input.size(1)
|
516 |
+
channel_in_group = int(c/num_groups)
|
517 |
+
input_reshaped = input.contiguous().view(
|
518 |
+
1, int(b * num_groups), channel_in_group, *input.size()[2:])
|
519 |
+
var, mean = torch.var_mean(
|
520 |
+
input_reshaped, dim=[0, 2, 3, 4], unbiased=False)
|
521 |
+
return var, mean
|
522 |
+
|
523 |
+
|
524 |
+
def custom_group_norm(input, num_groups, mean, var, weight=None, bias=None, eps=1e-6):
|
525 |
+
"""
|
526 |
+
Custom group norm with fixed mean and var
|
527 |
+
|
528 |
+
@param input: input tensor
|
529 |
+
@param num_groups: number of groups. by default, num_groups = 32
|
530 |
+
@param mean: mean, must be pre-calculated by get_var_mean
|
531 |
+
@param var: var, must be pre-calculated by get_var_mean
|
532 |
+
@param weight: weight, should be fetched from the original group norm
|
533 |
+
@param bias: bias, should be fetched from the original group norm
|
534 |
+
@param eps: epsilon, by default, eps = 1e-6 to match the original group norm
|
535 |
+
|
536 |
+
@return: normalized tensor
|
537 |
+
"""
|
538 |
+
b, c = input.size(0), input.size(1)
|
539 |
+
channel_in_group = int(c/num_groups)
|
540 |
+
input_reshaped = input.contiguous().view(
|
541 |
+
1, int(b * num_groups), channel_in_group, *input.size()[2:])
|
542 |
+
|
543 |
+
out = F.batch_norm(input_reshaped, mean, var, weight=None, bias=None,
|
544 |
+
training=False, momentum=0, eps=eps)
|
545 |
+
|
546 |
+
out = out.view(b, c, *input.size()[2:])
|
547 |
+
|
548 |
+
# post affine transform
|
549 |
+
if weight is not None:
|
550 |
+
out *= weight.view(1, -1, 1, 1)
|
551 |
+
if bias is not None:
|
552 |
+
out += bias.view(1, -1, 1, 1)
|
553 |
+
return out
|
554 |
+
|
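# A minimal sanity-check sketch (illustrative only, the helper name is not part of the
# module): with statistics computed from the same tensor, custom_group_norm() should
# reproduce F.group_norm() up to numerical noise, which is exactly the property the
# tiled estimation below relies on.
def _group_norm_equivalence_check():
    x = torch.randn(1, 64, 32, 32)
    weight, bias = torch.ones(64), torch.zeros(64)
    var, mean = get_var_mean(x, 32)
    y_tiled = custom_group_norm(x, 32, mean, var, weight, bias, eps=1e-6)
    y_ref = F.group_norm(x, 32, weight, bias, eps=1e-6)
    return torch.allclose(y_tiled, y_ref, atol=1e-4)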
555 |
+
|
556 |
+
def crop_valid_region(x, input_bbox, target_bbox, is_decoder):
|
557 |
+
"""
|
558 |
+
Crop the valid region from the tile
|
559 |
+
@param x: input tile
|
560 |
+
@param input_bbox: original input bounding box
|
561 |
+
@param target_bbox: output bounding box
|
562 |
+
@param is_decoder: True if the tile comes from the decoder (bboxes scale up x8), False for the encoder (bboxes scale down /8)
|
563 |
+
@return: cropped tile
|
564 |
+
"""
|
565 |
+
padded_bbox = [i * 8 if is_decoder else i//8 for i in input_bbox]
|
566 |
+
margin = [target_bbox[i] - padded_bbox[i] for i in range(4)]
|
567 |
+
return x[:, :, margin[2]:x.size(2)+margin[3], margin[0]:x.size(3)+margin[1]]
|
568 |
+
|
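# Worked example for the decoder case above (pad = 11, tile_size = 96, latent 214x214):
# the bottom-right tile has padded input bbox [96, 214, 96, 214] and output bbox
# [856, 1712, 856, 1712]. The decoded tile is (214 - 96) * 8 = 944 px square, the
# margins come out as [88, 0, 88, 0], and the slice keeps tile[:, :, 88:, 88:] --
# exactly the 856x856 block that belongs to this tile in the assembled output.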
569 |
+
# ↓↓↓ https://github.com/Kahsolt/stable-diffusion-webui-vae-tile-infer ↓↓↓
|
570 |
+
|
571 |
+
|
572 |
+
def perfcount(fn):
|
573 |
+
def wrapper(*args, **kwargs):
|
574 |
+
ts = time()
|
575 |
+
|
576 |
+
if torch.cuda.is_available():
|
577 |
+
torch.cuda.reset_peak_memory_stats(devices.device)
|
578 |
+
devices.torch_gc()
|
579 |
+
gc.collect()
|
580 |
+
|
581 |
+
ret = fn(*args, **kwargs)
|
582 |
+
|
583 |
+
devices.torch_gc()
|
584 |
+
gc.collect()
|
585 |
+
if torch.cuda.is_available():
|
586 |
+
vram = torch.cuda.max_memory_allocated(devices.device) / 2**20
|
587 |
+
torch.cuda.reset_peak_memory_stats(devices.device)
|
588 |
+
print(
|
589 |
+
f'[Tiled VAE]: Done in {time() - ts:.3f}s, max VRAM alloc {vram:.3f} MB')
|
590 |
+
else:
|
591 |
+
print(f'[Tiled VAE]: Done in {time() - ts:.3f}s')
|
592 |
+
|
593 |
+
return ret
|
594 |
+
return wrapper
|
595 |
+
|
596 |
+
# copy end :)
|
597 |
+
|
598 |
+
|
599 |
+
class GroupNormParam:
|
600 |
+
def __init__(self):
|
601 |
+
self.var_list = []
|
602 |
+
self.mean_list = []
|
603 |
+
self.pixel_list = []
|
604 |
+
self.weight = None
|
605 |
+
self.bias = None
|
606 |
+
|
607 |
+
def add_tile(self, tile, layer):
|
608 |
+
var, mean = get_var_mean(tile, 32)
|
609 |
+
# For giant images, the variance can be larger than max float16
|
610 |
+
# In this case we create a copy to float32
|
611 |
+
if var.dtype == torch.float16 and var.isinf().any():
|
612 |
+
fp32_tile = tile.float()
|
613 |
+
var, mean = get_var_mean(fp32_tile, 32)
|
614 |
+
# ============= DEBUG: test for infinite =============
|
615 |
+
# if torch.isinf(var).any():
|
616 |
+
# print('var: ', var)
|
617 |
+
# ====================================================
|
618 |
+
self.var_list.append(var)
|
619 |
+
self.mean_list.append(mean)
|
620 |
+
self.pixel_list.append(
|
621 |
+
tile.shape[2]*tile.shape[3])
|
622 |
+
if hasattr(layer, 'weight'):
|
623 |
+
self.weight = layer.weight
|
624 |
+
self.bias = layer.bias
|
625 |
+
else:
|
626 |
+
self.weight = None
|
627 |
+
self.bias = None
|
628 |
+
|
629 |
+
def summary(self):
|
630 |
+
"""
|
631 |
+
summarize the mean and var and return a function
|
632 |
+
that applies group norm to each tile
|
633 |
+
"""
|
634 |
+
if len(self.var_list) == 0:
|
635 |
+
return None
|
636 |
+
var = torch.vstack(self.var_list)
|
637 |
+
mean = torch.vstack(self.mean_list)
|
638 |
+
max_value = max(self.pixel_list)
|
639 |
+
pixels = torch.tensor(
|
640 |
+
self.pixel_list, dtype=torch.float32, device=devices.device) / max_value
|
641 |
+
sum_pixels = torch.sum(pixels)
|
642 |
+
pixels = pixels.unsqueeze(
|
643 |
+
1) / sum_pixels
|
644 |
+
var = torch.sum(
|
645 |
+
var * pixels, dim=0)
|
646 |
+
mean = torch.sum(
|
647 |
+
mean * pixels, dim=0)
|
648 |
+
return lambda x: custom_group_norm(x, 32, mean, var, self.weight, self.bias)
|
649 |
+
|
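# In effect summary() builds a pixel-count weighted average of the per-tile statistics:
# with weights w_i = pixels_i / sum_j pixels_j it returns
#     mean = sum_i w_i * mean_i   and   var = sum_i w_i * var_i,
# which approximates the statistics GroupNorm would have computed on the full,
# un-tiled feature map (the between-tile spread of the means is ignored and the padded
# tiles overlap, so this is an approximation rather than an exact reconstruction).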
650 |
+
@staticmethod
|
651 |
+
def from_tile(tile, norm):
|
652 |
+
"""
|
653 |
+
create a function from a single tile without summary
|
654 |
+
"""
|
655 |
+
var, mean = get_var_mean(tile, 32)
|
656 |
+
if var.dtype == torch.float16 and var.isinf().any():
|
657 |
+
fp32_tile = tile.float()
|
658 |
+
var, mean = get_var_mean(fp32_tile, 32)
|
659 |
+
# on MPS (Apple Silicon / MacBook) we need to convert back to float16
|
660 |
+
if var.device.type == 'mps':
|
661 |
+
# clamp to avoid overflow
|
662 |
+
var = torch.clamp(var, 0, 60000)
|
663 |
+
var = var.half()
|
664 |
+
mean = mean.half()
|
665 |
+
if hasattr(norm, 'weight'):
|
666 |
+
weight = norm.weight
|
667 |
+
bias = norm.bias
|
668 |
+
else:
|
669 |
+
weight = None
|
670 |
+
bias = None
|
671 |
+
|
672 |
+
def group_norm_func(x, mean=mean, var=var, weight=weight, bias=bias):
|
673 |
+
return custom_group_norm(x, 32, mean, var, weight, bias, 1e-6)
|
674 |
+
return group_norm_func
|
675 |
+
|
676 |
+
|
677 |
+
class VAEHook:
|
678 |
+
def __init__(self, net, tile_size, is_decoder, fast_decoder, fast_encoder, color_fix, to_gpu=False):
|
679 |
+
self.net = net # encoder | decoder
|
680 |
+
self.tile_size = tile_size
|
681 |
+
self.is_decoder = is_decoder
|
682 |
+
self.fast_mode = (fast_encoder and not is_decoder) or (
|
683 |
+
fast_decoder and is_decoder)
|
684 |
+
self.color_fix = color_fix and not is_decoder
|
685 |
+
self.to_gpu = to_gpu
|
686 |
+
self.pad = 11 if is_decoder else 32
|
687 |
+
|
688 |
+
def __call__(self, x):
|
689 |
+
B, C, H, W = x.shape
|
690 |
+
original_device = next(self.net.parameters()).device
|
691 |
+
try:
|
692 |
+
if self.to_gpu:
|
693 |
+
self.net.to(devices.get_optimal_device())
|
694 |
+
if max(H, W) <= self.pad * 2 + self.tile_size:
|
695 |
+
print("[Tiled VAE]: the input size is tiny and unnecessary to tile.")
|
696 |
+
return self.net.original_forward(x)
|
697 |
+
else:
|
698 |
+
return self.vae_tile_forward(x)
|
699 |
+
finally:
|
700 |
+
self.net.to(original_device)
|
701 |
+
|
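# Note on the bypass threshold in __call__ above: with the tile sizes used by app.py
# (encoder_tile_size=512, decoder_tile_size=64) and the pads set in __init__
# (32 for the encoder, 11 for the decoder), tiling is skipped for encoder inputs up to
# 2 * 32 + 512 = 576 px and for decoder latents up to 2 * 11 + 64 = 86 latent pixels
# (about 688 px after the x8 decode), since such inputs are small enough to run untiled.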
702 |
+
def get_best_tile_size(self, lowerbound, upperbound):
|
703 |
+
"""
|
704 |
+
Get the best tile size for GPU memory
|
705 |
+
"""
|
706 |
+
divider = 32
|
707 |
+
while divider >= 2:
|
708 |
+
remainder = lowerbound % divider
|
709 |
+
if remainder == 0:
|
710 |
+
return lowerbound
|
711 |
+
candidate = lowerbound - remainder + divider
|
712 |
+
if candidate <= upperbound:
|
713 |
+
return candidate
|
714 |
+
divider //= 2
|
715 |
+
return lowerbound
|
716 |
+
|
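# Worked examples of the rounding above: get_best_tile_size(90, 96) returns 96, the
# next multiple of 32 that still fits the upper bound, while get_best_tile_size(90, 95)
# falls through dividers 32, 16 and 8 (their candidate 96 does not fit) and returns 92,
# the next multiple of 4. If no aligned candidate fits, the lower bound is returned as-is.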
717 |
+
def split_tiles(self, h, w):
|
718 |
+
"""
|
719 |
+
Tool function to split the image into tiles
|
720 |
+
@param h: height of the image
|
721 |
+
@param w: width of the image
|
722 |
+
@return: tile_input_bboxes, tile_output_bboxes
|
723 |
+
"""
|
724 |
+
tile_input_bboxes, tile_output_bboxes = [], []
|
725 |
+
tile_size = self.tile_size
|
726 |
+
pad = self.pad
|
727 |
+
num_height_tiles = math.ceil((h - 2 * pad) / tile_size)
|
728 |
+
num_width_tiles = math.ceil((w - 2 * pad) / tile_size)
|
729 |
+
# If any of the numbers are 0, we let it be 1
|
730 |
+
# This is to deal with long and thin images
|
731 |
+
num_height_tiles = max(num_height_tiles, 1)
|
732 |
+
num_width_tiles = max(num_width_tiles, 1)
|
733 |
+
|
734 |
+
# Suggestions from https://github.com/Kahsolt: auto shrink the tile size
|
735 |
+
real_tile_height = math.ceil((h - 2 * pad) / num_height_tiles)
|
736 |
+
real_tile_width = math.ceil((w - 2 * pad) / num_width_tiles)
|
737 |
+
real_tile_height = self.get_best_tile_size(real_tile_height, tile_size)
|
738 |
+
real_tile_width = self.get_best_tile_size(real_tile_width, tile_size)
|
739 |
+
|
740 |
+
print(f'[Tiled VAE]: split to {num_height_tiles}x{num_width_tiles} = {num_height_tiles*num_width_tiles} tiles. ' +
|
741 |
+
f'Optimal tile size {real_tile_width}x{real_tile_height}, original tile size {tile_size}x{tile_size}')
|
742 |
+
|
743 |
+
for i in range(num_height_tiles):
|
744 |
+
for j in range(num_width_tiles):
|
745 |
+
# bbox: [x1, x2, y1, y2]
|
746 |
+
# the padding is unnecessary at image borders, so we start directly from (pad, pad)
|
747 |
+
input_bbox = [
|
748 |
+
pad + j * real_tile_width,
|
749 |
+
min(pad + (j + 1) * real_tile_width, w),
|
750 |
+
pad + i * real_tile_height,
|
751 |
+
min(pad + (i + 1) * real_tile_height, h),
|
752 |
+
]
|
753 |
+
|
754 |
+
# if the output bbox is close to the image boundary, we extend it to the image boundary
|
755 |
+
output_bbox = [
|
756 |
+
input_bbox[0] if input_bbox[0] > pad else 0,
|
757 |
+
input_bbox[1] if input_bbox[1] < w - pad else w,
|
758 |
+
input_bbox[2] if input_bbox[2] > pad else 0,
|
759 |
+
input_bbox[3] if input_bbox[3] < h - pad else h,
|
760 |
+
]
|
761 |
+
|
762 |
+
# scale to get the final output bbox
|
763 |
+
output_bbox = [x * 8 if self.is_decoder else x // 8 for x in output_bbox]
|
764 |
+
tile_output_bboxes.append(output_bbox)
|
765 |
+
|
766 |
+
# unconditionally expand the input bbox by pad pixels on each side
|
767 |
+
tile_input_bboxes.append([
|
768 |
+
max(0, input_bbox[0] - pad),
|
769 |
+
min(w, input_bbox[1] + pad),
|
770 |
+
max(0, input_bbox[2] - pad),
|
771 |
+
min(h, input_bbox[3] + pad),
|
772 |
+
])
|
773 |
+
|
774 |
+
return tile_input_bboxes, tile_output_bboxes
|
775 |
+
|
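# Worked example (decoder: pad = 11, tile_size = 64, latent 128x128): split_tiles
# produces a 2x2 grid with an auto-shrunk real tile size of 64. The top-left tile gets
# input_bbox [11, 75, 11, 75], padded input bbox [0, 86, 0, 86] and, after the x8 scale,
# output bbox [0, 600, 0, 600]; neighbouring padded tiles overlap by 2 * pad = 22 latent
# pixels, which is the seam that crop_valid_region() later discards.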
776 |
+
@torch.no_grad()
|
777 |
+
def estimate_group_norm(self, z, task_queue, color_fix):
|
778 |
+
device = z.device
|
779 |
+
tile = z
|
780 |
+
last_id = len(task_queue) - 1
|
781 |
+
while last_id >= 0 and task_queue[last_id][0] != 'pre_norm':
|
782 |
+
last_id -= 1
|
783 |
+
if last_id <= 0 or task_queue[last_id][0] != 'pre_norm':
|
784 |
+
raise ValueError('No group norm found in the task queue')
|
785 |
+
# estimate until the last group norm
|
786 |
+
for i in range(last_id + 1):
|
787 |
+
task = task_queue[i]
|
788 |
+
if task[0] == 'pre_norm':
|
789 |
+
group_norm_func = GroupNormParam.from_tile(tile, task[1])
|
790 |
+
task_queue[i] = ('apply_norm', group_norm_func)
|
791 |
+
if i == last_id:
|
792 |
+
return True
|
793 |
+
tile = group_norm_func(tile)
|
794 |
+
elif task[0] == 'store_res':
|
795 |
+
task_id = i + 1
|
796 |
+
while task_id < last_id and task_queue[task_id][0] != 'add_res':
|
797 |
+
task_id += 1
|
798 |
+
if task_id >= last_id:
|
799 |
+
continue
|
800 |
+
task_queue[task_id][1] = task[1](tile)
|
801 |
+
elif task[0] == 'add_res':
|
802 |
+
tile += task[1].to(device)
|
803 |
+
task[1] = None
|
804 |
+
elif color_fix and task[0] == 'downsample':
|
805 |
+
for j in range(i, last_id + 1):
|
806 |
+
if task_queue[j][0] == 'store_res':
|
807 |
+
task_queue[j] = ('store_res_cpu', task_queue[j][1])
|
808 |
+
return True
|
809 |
+
else:
|
810 |
+
tile = task[1](tile)
|
811 |
+
try:
|
812 |
+
devices.test_for_nans(tile, "vae")
|
813 |
+
except Exception:
|
814 |
+
print('NaN detected in fast mode estimation. Fast mode disabled.')
|
815 |
+
return False
|
816 |
+
|
817 |
+
raise IndexError('Should not reach here')
|
818 |
+
|
819 |
+
@perfcount
|
820 |
+
@torch.no_grad()
|
821 |
+
def vae_tile_forward(self, z):
|
822 |
+
"""
|
823 |
+
Decode a latent vector z into an image in a tiled manner.
|
824 |
+
@param z: latent vector
|
825 |
+
@return: image
|
826 |
+
"""
|
827 |
+
device = next(self.net.parameters()).device
|
828 |
+
dtype = z.dtype
|
829 |
+
net = self.net
|
830 |
+
tile_size = self.tile_size
|
831 |
+
is_decoder = self.is_decoder
|
832 |
+
|
833 |
+
z = z.detach() # detach the input to avoid backprop
|
834 |
+
|
835 |
+
N, height, width = z.shape[0], z.shape[2], z.shape[3]
|
836 |
+
net.last_z_shape = z.shape
|
837 |
+
|
838 |
+
# Split the input into tiles and build a task queue for each tile
|
839 |
+
print(f'[Tiled VAE]: input_size: {z.shape}, tile_size: {tile_size}, padding: {self.pad}')
|
840 |
+
|
841 |
+
in_bboxes, out_bboxes = self.split_tiles(height, width)
|
842 |
+
|
843 |
+
# Prepare tiles by splitting the input latents
|
844 |
+
tiles = []
|
845 |
+
for input_bbox in in_bboxes:
|
846 |
+
tile = z[:, :, input_bbox[2]:input_bbox[3], input_bbox[0]:input_bbox[1]].cpu()
|
847 |
+
tiles.append(tile)
|
848 |
+
|
849 |
+
num_tiles = len(tiles)
|
850 |
+
num_completed = 0
|
851 |
+
|
852 |
+
# Build task queues
|
853 |
+
single_task_queue = build_task_queue(net, is_decoder)
|
854 |
+
#print(single_task_queue)
|
855 |
+
if self.fast_mode:
|
856 |
+
# Fast mode: downsample the input image to the tile size,
|
857 |
+
# then estimate the group norm parameters on the downsampled image
|
858 |
+
scale_factor = tile_size / max(height, width)
|
859 |
+
z = z.to(device)
|
860 |
+
downsampled_z = F.interpolate(z, scale_factor=scale_factor, mode='nearest-exact')
|
861 |
+
# use nearest-exact to keep statistics as close as possible
|
862 |
+
print(f'[Tiled VAE]: Fast mode enabled, estimating group norm parameters on {downsampled_z.shape[3]} x {downsampled_z.shape[2]} image')
|
863 |
+
|
864 |
+
# ======= Special thanks to @Kahsolt for distribution shift issue ======= #
|
865 |
+
# The downsampling will heavily distort its mean and std, so we need to recover it.
|
866 |
+
std_old, mean_old = torch.std_mean(z, dim=[0, 2, 3], keepdim=True)
|
867 |
+
std_new, mean_new = torch.std_mean(downsampled_z, dim=[0, 2, 3], keepdim=True)
|
868 |
+
downsampled_z = (downsampled_z - mean_new) / std_new * std_old + mean_old
|
869 |
+
del std_old, mean_old, std_new, mean_new
|
870 |
+
# occasionally the std_new is too small or too large, which exceeds the range of float16
|
871 |
+
# so we need to clamp it to max z's range.
|
872 |
+
downsampled_z = torch.clamp_(downsampled_z, min=z.min(), max=z.max())
|
873 |
+
estimate_task_queue = clone_task_queue(single_task_queue)
|
874 |
+
if self.estimate_group_norm(downsampled_z, estimate_task_queue, color_fix=self.color_fix):
|
875 |
+
single_task_queue = estimate_task_queue
|
876 |
+
del downsampled_z
|
877 |
+
|
878 |
+
task_queues = [clone_task_queue(single_task_queue) for _ in range(num_tiles)]
|
879 |
+
|
880 |
+
# Dummy result
|
881 |
+
result = None
|
882 |
+
result_approx = None
|
883 |
+
#try:
|
884 |
+
# with devices.autocast():
|
885 |
+
# result_approx = torch.cat([F.interpolate(cheap_approximation(x).unsqueeze(0), scale_factor=opt_f, mode='nearest-exact') for x in z], dim=0).cpu()
|
886 |
+
#except: pass
|
887 |
+
# Free memory of input latent tensor
|
888 |
+
del z
|
889 |
+
|
890 |
+
# Task queue execution
|
891 |
+
pbar = tqdm(total=num_tiles * len(task_queues[0]), desc=f"[Tiled VAE]: Executing {'Decoder' if is_decoder else 'Encoder'} Task Queue: ")
|
892 |
+
|
893 |
+
# execute the tasks back and forth when switching tiles so that we always
|
894 |
+
# keep one tile on the GPU to reduce unnecessary data transfer
|
895 |
+
forward = True
|
896 |
+
interrupted = False
|
897 |
+
#state.interrupted = interrupted
|
898 |
+
while True:
|
899 |
+
#if state.interrupted: interrupted = True ; break
|
900 |
+
|
901 |
+
group_norm_param = GroupNormParam()
|
902 |
+
for i in range(num_tiles) if forward else reversed(range(num_tiles)):
|
903 |
+
#if state.interrupted: interrupted = True ; break
|
904 |
+
|
905 |
+
tile = tiles[i].to(device)
|
906 |
+
input_bbox = in_bboxes[i]
|
907 |
+
task_queue = task_queues[i]
|
908 |
+
|
909 |
+
interrupted = False
|
910 |
+
while len(task_queue) > 0:
|
911 |
+
#if state.interrupted: interrupted = True ; break
|
912 |
+
|
913 |
+
# DEBUG: current task
|
914 |
+
# print('Running task: ', task_queue[0][0], ' on tile ', i, '/', num_tiles, ' with shape ', tile.shape)
|
915 |
+
task = task_queue.pop(0)
|
916 |
+
if task[0] == 'pre_norm':
|
917 |
+
group_norm_param.add_tile(tile, task[1])
|
918 |
+
break
|
919 |
+
elif task[0] == 'store_res' or task[0] == 'store_res_cpu':
|
920 |
+
task_id = 0
|
921 |
+
res = task[1](tile)
|
922 |
+
if not self.fast_mode or task[0] == 'store_res_cpu':
|
923 |
+
res = res.cpu()
|
924 |
+
while task_queue[task_id][0] != 'add_res':
|
925 |
+
task_id += 1
|
926 |
+
task_queue[task_id][1] = res
|
927 |
+
elif task[0] == 'add_res':
|
928 |
+
tile += task[1].to(device)
|
929 |
+
task[1] = None
|
930 |
+
else:
|
931 |
+
tile = task[1](tile)
|
932 |
+
#print(tiles[i].shape, tile.shape, task)
|
933 |
+
pbar.update(1)
|
934 |
+
|
935 |
+
if interrupted: break
|
936 |
+
|
937 |
+
# check for NaNs in the tile.
|
938 |
+
# If there are NaNs, we abort the process to save user's time
|
939 |
+
#devices.test_for_nans(tile, "vae")
|
940 |
+
|
941 |
+
#print(tiles[i].shape, tile.shape, i, num_tiles)
|
942 |
+
if len(task_queue) == 0:
|
943 |
+
tiles[i] = None
|
944 |
+
num_completed += 1
|
945 |
+
if result is None: # NOTE: dim C varies from different cases, can only be inited dynamically
|
946 |
+
result = torch.zeros((N, tile.shape[1], height * 8 if is_decoder else height // 8, width * 8 if is_decoder else width // 8), device=device, requires_grad=False)
|
947 |
+
result[:, :, out_bboxes[i][2]:out_bboxes[i][3], out_bboxes[i][0]:out_bboxes[i][1]] = crop_valid_region(tile, in_bboxes[i], out_bboxes[i], is_decoder)
|
948 |
+
del tile
|
949 |
+
elif i == num_tiles - 1 and forward:
|
950 |
+
forward = False
|
951 |
+
tiles[i] = tile
|
952 |
+
elif i == 0 and not forward:
|
953 |
+
forward = True
|
954 |
+
tiles[i] = tile
|
955 |
+
else:
|
956 |
+
tiles[i] = tile.cpu()
|
957 |
+
del tile
|
958 |
+
|
959 |
+
if interrupted: break
|
960 |
+
if num_completed == num_tiles: break
|
961 |
+
|
962 |
+
# insert the group norm task to the head of each task queue
|
963 |
+
group_norm_func = group_norm_param.summary()
|
964 |
+
if group_norm_func is not None:
|
965 |
+
for i in range(num_tiles):
|
966 |
+
task_queue = task_queues[i]
|
967 |
+
task_queue.insert(0, ('apply_norm', group_norm_func))
|
968 |
+
|
969 |
+
# Done!
|
970 |
+
pbar.close()
|
971 |
+
return result.to(dtype) if result is not None else result_approx.to(device)
|
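The hook above is what model.init_tile_vae(...) in app.py ultimately installs on the VAE. A minimal sketch of that wiring, for illustration only: the real implementation lives in BOOXEL/models/BOOXEL_model.py, and the encoder/decoder attribute names on first_stage_model are assumed here to follow the usual Stable Diffusion VAE layout.

from BOOXEL.utils.tilevae import VAEHook

def init_tile_vae_sketch(model, encoder_tile_size=512, decoder_tile_size=64):
    encoder = model.first_stage_model.encoder
    decoder = model.first_stage_model.decoder
    # Keep the untiled forward around; VAEHook.__call__ falls back to it for small inputs.
    encoder.original_forward = encoder.forward
    decoder.original_forward = decoder.forward
    encoder.forward = VAEHook(encoder, encoder_tile_size, is_decoder=False,
                              fast_decoder=False, fast_encoder=False, color_fix=False)
    decoder.forward = VAEHook(decoder, decoder_tile_size, is_decoder=True,
                              fast_decoder=False, fast_encoder=False, color_fix=False)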
CKPT_PTH.py
ADDED
@@ -0,0 +1,2 @@
|
1 |
+
SDXL_CLIP1_PATH = 'openai/clip-vit-large-patch14'
|
2 |
+
SDXL_CLIP2_CKPT_PTH = 'laion_CLIP-ViT-bigG-14-laion2B-39B-b160k/open_clip_pytorch_model.bin'
|
Examples/Example1.png
ADDED
Examples/Example2.jpeg
ADDED
Examples/Example3.webp
ADDED
README.md
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
-
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
1 |
+
---
|
2 |
+
title: BOOXEL
|
3 |
+
sdk: gradio
|
4 |
+
emoji: 🖼
|
5 |
+
sdk_version: 4.42.0
|
6 |
+
app_file: app.py
|
7 |
+
license: apache-2.0
|
8 |
+
colorFrom: blue
|
9 |
+
colorTo: pink
|
10 |
+
short_description: Restore images with prompt
|
11 |
+
suggested_hardware: zero-a10g
|
12 |
+
---
|
app.py
ADDED
@@ -0,0 +1,854 @@
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
import argparse
|
4 |
+
import numpy as np
|
5 |
+
import torch
|
6 |
+
import einops
|
7 |
+
import copy
|
8 |
+
import math
|
9 |
+
import time
|
10 |
+
import random
|
11 |
+
import spaces
|
12 |
+
import re
|
13 |
+
import uuid
|
14 |
+
|
15 |
+
from gradio_imageslider import ImageSlider
|
16 |
+
from PIL import Image
|
17 |
+
from BOOXEL.util import HWC3, upscale_image, fix_resize, convert_dtype, create_BOOXEL_model, load_QF_ckpt
|
18 |
+
from huggingface_hub import hf_hub_download
|
19 |
+
from pillow_heif import register_heif_opener
|
20 |
+
|
21 |
+
register_heif_opener()
|
22 |
+
|
23 |
+
max_64_bit_int = np.iinfo(np.int32).max
|
24 |
+
|
25 |
+
hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
|
26 |
+
hf_hub_download(repo_id="ckpt/sd_xl_base_1.0", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="ckpt_sd_xl_base_1.0")
|
27 |
+
hf_hub_download(repo_id="yanranxiaoxi/booxel", filename="BOOXEL-v0.F.ckpt", local_dir="yanranxiaoxi_booxel", token=os.environ.get('MODEL_ACCESS_TOKEN'))
|
28 |
+
hf_hub_download(repo_id="yanranxiaoxi/booxel", filename="BOOXEL-v0.Q.ckpt", local_dir="yanranxiaoxi_booxel", token=os.environ.get('MODEL_ACCESS_TOKEN'))
|
29 |
+
hf_hub_download(repo_id="RunDiffusion/Juggernaut-XL-Lightning", filename="Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", local_dir="RunDiffusion_Juggernaut-XL-Lightning")
|
30 |
+
|
31 |
+
parser = argparse.ArgumentParser()
|
32 |
+
parser.add_argument("--opt", type=str, default='options/BOOXEL_v0.yaml')
|
33 |
+
parser.add_argument("--ip", type=str, default='127.0.0.1')
|
34 |
+
parser.add_argument("--port", type=int, default='6688')
|
35 |
+
parser.add_argument("--no_llava", action='store_true', default=True)#False
|
36 |
+
parser.add_argument("--use_image_slider", action='store_true', default=False)#False
|
37 |
+
parser.add_argument("--log_history", action='store_true', default=False)
|
38 |
+
parser.add_argument("--loading_half_params", action='store_true', default=False)#False
|
39 |
+
parser.add_argument("--use_tile_vae", action='store_true', default=True)#False
|
40 |
+
parser.add_argument("--encoder_tile_size", type=int, default=512)
|
41 |
+
parser.add_argument("--decoder_tile_size", type=int, default=64)
|
42 |
+
parser.add_argument("--load_8bit_llava", action='store_true', default=False)
|
43 |
+
args = parser.parse_args()
|
44 |
+
|
45 |
+
if torch.cuda.device_count() > 0:
|
46 |
+
BOOXEL_device = 'cuda:0'
|
47 |
+
|
48 |
+
# Load BOOXEL
|
49 |
+
model, default_setting = create_BOOXEL_model(args.opt, BOOXEL_sign='Q', load_default_setting=True)
|
50 |
+
if args.loading_half_params:
|
51 |
+
model = model.half()
|
52 |
+
if args.use_tile_vae:
|
53 |
+
model.init_tile_vae(encoder_tile_size=args.encoder_tile_size, decoder_tile_size=args.decoder_tile_size)
|
54 |
+
model = model.to(BOOXEL_device)
|
55 |
+
model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
|
56 |
+
model.current_model = 'v0-Q'
|
57 |
+
ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)
|
58 |
+
|
59 |
+
def check_upload(input_image):
|
60 |
+
if input_image is None:
|
61 |
+
raise gr.Error("请提供要处理的图像。")
|
62 |
+
return gr.update(visible = True)
|
63 |
+
|
64 |
+
def update_seed(is_randomize_seed, seed):
|
65 |
+
if is_randomize_seed:
|
66 |
+
return random.randint(0, max_64_bit_int)
|
67 |
+
return seed
|
68 |
+
|
69 |
+
def reset():
|
70 |
+
return [
|
71 |
+
None,
|
72 |
+
0,
|
73 |
+
None,
|
74 |
+
None,
|
75 |
+
"电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。",
|
76 |
+
"绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑",
|
77 |
+
1,
|
78 |
+
1024,
|
79 |
+
1,
|
80 |
+
2,
|
81 |
+
50,
|
82 |
+
-1.0,
|
83 |
+
1.,
|
84 |
+
default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0,
|
85 |
+
True,
|
86 |
+
random.randint(0, max_64_bit_int),
|
87 |
+
5,
|
88 |
+
1.003,
|
89 |
+
"Wavelet",
|
90 |
+
"fp32",
|
91 |
+
"fp32",
|
92 |
+
1.0,
|
93 |
+
True,
|
94 |
+
False,
|
95 |
+
default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
|
96 |
+
0.,
|
97 |
+
"v0-Q",
|
98 |
+
"input",
|
99 |
+
6
|
100 |
+
]
|
101 |
+
|
102 |
+
def check(input_image):
|
103 |
+
if input_image is None:
|
104 |
+
raise gr.Error("请提供要处理的图像。")
|
105 |
+
|
106 |
+
@spaces.GPU(duration=420)
|
107 |
+
def stage1_process(
|
108 |
+
input_image,
|
109 |
+
gamma_correction,
|
110 |
+
diff_dtype,
|
111 |
+
ae_dtype
|
112 |
+
):
|
113 |
+
print('stage1_process ==>>')
|
114 |
+
if torch.cuda.device_count() == 0:
|
115 |
+
gr.Warning('将此 Spaces 设置为 GPU 配置以使其正常工作。')
|
116 |
+
return None, None
|
117 |
+
torch.cuda.set_device(BOOXEL_device)
|
118 |
+
LQ = HWC3(np.array(Image.open(input_image)))
|
119 |
+
LQ = fix_resize(LQ, 512)
|
120 |
+
# stage1
|
121 |
+
LQ = np.array(LQ) / 255 * 2 - 1
|
122 |
+
LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(BOOXEL_device)[:, :3, :, :]
|
123 |
+
|
124 |
+
model.ae_dtype = convert_dtype(ae_dtype)
|
125 |
+
model.model.dtype = convert_dtype(diff_dtype)
|
126 |
+
|
127 |
+
LQ = model.batchify_denoise(LQ, is_stage1=True)
|
128 |
+
LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
|
129 |
+
# Gamma correction
|
130 |
+
LQ = LQ / 255.0
|
131 |
+
LQ = np.power(LQ, gamma_correction)
|
132 |
+
LQ *= 255.0
|
133 |
+
LQ = LQ.round().clip(0, 255).astype(np.uint8)
|
134 |
+
print('<<== stage1_process')
|
135 |
+
return LQ, gr.update(visible = True)
|
136 |
+
|
137 |
+
def stage2_process(*args, **kwargs):
|
138 |
+
try:
|
139 |
+
return restore_in_Xmin(*args, **kwargs)
|
140 |
+
except Exception as e:
|
141 |
+
print('异常的类型 ' + str(type(e)))
|
142 |
+
if type(e).__name__ == "<class 'gradio.exceptions.Error'>":
|
143 |
+
print('异常的名称 ' + type(e).__name__)
|
144 |
+
raise e
|
145 |
+
|
146 |
+
def restore_in_Xmin(
|
147 |
+
noisy_image,
|
148 |
+
rotation,
|
149 |
+
denoise_image,
|
150 |
+
prompt,
|
151 |
+
a_prompt,
|
152 |
+
n_prompt,
|
153 |
+
num_samples,
|
154 |
+
min_size,
|
155 |
+
downscale,
|
156 |
+
upscale,
|
157 |
+
edm_steps,
|
158 |
+
s_stage1,
|
159 |
+
s_stage2,
|
160 |
+
s_cfg,
|
161 |
+
randomize_seed,
|
162 |
+
seed,
|
163 |
+
s_churn,
|
164 |
+
s_noise,
|
165 |
+
color_fix_type,
|
166 |
+
diff_dtype,
|
167 |
+
ae_dtype,
|
168 |
+
gamma_correction,
|
169 |
+
linear_CFG,
|
170 |
+
linear_s_stage2,
|
171 |
+
spt_linear_CFG,
|
172 |
+
spt_linear_s_stage2,
|
173 |
+
model_select,
|
174 |
+
output_format,
|
175 |
+
allocation
|
176 |
+
):
|
177 |
+
print("noisy_image:\n" + str(noisy_image))
|
178 |
+
print("denoise_image:\n" + str(denoise_image))
|
179 |
+
print("rotation: " + str(rotation))
|
180 |
+
print("prompt: " + str(prompt))
|
181 |
+
print("a_prompt: " + str(a_prompt))
|
182 |
+
print("n_prompt: " + str(n_prompt))
|
183 |
+
print("num_samples: " + str(num_samples))
|
184 |
+
print("min_size: " + str(min_size))
|
185 |
+
print("downscale: " + str(downscale))
|
186 |
+
print("upscale: " + str(upscale))
|
187 |
+
print("edm_steps: " + str(edm_steps))
|
188 |
+
print("s_stage1: " + str(s_stage1))
|
189 |
+
print("s_stage2: " + str(s_stage2))
|
190 |
+
print("s_cfg: " + str(s_cfg))
|
191 |
+
print("randomize_seed: " + str(randomize_seed))
|
192 |
+
print("seed: " + str(seed))
|
193 |
+
print("s_churn: " + str(s_churn))
|
194 |
+
print("s_noise: " + str(s_noise))
|
195 |
+
print("color_fix_type: " + str(color_fix_type))
|
196 |
+
print("diff_dtype: " + str(diff_dtype))
|
197 |
+
print("ae_dtype: " + str(ae_dtype))
|
198 |
+
print("gamma_correction: " + str(gamma_correction))
|
199 |
+
print("linear_CFG: " + str(linear_CFG))
|
200 |
+
print("linear_s_stage2: " + str(linear_s_stage2))
|
201 |
+
print("spt_linear_CFG: " + str(spt_linear_CFG))
|
202 |
+
print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
|
203 |
+
print("model_select: " + str(model_select))
|
204 |
+
print("GPU time allocation: " + str(allocation) + " min")
|
205 |
+
print("output_format: " + str(output_format))
|
206 |
+
|
207 |
+
input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
|
208 |
+
|
209 |
+
if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
|
210 |
+
gr.Warning('错误的图像格式。当前仅支持 *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp 或 *.heic。')
|
211 |
+
return None, None, None, None
|
212 |
+
|
213 |
+
if output_format == "input":
|
214 |
+
if noisy_image is None:
|
215 |
+
output_format = "png"
|
216 |
+
else:
|
217 |
+
output_format = input_format
|
218 |
+
print("最终的 output_format:" + str(output_format))
|
219 |
+
|
220 |
+
if prompt is None:
|
221 |
+
prompt = ""
|
222 |
+
|
223 |
+
if a_prompt is None:
|
224 |
+
a_prompt = ""
|
225 |
+
|
226 |
+
if n_prompt is None:
|
227 |
+
n_prompt = ""
|
228 |
+
|
229 |
+
if prompt != "" and a_prompt != "":
|
230 |
+
a_prompt = prompt + ", " + a_prompt
|
231 |
+
else:
|
232 |
+
a_prompt = prompt + a_prompt
|
233 |
+
print("最终提示词:" + str(a_prompt))
|
234 |
+
|
235 |
+
denoise_image = np.array(Image.open(noisy_image if denoise_image is None else denoise_image))
|
236 |
+
|
237 |
+
if rotation == 90:
|
238 |
+
denoise_image = np.array(list(zip(*denoise_image[::-1])))
|
239 |
+
elif rotation == 180:
|
240 |
+
denoise_image = np.array(list(zip(*denoise_image[::-1])))
|
241 |
+
denoise_image = np.array(list(zip(*denoise_image[::-1])))
|
242 |
+
elif rotation == -90:
|
243 |
+
denoise_image = np.array(list(zip(*denoise_image))[::-1])
|
244 |
+
|
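# For reference, the three zip/transpose branches above are equivalent to NumPy's
# built-in rotation (sketch only, not used by the app):
#   rotation == 90  ->  denoise_image = np.rot90(denoise_image, k=-1)   # clockwise
#   rotation == 180 ->  denoise_image = np.rot90(denoise_image, k=2)
#   rotation == -90 ->  denoise_image = np.rot90(denoise_image, k=1)    # counter-clockwise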
245 |
+
if 1 < downscale:
|
246 |
+
input_height, input_width, input_channel = denoise_image.shape
|
247 |
+
denoise_image = np.array(Image.fromarray(denoise_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
|
248 |
+
|
249 |
+
denoise_image = HWC3(denoise_image)
|
250 |
+
|
251 |
+
if torch.cuda.device_count() == 0:
|
252 |
+
gr.Warning('将此 Spaces 设置为 GPU 配置以使其正常工作。')
|
253 |
+
return [noisy_image, denoise_image], gr.update(label="可下载的结果为 *." + output_format + " 格式", format = output_format, value = [denoise_image]), None, gr.update(visible=True)
|
254 |
+
|
255 |
+
if model_select != model.current_model:
|
256 |
+
print('载入 ' + model_select)
|
257 |
+
if model_select == 'v0-Q':
|
258 |
+
model.load_state_dict(ckpt_Q, strict=False)
|
259 |
+
elif model_select == 'v0-F':
|
260 |
+
model.load_state_dict(ckpt_F, strict=False)
|
261 |
+
model.current_model = model_select
|
262 |
+
|
263 |
+
model.ae_dtype = convert_dtype(ae_dtype)
|
264 |
+
model.model.dtype = convert_dtype(diff_dtype)
|
265 |
+
|
266 |
+
# Dispatch according to the requested GPU time allocation
|
267 |
+
if allocation == 1:
|
268 |
+
return restore_in_1min(
|
269 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
270 |
+
)
|
271 |
+
if allocation == 2:
|
272 |
+
return restore_in_2min(
|
273 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
274 |
+
)
|
275 |
+
if allocation == 3:
|
276 |
+
return restore_in_3min(
|
277 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
278 |
+
)
|
279 |
+
if allocation == 4:
|
280 |
+
return restore_in_4min(
|
281 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
282 |
+
)
|
283 |
+
if allocation == 5:
|
284 |
+
return restore_in_5min(
|
285 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
286 |
+
)
|
287 |
+
if allocation == 7:
|
288 |
+
return restore_in_7min(
|
289 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
290 |
+
)
|
291 |
+
if allocation == 8:
|
292 |
+
return restore_in_8min(
|
293 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
294 |
+
)
|
295 |
+
if allocation == 9:
|
296 |
+
return restore_in_9min(
|
297 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
298 |
+
)
|
299 |
+
if allocation == 10:
|
300 |
+
return restore_in_10min(
|
301 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
302 |
+
)
|
303 |
+
else:
|
304 |
+
return restore_in_6min(
|
305 |
+
noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
|
306 |
+
)
|
307 |
+
|
308 |
+
@spaces.GPU(duration=59)
|
309 |
+
def restore_in_1min(*args, **kwargs):
|
310 |
+
return restore_on_gpu(*args, **kwargs)
|
311 |
+
|
312 |
+
@spaces.GPU(duration=119)
|
313 |
+
def restore_in_2min(*args, **kwargs):
|
314 |
+
return restore_on_gpu(*args, **kwargs)
|
315 |
+
|
316 |
+
@spaces.GPU(duration=179)
|
317 |
+
def restore_in_3min(*args, **kwargs):
|
318 |
+
return restore_on_gpu(*args, **kwargs)
|
319 |
+
|
320 |
+
@spaces.GPU(duration=239)
|
321 |
+
def restore_in_4min(*args, **kwargs):
|
322 |
+
return restore_on_gpu(*args, **kwargs)
|
323 |
+
|
324 |
+
@spaces.GPU(duration=299)
|
325 |
+
def restore_in_5min(*args, **kwargs):
|
326 |
+
return restore_on_gpu(*args, **kwargs)
|
327 |
+
|
328 |
+
@spaces.GPU(duration=359)
|
329 |
+
def restore_in_6min(*args, **kwargs):
|
330 |
+
return restore_on_gpu(*args, **kwargs)
|
331 |
+
|
332 |
+
@spaces.GPU(duration=419)
|
333 |
+
def restore_in_7min(*args, **kwargs):
|
334 |
+
return restore_on_gpu(*args, **kwargs)
|
335 |
+
|
336 |
+
@spaces.GPU(duration=479)
|
337 |
+
def restore_in_8min(*args, **kwargs):
|
338 |
+
return restore_on_gpu(*args, **kwargs)
|
339 |
+
|
340 |
+
@spaces.GPU(duration=539)
|
341 |
+
def restore_in_9min(*args, **kwargs):
|
342 |
+
return restore_on_gpu(*args, **kwargs)
|
343 |
+
|
344 |
+
@spaces.GPU(duration=599)
|
345 |
+
def restore_in_10min(*args, **kwargs):
|
346 |
+
return restore_on_gpu(*args, **kwargs)
|
347 |
+
|
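# The ten wrappers above exist because each @spaces.GPU decorator bakes in its own
# duration; a table-driven dispatch with the same behaviour (including the fallback to
# the 6-minute variant) could look like this -- sketch only, the explicit functions are
# what restore_in_Xmin actually calls:
#
#   RESTORE_BY_ALLOCATION = {
#       1: restore_in_1min, 2: restore_in_2min, 3: restore_in_3min, 4: restore_in_4min,
#       5: restore_in_5min, 6: restore_in_6min, 7: restore_in_7min, 8: restore_in_8min,
#       9: restore_in_9min, 10: restore_in_10min,
#   }
#
#   def dispatch_restore(allocation, *args, **kwargs):
#       return RESTORE_BY_ALLOCATION.get(allocation, restore_in_6min)(*args, **kwargs)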
348 |
+
def restore_on_gpu(
|
349 |
+
noisy_image,
|
350 |
+
input_image,
|
351 |
+
prompt,
|
352 |
+
a_prompt,
|
353 |
+
n_prompt,
|
354 |
+
num_samples,
|
355 |
+
min_size,
|
356 |
+
downscale,
|
357 |
+
upscale,
|
358 |
+
edm_steps,
|
359 |
+
s_stage1,
|
360 |
+
s_stage2,
|
361 |
+
s_cfg,
|
362 |
+
randomize_seed,
|
363 |
+
seed,
|
364 |
+
s_churn,
|
365 |
+
s_noise,
|
366 |
+
color_fix_type,
|
367 |
+
diff_dtype,
|
368 |
+
ae_dtype,
|
369 |
+
gamma_correction,
|
370 |
+
linear_CFG,
|
371 |
+
linear_s_stage2,
|
372 |
+
spt_linear_CFG,
|
373 |
+
spt_linear_s_stage2,
|
374 |
+
model_select,
|
375 |
+
output_format,
|
376 |
+
allocation
|
377 |
+
):
|
378 |
+
start = time.time()
|
379 |
+
print('restore ==>>')
|
380 |
+
|
381 |
+
torch.cuda.set_device(BOOXEL_device)
|
382 |
+
|
383 |
+
with torch.no_grad():
|
384 |
+
input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
|
385 |
+
LQ = np.array(input_image) / 255.0
|
386 |
+
LQ = np.power(LQ, gamma_correction)
|
387 |
+
LQ *= 255.0
|
388 |
+
LQ = LQ.round().clip(0, 255).astype(np.uint8)
|
389 |
+
LQ = LQ / 255 * 2 - 1
|
390 |
+
LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(BOOXEL_device)[:, :3, :, :]
|
391 |
+
captions = ['']
|
392 |
+
|
393 |
+
samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
|
394 |
+
s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
|
395 |
+
num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
|
396 |
+
use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
|
397 |
+
cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
|
398 |
+
|
399 |
+
x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
|
400 |
+
0, 255).astype(np.uint8)
|
401 |
+
results = [x_samples[i] for i in range(num_samples)]
|
402 |
+
torch.cuda.empty_cache()
|
403 |
+
|
404 |
+
# All results share the same size
|
405 |
+
input_height, input_width, input_channel = np.array(input_image).shape
|
406 |
+
result_height, result_width, result_channel = np.array(results[0]).shape
|
407 |
+
|
408 |
+
print('<<== restore')
|
409 |
+
end = time.time()
|
410 |
+
secondes = int(end - start)
|
411 |
+
minutes = math.floor(secondes / 60)
|
412 |
+
secondes = secondes - (minutes * 60)
|
413 |
+
hours = math.floor(minutes / 60)
|
414 |
+
minutes = minutes - (hours * 60)
|
415 |
+
information = ("如果想获得不同的结果,请重新开始。" if randomize_seed else "") + \
|
416 |
+
"如果您没有得到想要的图片,请在 « 图片描述 » 中添加更多细节。" + \
|
417 |
+
"等待 " + str(allocation) + " 分钟以避免 GPU 配额处罚,或也可以使用另一台计算机。" + \
|
418 |
+
"该图片已在 " + \
|
419 |
+
((str(hours) + " 小时 ") if hours != 0 else "") + \
|
420 |
+
((str(minutes) + " 分钟 ") if hours != 0 or minutes != 0 else "") + \
|
421 |
+
str(secondes) + " 秒 内生成。" + \
|
422 |
+
"新图像的分辨率为 " + str(result_width) + \
|
423 |
+
" 像素宽, " + str(result_height) + \
|
424 |
+
" 像素高,最终总分辨率为 " + f'{result_width * result_height:,}' + " 像素。"
|
425 |
+
print(information)
|
426 |
+
try:
|
427 |
+
print("初始分辨率:" + f'{input_width * input_height:,}')
|
428 |
+
print("最终分辨率:" + f'{result_width * result_height:,}')
|
429 |
+
print("edm_steps: " + str(edm_steps))
|
430 |
+
print("num_samples: " + str(num_samples))
|
431 |
+
print("缩小规模:" + str(downscale))
|
432 |
+
print("预计分钟数:" + f'{(((result_width * result_height**(1/1.75)) * input_width * input_height * (edm_steps**(1/2)) * (num_samples**(1/2.5)))**(1/2.5)) / 25000:,}')
|
433 |
+
except Exception as e:
|
434 |
+
print('估算错误')
|
435 |
+
|
436 |
+
# Only one image can be shown in the slider
|
437 |
+
return [noisy_image] + [results[0]], gr.update(label="可下载的结果为 *." + output_format + " 格式", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
|
438 |
+
|
439 |
+
def load_and_reset(param_setting):
|
440 |
+
print('load_and_reset ==>>')
|
441 |
+
if torch.cuda.device_count() == 0:
|
442 |
+
gr.Warning('将此 Spaces 设置为 GPU 配置以使其正常工作。')
|
443 |
+
return None, None, None, None, None, None, None, None, None, None, None, None, None, None
|
444 |
+
edm_steps = default_setting.edm_steps
|
445 |
+
s_stage2 = 1.0
|
446 |
+
s_stage1 = -1.0
|
447 |
+
s_churn = 5
|
448 |
+
s_noise = 1.003
|
449 |
+
# Positive prompt
|
450 |
+
a_prompt = '电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。'
|
451 |
+
# Negative prompt
|
452 |
+
n_prompt = '绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑'
|
453 |
+
color_fix_type = 'Wavelet'
|
454 |
+
spt_linear_s_stage2 = 0.0
|
455 |
+
linear_s_stage2 = False
|
456 |
+
linear_CFG = True
|
457 |
+
if param_setting == "Quality":
|
458 |
+
s_cfg = default_setting.s_cfg_Quality
|
459 |
+
spt_linear_CFG = default_setting.spt_linear_CFG_Quality
|
460 |
+
model_select = "v0-Q"
|
461 |
+
elif param_setting == "Fidelity":
|
462 |
+
s_cfg = default_setting.s_cfg_Fidelity
|
463 |
+
spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
|
464 |
+
model_select = "v0-F"
|
465 |
+
else:
|
466 |
+
raise NotImplementedError
|
467 |
+
gr.Info('参数已重置。')
|
468 |
+
print('<<== load_and_reset')
|
469 |
+
return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
|
470 |
+
linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select
|
471 |
+
|
472 |
+
def log_information(result_gallery):
|
473 |
+
print('log_information')
|
474 |
+
if result_gallery is not None:
|
475 |
+
for i, result in enumerate(result_gallery):
|
476 |
+
print(result[0])
|
477 |
+
|
478 |
+
def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
|
479 |
+
print('on_select_result')
|
480 |
+
if result_gallery is not None:
|
481 |
+
for i, result in enumerate(result_gallery):
|
482 |
+
print(result[0])
|
483 |
+
return [result_slider[0], result_gallery[evt.index][0]]
|
484 |
+
|
485 |
+
# Gradio interface
|
486 |
+
with gr.Blocks() as interface:
|
487 |
+
gr.Markdown("""
|
488 |
+
# BOOXEL —— Boost Pixel!
|
489 |
+
|
490 |
+
提供你的提示词,借助先进的生成实验和模型放大的力量,获取非凡的逼真画面。
|
491 |
+
|
492 |
+
我们收集了一个包含 600 万张高分辨率、高质量图像的真实世界采集的数据集用于模型训练,每张图像都关联了清晰且详尽的描述性文本注释。
|
493 |
+
|
494 |
+
我们提供了使用文本提示操纵恢复图像的能力,此外,还引入了消极质量提示和恢复指导的采样方法,以进一步提高生成图像的质量和保真度。
|
495 |
+
""")
|
496 |
+
|
497 |
+
input_image = gr.Image(label="输入图像(*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp, *.heic)", show_label=True, type="filepath", height=600, elem_id="image-input")
|
498 |
+
rotation = gr.Radio([["不旋转", 0], ["⤵ 旋转 +90°", 90], ["↩ 旋转 180°", 180], ["⤴ 旋转 -90°", -90]], label="方向校正", info="在还原图像之前,将应用以下旋转功能;人工智能需要良好的定位才能理解内容", value=0, interactive=True, visible=False)
|
499 |
+
with gr.Group():
|
500 |
+
prompt = gr.Textbox(label="图像描述", info="帮助人工智能理解图像所代表的内容;尽可能多地描述,尤其是我们在原始图像上看不到的细节;可以用任何语言书写", value="", placeholder="长春,上午,秋天,英短蓝白猫,走在,花丛小径上,真实图像", lines=3)
|
501 |
+
upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="像素放大倍率", info="1 到 10 倍放大倍率", value=2, interactive=True)
|
502 |
+
allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min(不建议)", 8], ["9 min(不建议)", 9], ["10 min(不建议)", 10]], label="GPU 分配时间", info="设置为较低值可中止运行;设置为较高值后,下次运行会受到配额处罚", value=5, interactive=True)
|
503 |
+
|
504 |
+
with gr.Accordion("预降噪(可选)", open=False):
|
505 |
+
gamma_correction = gr.Slider(label="伽玛校正", info="较低的值图像将会更亮,反之亦然", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
|
506 |
+
denoise_button = gr.Button(value="预降噪")
|
507 |
+
denoise_image = gr.Image(label="降噪图像", show_label=True, type="filepath", sources=[], interactive = False, height=600, elem_id="image-s1")
|
508 |
+
denoise_information = gr.HTML(value="如果存在,去噪图像将被用于修复,而不是输入图像。", visible=False)
|
509 |
+
|
510 |
+
with gr.Accordion("高级选项", open=False):
|
511 |
+
output_format = gr.Radio([["与输入一致", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="生成的图像格式", info="文件扩展名", value="input", interactive=True)
|
512 |
+
a_prompt = gr.Textbox(label="补充图片说明",
|
513 |
+
info="完整的主图像描述",
|
514 |
+
value='电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。',
|
515 |
+
lines=3)
|
516 |
+
n_prompt = gr.Textbox(label="负面图像描述",
|
517 |
+
info="通过列出图像不代表的内容来消除歧义",
|
518 |
+
value='绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑',
|
519 |
+
lines=3)
|
520 |
+
edm_steps = gr.Slider(label="步骤数", info="较低的值生成将会更快;较高的值将会获得更多的细节", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
|
521 |
+
num_samples = gr.Slider(label="生成数", info="生成的结果图像的数量", minimum=1, maximum=4 if not args.use_image_slider else 1
|
522 |
+
, value=1, step=1)
|
523 |
+
min_size = gr.Slider(label="最小尺寸", info="结果的最小高度和最小宽度", minimum=32, maximum=4096, value=1024, step=32)
|
524 |
+
downscale = gr.Radio([["/1", 1], ["/2", 2], ["/3", 3], ["/4", 4], ["/5", 5], ["/6", 6], ["/7", 7], ["/8", 8], ["/9", 9], ["/10", 10]], label="缩减前因数", info="减少图像模糊,缩短处理时间", value=1, interactive=True)
|
525 |
+
with gr.Row():
|
526 |
+
with gr.Column():
|
527 |
+
model_select = gr.Radio([["质量 (v0-Q)", "v0-Q"], ["保真度 (v0-F)", "v0-F"]], label="模型选择", info="预训练模型", value="v0-Q",
|
528 |
+
interactive=True)
|
529 |
+
with gr.Column():
|
530 |
+
color_fix_type = gr.Radio([["None", "None"], ["AdaIn (改进风格)", "AdaIn"], ["Wavelet (针对 JPEG 伪图象)", "Wavelet"]], label="色彩修复类型", info="AdaIn 改进画面风格;Wavelet 用于 JPEG 伪图像", value="Wavelet",
|
531 |
+
interactive=True)
|
532 |
+
s_cfg = gr.Slider(label="文本指导等级", info="较低的值将更加跟随源图像;较高的值将更加跟随提示", minimum=1.0, maximum=15.0,
|
533 |
+
value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
|
534 |
+
s_stage2 = gr.Slider(label="修复指导强度", minimum=0., maximum=1., value=1., step=0.05)
|
535 |
+
s_stage1 = gr.Slider(label="预降噪指导强度", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
|
536 |
+
s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
|
537 |
+
s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
|
538 |
+
with gr.Row():
|
539 |
+
with gr.Column():
|
540 |
+
linear_CFG = gr.Checkbox(label="线性 CFG", value=True)
|
541 |
+
spt_linear_CFG = gr.Slider(label="CFG 起始", minimum=1.0,
|
542 |
+
maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
|
543 |
+
with gr.Column():
|
544 |
+
linear_s_stage2 = gr.Checkbox(label="线性修复指导", value=False)
|
545 |
+
spt_linear_s_stage2 = gr.Slider(label="指导起始", minimum=0.,
|
546 |
+
maximum=1., value=0., step=0.05)
|
547 |
+
with gr.Column():
|
548 |
+
diff_dtype = gr.Radio([["fp32 (精确)", "fp32"], ["fp16 (中等)", "fp16"], ["bf16 (快速)", "bf16"]], label="扩散数据类型", value="fp32",
|
549 |
+
interactive=True)
|
550 |
+
with gr.Column():
|
551 |
+
ae_dtype = gr.Radio([["fp32 (精确)", "fp32"], ["bf16 (快速)", "bf16"]], label="自动编码器数据类型", value="fp32",
|
552 |
+
interactive=True)
|
553 |
+
randomize_seed = gr.Checkbox(label = "\U0001F3B2 随机种子", value=True, info="如果选中,结果将总是不同")
|
554 |
+
seed = gr.Slider(label="种子", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
|
555 |
+
with gr.Group():
|
556 |
+
param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="预配", value="Quality")
|
557 |
+
restart_button = gr.Button(value="应用预配")
|
558 |
+
|
559 |
+
with gr.Column():
|
560 |
+
diffusion_button = gr.Button(value="开始处理", variant="primary", elem_id="process_button")
|
561 |
+
reset_btn = gr.Button(value="重新初始化页面", variant="stop", elem_id="reset_button", visible=False)
|
562 |
+
|
563 |
+
restore_information = gr.HTML(value="重启进程,获得另一个结果。", visible=False)
|
564 |
+
result_slider = ImageSlider(label='对比结果', show_label=True, interactive=False, elem_id="slider1", show_download_button=False)
|
565 |
+
result_gallery = gr.Gallery(label='可下载的结果', show_label=True, interactive=False, elem_id="gallery1")
|
566 |
+
|
567 |
+
gr.Examples(
|
568 |
+
examples = [
|
569 |
+
[
|
570 |
+
"./Examples/Example1.png",
|
571 |
+
0,
|
572 |
+
None,
|
573 |
+
"一群人,快乐地在街上行走,逼真,8K,极其精细",
|
574 |
+
"电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。",
|
575 |
+
"绘画,油画,插图,绘图���艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑",
|
576 |
+
2,
|
577 |
+
1024,
|
578 |
+
1,
|
579 |
+
8,
|
580 |
+
200,
|
581 |
+
-1,
|
582 |
+
1,
|
583 |
+
7.5,
|
584 |
+
False,
|
585 |
+
42,
|
586 |
+
5,
|
587 |
+
1.003,
|
588 |
+
"AdaIn",
|
589 |
+
"fp16",
|
590 |
+
"bf16",
|
591 |
+
1.0,
|
592 |
+
True,
|
593 |
+
4,
|
594 |
+
False,
|
595 |
+
0.,
|
596 |
+
"v0-Q",
|
597 |
+
"input",
|
598 |
+
5
|
599 |
+
],
|
600 |
+
[
|
601 |
+
"./Examples/Example2.jpeg",
|
602 |
+
0,
|
603 |
+
None,
|
604 |
+
"一只虎斑猫的头部,在一间房子里,逼真,8K,极其细腻。",
|
605 |
+
"电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。",
|
606 |
+
"绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑",
|
607 |
+
1,
|
608 |
+
1024,
|
609 |
+
1,
|
610 |
+
1,
|
611 |
+
200,
|
612 |
+
-1,
|
613 |
+
1,
|
614 |
+
7.5,
|
615 |
+
False,
|
616 |
+
42,
|
617 |
+
5,
|
618 |
+
1.003,
|
619 |
+
"Wavelet",
|
620 |
+
"fp16",
|
621 |
+
"bf16",
|
622 |
+
1.0,
|
623 |
+
True,
|
624 |
+
4,
|
625 |
+
False,
|
626 |
+
0.,
|
627 |
+
"v0-Q",
|
628 |
+
"input",
|
629 |
+
4
|
630 |
+
],
|
631 |
+
[
|
632 |
+
"./Examples/Example3.webp",
|
633 |
+
0,
|
634 |
+
None,
|
635 |
+
"一个红色的苹果",
|
636 |
+
"电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。",
|
637 |
+
"绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑",
|
638 |
+
1,
|
639 |
+
1024,
|
640 |
+
1,
|
641 |
+
1,
|
642 |
+
200,
|
643 |
+
-1,
|
644 |
+
1,
|
645 |
+
7.5,
|
646 |
+
False,
|
647 |
+
42,
|
648 |
+
5,
|
649 |
+
1.003,
|
650 |
+
"Wavelet",
|
651 |
+
"fp16",
|
652 |
+
"bf16",
|
653 |
+
1.0,
|
654 |
+
True,
|
655 |
+
4,
|
656 |
+
False,
|
657 |
+
0.,
|
658 |
+
"v0-Q",
|
659 |
+
"input",
|
660 |
+
4
|
661 |
+
],
|
662 |
+
[
|
663 |
+
"./Examples/Example3.webp",
|
664 |
+
0,
|
665 |
+
None,
|
666 |
+
"一块红色大理石",
|
667 |
+
"电影级,高对比度,高度精细,使用哈苏相机拍摄,超精细照片,逼真的最大细节,32K,调色,超高清,极致的细节,皮肤毛孔细节,超清晰度,完美无变形。",
|
668 |
+
"绘画,油画,插图,绘图,艺术,素描,动漫,卡通,CG 风格,3D 渲染,虚幻引擎,模糊,混色,不清晰,怪异纹理,丑陋,肮脏,凌乱,质量最差,质量低,框架,水印,签名,JPEG 伪影,变形,低分辨率,过度平滑",
|
669 |
+
1,
|
670 |
+
1024,
|
671 |
+
1,
|
672 |
+
1,
|
673 |
+
200,
|
674 |
+
-1,
|
675 |
+
1,
|
676 |
+
7.5,
|
677 |
+
False,
|
678 |
+
42,
|
679 |
+
5,
|
680 |
+
1.003,
|
681 |
+
"Wavelet",
|
682 |
+
"fp16",
|
683 |
+
"bf16",
|
684 |
+
1.0,
|
685 |
+
True,
|
686 |
+
4,
|
687 |
+
False,
|
688 |
+
0.,
|
689 |
+
"v0-Q",
|
690 |
+
"input",
|
691 |
+
4
|
692 |
+
],
|
693 |
+
],
|
694 |
+
run_on_click = True,
|
695 |
+
fn = stage2_process,
|
696 |
+
inputs = [
|
697 |
+
input_image,
|
698 |
+
rotation,
|
699 |
+
denoise_image,
|
700 |
+
prompt,
|
701 |
+
a_prompt,
|
702 |
+
n_prompt,
|
703 |
+
num_samples,
|
704 |
+
min_size,
|
705 |
+
downscale,
|
706 |
+
upscale,
|
707 |
+
edm_steps,
|
708 |
+
s_stage1,
|
709 |
+
s_stage2,
|
710 |
+
s_cfg,
|
711 |
+
randomize_seed,
|
712 |
+
seed,
|
713 |
+
s_churn,
|
714 |
+
s_noise,
|
715 |
+
color_fix_type,
|
716 |
+
diff_dtype,
|
717 |
+
ae_dtype,
|
718 |
+
gamma_correction,
|
719 |
+
linear_CFG,
|
720 |
+
linear_s_stage2,
|
721 |
+
spt_linear_CFG,
|
722 |
+
spt_linear_s_stage2,
|
723 |
+
model_select,
|
724 |
+
output_format,
|
725 |
+
allocation
|
726 |
+
],
|
727 |
+
outputs = [
|
728 |
+
result_slider,
|
729 |
+
result_gallery,
|
730 |
+
restore_information,
|
731 |
+
reset_btn
|
732 |
+
],
|
733 |
+
cache_examples = False,
|
734 |
+
)
|
735 |
+
|
736 |
+
input_image.upload(fn = check_upload, inputs = [
|
737 |
+
input_image
|
738 |
+
], outputs = [
|
739 |
+
rotation
|
740 |
+
], queue = False, show_progress = False)
|
741 |
+
|
742 |
+
denoise_button.click(fn = check, inputs = [
|
743 |
+
input_image
|
744 |
+
], outputs = [], queue = False, show_progress = False).success(fn = stage1_process, inputs = [
|
745 |
+
input_image,
|
746 |
+
gamma_correction,
|
747 |
+
diff_dtype,
|
748 |
+
ae_dtype
|
749 |
+
], outputs=[
|
750 |
+
denoise_image,
|
751 |
+
denoise_information
|
752 |
+
])
|
753 |
+
|
754 |
+
diffusion_button.click(fn = update_seed, inputs = [
|
755 |
+
randomize_seed,
|
756 |
+
seed
|
757 |
+
], outputs = [
|
758 |
+
seed
|
759 |
+
], queue = False, show_progress = False).then(fn = check, inputs = [
|
760 |
+
input_image
|
761 |
+
], outputs = [], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
|
762 |
+
input_image,
|
763 |
+
rotation,
|
764 |
+
denoise_image,
|
765 |
+
prompt,
|
766 |
+
a_prompt,
|
767 |
+
n_prompt,
|
768 |
+
num_samples,
|
769 |
+
min_size,
|
770 |
+
downscale,
|
771 |
+
upscale,
|
772 |
+
edm_steps,
|
773 |
+
s_stage1,
|
774 |
+
s_stage2,
|
775 |
+
s_cfg,
|
776 |
+
randomize_seed,
|
777 |
+
seed,
|
778 |
+
s_churn,
|
779 |
+
s_noise,
|
780 |
+
color_fix_type,
|
781 |
+
diff_dtype,
|
782 |
+
ae_dtype,
|
783 |
+
gamma_correction,
|
784 |
+
linear_CFG,
|
785 |
+
linear_s_stage2,
|
786 |
+
spt_linear_CFG,
|
787 |
+
spt_linear_s_stage2,
|
788 |
+
model_select,
|
789 |
+
output_format,
|
790 |
+
allocation
|
791 |
+
], outputs = [
|
792 |
+
result_slider,
|
793 |
+
result_gallery,
|
794 |
+
restore_information,
|
795 |
+
reset_btn
|
796 |
+
]).success(fn = log_information, inputs = [
|
797 |
+
result_gallery
|
798 |
+
], outputs = [], queue = False, show_progress = False)
|
799 |
+
|
800 |
+
result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
|
801 |
+
result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
|
802 |
+
|
803 |
+
restart_button.click(fn = load_and_reset, inputs = [
|
804 |
+
param_setting
|
805 |
+
], outputs = [
|
806 |
+
edm_steps,
|
807 |
+
s_cfg,
|
808 |
+
s_stage2,
|
809 |
+
s_stage1,
|
810 |
+
s_churn,
|
811 |
+
s_noise,
|
812 |
+
a_prompt,
|
813 |
+
n_prompt,
|
814 |
+
color_fix_type,
|
815 |
+
linear_CFG,
|
816 |
+
linear_s_stage2,
|
817 |
+
spt_linear_CFG,
|
818 |
+
spt_linear_s_stage2,
|
819 |
+
model_select
|
820 |
+
])
|
821 |
+
|
822 |
+
reset_btn.click(fn = reset, inputs = [], outputs = [
|
823 |
+
input_image,
|
824 |
+
rotation,
|
825 |
+
denoise_image,
|
826 |
+
prompt,
|
827 |
+
a_prompt,
|
828 |
+
n_prompt,
|
829 |
+
num_samples,
|
830 |
+
min_size,
|
831 |
+
downscale,
|
832 |
+
upscale,
|
833 |
+
edm_steps,
|
834 |
+
s_stage1,
|
835 |
+
s_stage2,
|
836 |
+
s_cfg,
|
837 |
+
randomize_seed,
|
838 |
+
seed,
|
839 |
+
s_churn,
|
840 |
+
s_noise,
|
841 |
+
color_fix_type,
|
842 |
+
diff_dtype,
|
843 |
+
ae_dtype,
|
844 |
+
gamma_correction,
|
845 |
+
linear_CFG,
|
846 |
+
linear_s_stage2,
|
847 |
+
spt_linear_CFG,
|
848 |
+
spt_linear_s_stage2,
|
849 |
+
model_select,
|
850 |
+
output_format,
|
851 |
+
allocation
|
852 |
+
], queue = False, show_progress = False)
|
853 |
+
|
854 |
+
interface.queue(10).launch()
|
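The wiring above relies on Gradio's chained event listeners. As a point of reference, here is a minimal, self-contained sketch of the same `.click(...).success(...)` pattern; it is illustrative only, not taken from app.py, and the component and function names are invented:

import gradio as gr

def validate(text):
    if not text:
        raise gr.Error("Input must not be empty")  # raising stops the chain, so .success() does not fire
    return text

def shout(text):
    return text.upper()

with gr.Blocks() as demo:
    box = gr.Textbox(label="input")
    out = gr.Textbox(label="output")
    btn = gr.Button("Run")
    # .success() runs only when the previous step finished without raising,
    # mirroring how check / stage2_process are chained above.
    btn.click(fn=validate, inputs=[box], outputs=[box]).success(fn=shout, inputs=[box], outputs=[out])

demo.launch()
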
llava/__init__.py
ADDED
@@ -0,0 +1 @@
from .model import LlavaLlamaForCausalLM

llava/constants.py
ADDED
@@ -0,0 +1,12 @@
CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15

LOGDIR = "."

# Model constants
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"

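For orientation, a minimal sketch of how these constants are typically combined into a multimodal prompt string; illustrative only, `build_image_prompt` is a hypothetical helper and only the imported constants come from this file:

from llava.constants import (DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN,
                             DEFAULT_IM_END_TOKEN)

def build_image_prompt(question: str, use_im_start_end: bool = True) -> str:
    # Wrap the image placeholder with the start/end markers when the model
    # expects them, then prepend it to the text question.
    image_token = DEFAULT_IMAGE_TOKEN
    if use_im_start_end:
        image_token = DEFAULT_IM_START_TOKEN + image_token + DEFAULT_IM_END_TOKEN
    return image_token + "\n" + question

print(build_image_prompt("Describe this photo in detail."))
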
llava/conversation.py
ADDED
@@ -0,0 +1,381 @@
import dataclasses
from enum import auto, Enum
from typing import List, Tuple


class SeparatorStyle(Enum):
    """Different separator styles."""
    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    LLAMA_2 = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    version: str = "Unknown"

    skip_next: bool = False

    def get_prompt(self):
        messages = self.messages
        if len(messages) > 0 and type(messages[0][1]) is tuple:
            messages = self.messages.copy()
            init_role, init_msg = messages[0].copy()
            init_msg = init_msg[0].replace("<image>", "").strip()
            if 'mmtag' in self.version:
                messages[0] = (init_role, init_msg)
                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
                messages.insert(1, (self.roles[1], "Received."))
            else:
                messages[0] = (init_role, "<image>\n" + init_msg)

        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.MPT:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
        elif self.sep_style == SeparatorStyle.LLAMA_2:
            wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n"
            wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
            ret = ""

            for i, (role, message) in enumerate(messages):
                if i == 0:
                    assert message, "first message should not be None"
                    assert role == self.roles[0], "first message must come from the user"
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i == 0: message = wrap_sys(self.system) + message
                    if i % 2 == 0:
                        message = wrap_inst(message)
                        ret += self.sep + message
                    else:
                        ret += " " + message + " " + self.sep2
                else:
                    ret += ""
            ret = ret.lstrip(self.sep)
        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += message + seps[i % 2]
                else:
                    ret += ""
        else:
            raise ValueError(f"Invalid separator style: {self.sep_style}")

        return ret

    def append_message(self, role, message):
        self.messages.append([role, message])

    def get_images(self, return_pil=False):
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    import base64
                    from io import BytesIO
                    from PIL import Image
                    msg, image, image_process_mode = msg
                    if image_process_mode == "Pad":
                        def expand2square(pil_img, background_color=(122, 116, 104)):
                            width, height = pil_img.size
                            if width == height:
                                return pil_img
                            elif width > height:
                                result = Image.new(pil_img.mode, (width, width), background_color)
                                result.paste(pil_img, (0, (width - height) // 2))
                                return result
                            else:
                                result = Image.new(pil_img.mode, (height, height), background_color)
                                result.paste(pil_img, ((height - width) // 2, 0))
                                return result
                        image = expand2square(image)
                    elif image_process_mode in ["Default", "Crop"]:
                        pass
                    elif image_process_mode == "Resize":
                        image = image.resize((336, 336))
                    else:
                        raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
                    max_hw, min_hw = max(image.size), min(image.size)
                    aspect_ratio = max_hw / min_hw
                    max_len, min_len = 800, 400
                    shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
                    longest_edge = int(shortest_edge * aspect_ratio)
                    W, H = image.size
                    if longest_edge != max(image.size):
                        if H > W:
                            H, W = longest_edge, shortest_edge
                        else:
                            H, W = shortest_edge, longest_edge
                        image = image.resize((W, H))
                    if return_pil:
                        images.append(image)
                    else:
                        buffered = BytesIO()
                        image.save(buffered, format="PNG")
                        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                        images.append(img_b64_str)
        return images

    def to_gradio_chatbot(self):
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    import base64
                    from io import BytesIO
                    msg, image, image_process_mode = msg
                    max_hw, min_hw = max(image.size), min(image.size)
                    aspect_ratio = max_hw / min_hw
                    max_len, min_len = 800, 400
                    shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
                    longest_edge = int(shortest_edge * aspect_ratio)
                    W, H = image.size
                    if H > W:
                        H, W = longest_edge, shortest_edge
                    else:
                        H, W = shortest_edge, longest_edge
                    image = image.resize((W, H))
                    buffered = BytesIO()
                    image.save(buffered, format="JPEG")
                    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                    img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
                    msg = img_str + msg.replace('<image>', '').strip()
                    ret.append([msg, None])
                else:
                    ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            version=self.version)

    def dict(self):
        if len(self.get_images()) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }


conv_vicuna_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
        ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
        ("Assistant",
         "Renewable energy sources are those that can be replenished naturally in a relatively "
         "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
         "Non-renewable energy sources, on the other hand, are finite and will eventually be "
         "depleted, such as coal, oil, and natural gas. Here are some key differences between "
         "renewable and non-renewable energy sources:\n"
         "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
         "energy sources are finite and will eventually run out.\n"
         "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
         "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
         "and other negative effects.\n"
         "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
         "have lower operational costs than non-renewable sources.\n"
         "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
         "locations than non-renewable sources.\n"
         "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
         "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
         "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
         "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
    ),
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_vicuna_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llama_2 = Conversation(
    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_llava_llama_2 = Conversation(
    system="You are a helpful language and vision assistant. "
           "You are able to understand the visual content that the user provides, "
           "and assist the user with a variety of tasks using natural language.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_mpt = Conversation(
    system="""<|im_start|>system
A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

conv_llava_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_llava_v0_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("Human", "Assistant"),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
    version="v0_mmtag",
)

conv_llava_v1 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llava_v1_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
           "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
    version="v1_mmtag",
)

default_conversation = conv_vicuna_v0
conv_templates = {
    "default": conv_vicuna_v0,
    "v0": conv_vicuna_v0,
    "v1": conv_vicuna_v1,
    "vicuna_v1": conv_vicuna_v1,
    "llama_2": conv_llama_2,

    "plain": conv_llava_plain,
    "v0_plain": conv_llava_plain,
    "llava_v0": conv_llava_v0,
    "v0_mmtag": conv_llava_v0_mmtag,
    "llava_v1": conv_llava_v1,
    "v1_mmtag": conv_llava_v1_mmtag,
    "llava_llama_2": conv_llava_llama_2,

    "mpt": conv_mpt,
}


if __name__ == "__main__":
    print(default_conversation.get_prompt())

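A minimal usage sketch for the templates defined above; illustrative only, not part of the commit, the question text is made up and every name comes from llava/conversation.py:

from llava.conversation import conv_templates

conv = conv_templates["llava_v1"].copy()   # work on a copy, the templates are shared module state
conv.append_message(conv.roles[0], "<image>\nWhat is shown in this picture?")
conv.append_message(conv.roles[1], None)   # None marks the slot the model will fill in
prompt = conv.get_prompt()                 # serialized prompt, ready for the tokenizer
print(prompt)
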
llava/eval/eval_gpt_review.py
ADDED
@@ -0,0 +1,113 @@
import argparse
import json
import os

import openai
import tqdm
import ray
import time

NUM_SECONDS_TO_SLEEP = 3

@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            pass
        except Exception as e:
            print(e)
        time.sleep(NUM_SECONDS_TO_SLEEP)

    print('success!')
    return response['choices'][0]['message']['content']


def parse_score(review):
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        sp = score_pair.split(' ')
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    # parser.add_argument('-a', '--answer')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    ray.init()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    review_file = open(f'{args.output}', 'w')

    js_list = []
    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        # if idx == 1:
        #     break

        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            rule = rule_dict['default']
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        js_list.append({
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1['answer_id'],
            'answer2_id': ans2['answer_id'],
            'category': category})
        idx += 1
        handles.append(get_eval.remote(content, args.max_tokens))
        # To avoid the rate limit set by OpenAI
        time.sleep(NUM_SECONDS_TO_SLEEP)

    reviews = ray.get(handles)
    for idx, review in enumerate(reviews):
        scores = parse_score(review)
        js_list[idx]['content'] = review
        js_list[idx]['tuple'] = scores
        review_file.write(json.dumps(js_list[idx]) + '\n')
    review_file.close()

llava/eval/eval_gpt_review_bench.py
ADDED
@@ -0,0 +1,121 @@
1 |
+
import argparse
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
|
5 |
+
import openai
|
6 |
+
import time
|
7 |
+
|
8 |
+
NUM_SECONDS_TO_SLEEP = 0.5
|
9 |
+
|
10 |
+
|
11 |
+
def get_eval(content: str, max_tokens: int):
|
12 |
+
while True:
|
13 |
+
try:
|
14 |
+
response = openai.ChatCompletion.create(
|
15 |
+
model='gpt-4-0314',
|
16 |
+
messages=[{
|
17 |
+
'role': 'system',
|
18 |
+
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
|
19 |
+
}, {
|
20 |
+
'role': 'user',
|
21 |
+
'content': content,
|
22 |
+
}],
|
23 |
+
temperature=0.2, # TODO: figure out which temperature is best for evaluation
|
24 |
+
max_tokens=max_tokens,
|
25 |
+
)
|
26 |
+
break
|
27 |
+
except openai.error.RateLimitError:
|
28 |
+
pass
|
29 |
+
except Exception as e:
|
30 |
+
print(e)
|
31 |
+
time.sleep(NUM_SECONDS_TO_SLEEP)
|
32 |
+
|
33 |
+
return response['choices'][0]['message']['content']
|
34 |
+
|
35 |
+
|
36 |
+
def parse_score(review):
|
37 |
+
try:
|
38 |
+
score_pair = review.split('\n')[0]
|
39 |
+
score_pair = score_pair.replace(',', ' ')
|
40 |
+
sp = score_pair.split(' ')
|
41 |
+
if len(sp) == 2:
|
42 |
+
return [float(sp[0]), float(sp[1])]
|
43 |
+
else:
|
44 |
+
print('error', review)
|
45 |
+
return [-1, -1]
|
46 |
+
except Exception as e:
|
47 |
+
print(e)
|
48 |
+
print('error', review)
|
49 |
+
return [-1, -1]
|
50 |
+
|
51 |
+
|
52 |
+
if __name__ == '__main__':
|
53 |
+
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
|
54 |
+
parser.add_argument('-q', '--question')
|
55 |
+
parser.add_argument('-c', '--context')
|
56 |
+
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
|
57 |
+
parser.add_argument('-r', '--rule')
|
58 |
+
parser.add_argument('-o', '--output')
|
59 |
+
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
|
60 |
+
args = parser.parse_args()
|
61 |
+
|
62 |
+
f_q = open(os.path.expanduser(args.question))
|
63 |
+
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
|
64 |
+
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
|
65 |
+
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
|
66 |
+
|
67 |
+
if os.path.isfile(os.path.expanduser(args.output)):
|
68 |
+
cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
|
69 |
+
else:
|
70 |
+
cur_reviews = []
|
71 |
+
|
72 |
+
review_file = open(f'{args.output}', 'a')
|
73 |
+
|
74 |
+
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
|
75 |
+
image_to_context = {context['image']: context for context in context_list}
|
76 |
+
|
77 |
+
handles = []
|
78 |
+
idx = 0
|
79 |
+
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
|
80 |
+
ques = json.loads(ques_js)
|
81 |
+
ans1 = json.loads(ans1_js)
|
82 |
+
ans2 = json.loads(ans2_js)
|
83 |
+
|
84 |
+
inst = image_to_context[ques['image']]
|
85 |
+
|
86 |
+
if isinstance(inst['caption'], list):
|
87 |
+
cap_str = '\n'.join(inst['caption'])
|
88 |
+
else:
|
89 |
+
cap_str = inst['caption']
|
90 |
+
|
91 |
+
category = 'llava_bench_' + json.loads(ques_js)['category']
|
92 |
+
if category in rule_dict:
|
93 |
+
rule = rule_dict[category]
|
94 |
+
else:
|
95 |
+
assert False, f"Visual QA category not found in rule file: {category}."
|
96 |
+
prompt = rule['prompt']
|
97 |
+
role = rule['role']
|
98 |
+
content = (f'[Context]\n{cap_str}\n\n'
|
99 |
+
f'[Question]\n{ques["text"]}\n\n'
|
100 |
+
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
|
101 |
+
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
|
102 |
+
f'[System]\n{prompt}\n\n')
|
103 |
+
cur_js = {
|
104 |
+
'id': idx+1,
|
105 |
+
'question_id': ques['question_id'],
|
106 |
+
'answer1_id': ans1.get('answer_id', ans1['question_id']),
|
107 |
+
'answer2_id': ans2.get('answer_id', ans2['answer_id']),
|
108 |
+
'category': category
|
109 |
+
}
|
110 |
+
if idx >= len(cur_reviews):
|
111 |
+
review = get_eval(content, args.max_tokens)
|
112 |
+
scores = parse_score(review)
|
113 |
+
cur_js['content'] = review
|
114 |
+
cur_js['tuple'] = scores
|
115 |
+
review_file.write(json.dumps(cur_js) + '\n')
|
116 |
+
review_file.flush()
|
117 |
+
else:
|
118 |
+
print(f'Skipping {idx} as we already have it.')
|
119 |
+
idx += 1
|
120 |
+
print(idx)
|
121 |
+
review_file.close()
|
llava/eval/eval_gpt_review_visual.py
ADDED
@@ -0,0 +1,118 @@
1 |
+
import argparse
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
|
5 |
+
import openai
|
6 |
+
import time
|
7 |
+
|
8 |
+
NUM_SECONDS_TO_SLEEP = 0.5
|
9 |
+
|
10 |
+
|
11 |
+
def get_eval(content: str, max_tokens: int):
|
12 |
+
while True:
|
13 |
+
try:
|
14 |
+
response = openai.ChatCompletion.create(
|
15 |
+
model='gpt-4-0314',
|
16 |
+
messages=[{
|
17 |
+
'role': 'system',
|
18 |
+
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
|
19 |
+
}, {
|
20 |
+
'role': 'user',
|
21 |
+
'content': content,
|
22 |
+
}],
|
23 |
+
temperature=0.2, # TODO: figure out which temperature is best for evaluation
|
24 |
+
max_tokens=max_tokens,
|
25 |
+
)
|
26 |
+
break
|
27 |
+
except openai.error.RateLimitError:
|
28 |
+
pass
|
29 |
+
except Exception as e:
|
30 |
+
print(e)
|
31 |
+
time.sleep(NUM_SECONDS_TO_SLEEP)
|
32 |
+
|
33 |
+
return response['choices'][0]['message']['content']
|
34 |
+
|
35 |
+
|
36 |
+
def parse_score(review):
|
37 |
+
try:
|
38 |
+
score_pair = review.split('\n')[0]
|
39 |
+
score_pair = score_pair.replace(',', ' ')
|
40 |
+
sp = score_pair.split(' ')
|
41 |
+
if len(sp) == 2:
|
42 |
+
return [float(sp[0]), float(sp[1])]
|
43 |
+
else:
|
44 |
+
print('error', review)
|
45 |
+
return [-1, -1]
|
46 |
+
except Exception as e:
|
47 |
+
print(e)
|
48 |
+
print('error', review)
|
49 |
+
return [-1, -1]
|
50 |
+
|
51 |
+
|
52 |
+
if __name__ == '__main__':
|
53 |
+
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
|
54 |
+
parser.add_argument('-q', '--question')
|
55 |
+
parser.add_argument('-c', '--context')
|
56 |
+
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
|
57 |
+
parser.add_argument('-r', '--rule')
|
58 |
+
parser.add_argument('-o', '--output')
|
59 |
+
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
|
60 |
+
args = parser.parse_args()
|
61 |
+
|
62 |
+
f_q = open(os.path.expanduser(args.question))
|
63 |
+
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
|
64 |
+
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
|
65 |
+
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
|
66 |
+
|
67 |
+
if os.path.isfile(os.path.expanduser(args.output)):
|
68 |
+
cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
|
69 |
+
else:
|
70 |
+
cur_reviews = []
|
71 |
+
|
72 |
+
review_file = open(f'{args.output}', 'a')
|
73 |
+
|
74 |
+
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
|
75 |
+
image_to_context = {context['image']: context for context in context_list}
|
76 |
+
|
77 |
+
handles = []
|
78 |
+
idx = 0
|
79 |
+
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
|
80 |
+
ques = json.loads(ques_js)
|
81 |
+
ans1 = json.loads(ans1_js)
|
82 |
+
ans2 = json.loads(ans2_js)
|
83 |
+
|
84 |
+
inst = image_to_context[ques['image']]
|
85 |
+
cap_str = '\n'.join(inst['captions'])
|
86 |
+
box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])
|
87 |
+
|
88 |
+
category = json.loads(ques_js)['category']
|
89 |
+
if category in rule_dict:
|
90 |
+
rule = rule_dict[category]
|
91 |
+
else:
|
92 |
+
assert False, f"Visual QA category not found in rule file: {category}."
|
93 |
+
prompt = rule['prompt']
|
94 |
+
role = rule['role']
|
95 |
+
content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
|
96 |
+
f'[Question]\n{ques["text"]}\n\n'
|
97 |
+
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
|
98 |
+
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
|
99 |
+
f'[System]\n{prompt}\n\n')
|
100 |
+
cur_js = {
|
101 |
+
'id': idx+1,
|
102 |
+
'question_id': ques['question_id'],
|
103 |
+
'answer1_id': ans1.get('answer_id', ans1['question_id']),
|
104 |
+
'answer2_id': ans2.get('answer_id', ans2['answer_id']),
|
105 |
+
'category': category
|
106 |
+
}
|
107 |
+
if idx >= len(cur_reviews):
|
108 |
+
review = get_eval(content, args.max_tokens)
|
109 |
+
scores = parse_score(review)
|
110 |
+
cur_js['content'] = review
|
111 |
+
cur_js['tuple'] = scores
|
112 |
+
review_file.write(json.dumps(cur_js) + '\n')
|
113 |
+
review_file.flush()
|
114 |
+
else:
|
115 |
+
print(f'Skipping {idx} as we already have it.')
|
116 |
+
idx += 1
|
117 |
+
print(idx)
|
118 |
+
review_file.close()
|
llava/eval/eval_pope.py
ADDED
@@ -0,0 +1,81 @@
import os
import json
import argparse

def eval_pope(answers, label_file):
    label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]

    for answer in answers:
        text = answer['text']

        # Only keep the first sentence
        if text.find('.') != -1:
            text = text.split('.')[0]

        text = text.replace(',', '')
        words = text.split(' ')
        if 'No' in words or 'not' in words or 'no' in words:
            answer['text'] = 'no'
        else:
            answer['text'] = 'yes'

    for i in range(len(label_list)):
        if label_list[i] == 'no':
            label_list[i] = 0
        else:
            label_list[i] = 1

    pred_list = []
    for answer in answers:
        if answer['text'] == 'no':
            pred_list.append(0)
        else:
            pred_list.append(1)

    pos = 1
    neg = 0
    yes_ratio = pred_list.count(1) / len(pred_list)

    TP, TN, FP, FN = 0, 0, 0, 0
    for pred, label in zip(pred_list, label_list):
        if pred == pos and label == pos:
            TP += 1
        elif pred == pos and label == neg:
            FP += 1
        elif pred == neg and label == neg:
            TN += 1
        elif pred == neg and label == pos:
            FN += 1

    print('TP\tFP\tTN\tFN\t')
    print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))

    precision = float(TP) / float(TP + FP)
    recall = float(TP) / float(TP + FN)
    f1 = 2*precision*recall / (precision + recall)
    acc = (TP + TN) / (TP + TN + FP + FN)
    print('Accuracy: {}'.format(acc))
    print('Precision: {}'.format(precision))
    print('Recall: {}'.format(recall))
    print('F1 score: {}'.format(f1))
    print('Yes ratio: {}'.format(yes_ratio))
    print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-dir", type=str)
    parser.add_argument("--question-file", type=str)
    parser.add_argument("--result-file", type=str)
    args = parser.parse_args()

    questions = [json.loads(line) for line in open(args.question_file)]
    questions = {question['question_id']: question for question in questions}
    answers = [json.loads(q) for q in open(args.result_file)]
    for file in os.listdir(args.annotation_dir):
        assert file.startswith('coco_pope_')
        assert file.endswith('.json')
        category = file[10:-5]
        cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
        print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
        eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
        print("====================================")

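A tiny worked example of the metric formulas used in eval_pope above; illustrative only, the predictions and labels are made up:

pred_list  = [1, 1, 0, 1, 0, 0]   # 1 = "yes", 0 = "no"
label_list = [1, 0, 0, 1, 1, 0]

TP = sum(p == 1 and l == 1 for p, l in zip(pred_list, label_list))  # 2
FP = sum(p == 1 and l == 0 for p, l in zip(pred_list, label_list))  # 1
TN = sum(p == 0 and l == 0 for p, l in zip(pred_list, label_list))  # 2
FN = sum(p == 0 and l == 1 for p, l in zip(pred_list, label_list))  # 1

precision = TP / (TP + FP)                           # 2/3
recall = TP / (TP + FN)                              # 2/3
f1 = 2 * precision * recall / (precision + recall)   # 2/3
acc = (TP + TN) / (TP + TN + FP + FN)                # 2/3
print(precision, recall, f1, acc)
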
llava/eval/eval_science_qa.py
ADDED
@@ -0,0 +1,114 @@
1 |
+
import argparse
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
import random
|
6 |
+
|
7 |
+
|
8 |
+
def get_args():
|
9 |
+
parser = argparse.ArgumentParser()
|
10 |
+
parser.add_argument('--base-dir', type=str)
|
11 |
+
parser.add_argument('--result-file', type=str)
|
12 |
+
parser.add_argument('--output-file', type=str)
|
13 |
+
parser.add_argument('--output-result', type=str)
|
14 |
+
parser.add_argument('--split', type=str, default='test')
|
15 |
+
parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
|
16 |
+
return parser.parse_args()
|
17 |
+
|
18 |
+
|
19 |
+
def convert_caps(results):
|
20 |
+
fakecaps = []
|
21 |
+
for result in results:
|
22 |
+
image_id = result['question_id']
|
23 |
+
caption = result['text']
|
24 |
+
fakecaps.append({"image_id": int(image_id), "caption": caption})
|
25 |
+
return fakecaps
|
26 |
+
|
27 |
+
|
28 |
+
def get_pred_idx(prediction, choices, options):
|
29 |
+
"""
|
30 |
+
Get the index (e.g. 2) from the prediction (e.g. 'C')
|
31 |
+
"""
|
32 |
+
if prediction in options[:len(choices)]:
|
33 |
+
return options.index(prediction)
|
34 |
+
else:
|
35 |
+
return -1
|
36 |
+
return random.choice(range(len(choices)))
|
37 |
+
|
38 |
+
|
39 |
+
if __name__ == "__main__":
|
40 |
+
args = get_args()
|
41 |
+
|
42 |
+
base_dir = args.base_dir
|
43 |
+
split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
|
44 |
+
problems = json.load(open(os.path.join(base_dir, "problems.json")))
|
45 |
+
predictions = [json.loads(line) for line in open(args.result_file)]
|
46 |
+
predictions = {pred['question_id']: pred for pred in predictions}
|
47 |
+
split_problems = {idx: problems[idx] for idx in split_indices}
|
48 |
+
|
49 |
+
results = {'correct': [], 'incorrect': []}
|
50 |
+
sqa_results = {}
|
51 |
+
sqa_results['acc'] = None
|
52 |
+
sqa_results['correct'] = None
|
53 |
+
sqa_results['count'] = None
|
54 |
+
sqa_results['results'] = {}
|
55 |
+
sqa_results['outputs'] = {}
|
56 |
+
|
57 |
+
for prob_id, prob in split_problems.items():
|
58 |
+
if prob_id not in predictions:
|
59 |
+
pred = {'text': 'FAILED', 'prompt': 'Unknown'}
|
60 |
+
pred_text = 'FAILED'
|
61 |
+
else:
|
62 |
+
pred = predictions[prob_id]
|
63 |
+
pred_text = pred['text']
|
64 |
+
|
65 |
+
if pred_text in args.options:
|
66 |
+
answer = pred_text
|
67 |
+
elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ":
|
68 |
+
answer = pred_text[0]
|
69 |
+
else:
|
70 |
+
pattern = re.compile(r'The answer is ([A-Z]).')
|
71 |
+
res = pattern.findall(pred_text)
|
72 |
+
if len(res) == 1:
|
73 |
+
answer = res[0] # 'A', 'B', ...
|
74 |
+
else:
|
75 |
+
answer = "FAILED"
|
76 |
+
|
77 |
+
pred_idx = get_pred_idx(answer, prob['choices'], args.options)
|
78 |
+
|
79 |
+
analysis = {
|
80 |
+
'question_id': prob_id,
|
81 |
+
'parsed_ans': answer,
|
82 |
+
'ground_truth': args.options[prob['answer']],
|
83 |
+
'question': pred['prompt'],
|
84 |
+
'pred': pred_text,
|
85 |
+
'is_multimodal': '<image>' in pred['prompt'],
|
86 |
+
}
|
87 |
+
|
88 |
+
sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
|
89 |
+
sqa_results['outputs'][prob_id] = pred_text
|
90 |
+
|
91 |
+
if pred_idx == prob['answer']:
|
92 |
+
results['correct'].append(analysis)
|
93 |
+
else:
|
94 |
+
results['incorrect'].append(analysis)
|
95 |
+
|
96 |
+
correct = len(results['correct'])
|
97 |
+
total = len(results['correct']) + len(results['incorrect'])
|
98 |
+
|
99 |
+
###### IMG ######
|
100 |
+
multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']])
|
101 |
+
multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']])
|
102 |
+
multimodal_total = multimodal_correct + multimodal_incorrect
|
103 |
+
###### IMG ######
|
104 |
+
|
105 |
+
print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%')
|
106 |
+
|
107 |
+
sqa_results['acc'] = correct / total * 100
|
108 |
+
sqa_results['correct'] = correct
|
109 |
+
sqa_results['count'] = total
|
110 |
+
|
111 |
+
with open(args.output_file, 'w') as f:
|
112 |
+
json.dump(results, f, indent=2)
|
113 |
+
with open(args.output_result, 'w') as f:
|
114 |
+
json.dump(sqa_results, f, indent=2)
|
llava/eval/eval_science_qa_gpt4.py
ADDED
@@ -0,0 +1,104 @@
1 |
+
import argparse
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
import random
|
6 |
+
from collections import defaultdict
|
7 |
+
|
8 |
+
|
9 |
+
def get_args():
|
10 |
+
parser = argparse.ArgumentParser()
|
11 |
+
parser.add_argument('--base-dir', type=str)
|
12 |
+
parser.add_argument('--gpt4-result', type=str)
|
13 |
+
parser.add_argument('--our-result', type=str)
|
14 |
+
parser.add_argument('--split', type=str, default='test')
|
15 |
+
parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
|
16 |
+
return parser.parse_args()
|
17 |
+
|
18 |
+
|
19 |
+
def convert_caps(results):
|
20 |
+
fakecaps = []
|
21 |
+
for result in results:
|
22 |
+
image_id = result['question_id']
|
23 |
+
caption = result['text']
|
24 |
+
fakecaps.append({"image_id": int(image_id), "caption": caption})
|
25 |
+
return fakecaps
|
26 |
+
|
27 |
+
|
28 |
+
def get_pred_idx(prediction, choices, options):
|
29 |
+
"""
|
30 |
+
Get the index (e.g. 2) from the prediction (e.g. 'C')
|
31 |
+
"""
|
32 |
+
if prediction in options[:len(choices)]:
|
33 |
+
return options.index(prediction)
|
34 |
+
else:
|
35 |
+
return random.choice(range(len(choices)))
|
36 |
+
|
37 |
+
|
38 |
+
if __name__ == "__main__":
|
39 |
+
args = get_args()
|
40 |
+
|
41 |
+
base_dir = args.base_dir
|
42 |
+
split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
|
43 |
+
problems = json.load(open(os.path.join(base_dir, "problems.json")))
|
44 |
+
our_predictions = [json.loads(line) for line in open(args.our_result)]
|
45 |
+
our_predictions = {pred['question_id']: pred for pred in our_predictions}
|
46 |
+
split_problems = {idx: problems[idx] for idx in split_indices}
|
47 |
+
|
48 |
+
gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
|
49 |
+
|
50 |
+
results = defaultdict(lambda: 0)
|
51 |
+
|
52 |
+
for prob_id, prob in split_problems.items():
|
53 |
+
if prob_id not in our_predictions:
|
54 |
+
continue
|
55 |
+
if prob_id not in gpt4_predictions:
|
56 |
+
continue
|
57 |
+
our_pred = our_predictions[prob_id]['text']
|
58 |
+
gpt4_pred = gpt4_predictions[prob_id]
|
59 |
+
|
60 |
+
pattern = re.compile(r'The answer is ([A-Z]).')
|
61 |
+
our_res = pattern.findall(our_pred)
|
62 |
+
if len(our_res) == 1:
|
63 |
+
our_answer = our_res[0] # 'A', 'B', ...
|
64 |
+
else:
|
65 |
+
our_answer = "FAILED"
|
66 |
+
gpt4_res = pattern.findall(gpt4_pred)
|
67 |
+
if len(gpt4_res) == 1:
|
68 |
+
gpt4_answer = gpt4_res[0] # 'A', 'B', ...
|
69 |
+
else:
|
70 |
+
gpt4_answer = "FAILED"
|
71 |
+
|
72 |
+
our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
|
73 |
+
gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
|
74 |
+
|
75 |
+
if gpt4_answer == 'FAILED':
|
76 |
+
results['gpt4_failed'] += 1
|
77 |
+
# continue
|
78 |
+
gpt4_pred_idx = our_pred_idx
|
79 |
+
# if our_pred_idx != prob['answer']:
|
80 |
+
# print(our_predictions[prob_id]['prompt'])
|
81 |
+
# print('-----------------')
|
82 |
+
# print(f'LECTURE: {prob["lecture"]}')
|
83 |
+
# print(f'SOLUTION: {prob["solution"]}')
|
84 |
+
# print('=====================')
|
85 |
+
else:
|
86 |
+
# continue
|
87 |
+
pass
|
88 |
+
# gpt4_pred_idx = our_pred_idx
|
89 |
+
|
90 |
+
if gpt4_pred_idx == prob['answer']:
|
91 |
+
results['correct'] += 1
|
92 |
+
else:
|
93 |
+
results['incorrect'] += 1
|
94 |
+
|
95 |
+
|
96 |
+
if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
|
97 |
+
results['correct_upperbound'] += 1
|
98 |
+
|
99 |
+
correct = results['correct']
|
100 |
+
total = results['correct'] + results['incorrect']
|
101 |
+
print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
|
102 |
+
print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
|
103 |
+
print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
|
104 |
+
|
llava/eval/eval_science_qa_gpt4_requery.py
ADDED
@@ -0,0 +1,149 @@
1 |
+
import argparse
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
import random
|
6 |
+
from collections import defaultdict
|
7 |
+
|
8 |
+
|
9 |
+
def get_args():
|
10 |
+
parser = argparse.ArgumentParser()
|
11 |
+
parser.add_argument('--base-dir', type=str)
|
12 |
+
parser.add_argument('--gpt4-result', type=str)
|
13 |
+
parser.add_argument('--requery-result', type=str)
|
14 |
+
parser.add_argument('--our-result', type=str)
|
15 |
+
parser.add_argument('--output-result', type=str)
|
16 |
+
parser.add_argument('--split', type=str, default='test')
|
17 |
+
parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
|
18 |
+
return parser.parse_args()
|
19 |
+
|
20 |
+
|
21 |
+
def convert_caps(results):
|
22 |
+
fakecaps = []
|
23 |
+
for result in results:
|
24 |
+
image_id = result['question_id']
|
25 |
+
caption = result['text']
|
26 |
+
fakecaps.append({"image_id": int(image_id), "caption": caption})
|
27 |
+
return fakecaps
|
28 |
+
|
29 |
+
|
30 |
+
def get_pred_idx(prediction, choices, options):
|
31 |
+
"""
|
32 |
+
Get the index (e.g. 2) from the prediction (e.g. 'C')
|
33 |
+
"""
|
34 |
+
if prediction in options[:len(choices)]:
|
35 |
+
return options.index(prediction)
|
36 |
+
else:
|
37 |
+
return random.choice(range(len(choices)))
|
38 |
+
|
39 |
+
|
40 |
+
if __name__ == "__main__":
|
41 |
+
args = get_args()
|
42 |
+
|
43 |
+
base_dir = args.base_dir
|
44 |
+
split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
|
45 |
+
problems = json.load(open(os.path.join(base_dir, "problems.json")))
|
46 |
+
our_predictions = [json.loads(line) for line in open(args.our_result)]
|
47 |
+
our_predictions = {pred['question_id']: pred for pred in our_predictions}
|
48 |
+
split_problems = {idx: problems[idx] for idx in split_indices}
|
49 |
+
|
50 |
+
requery_predictions = [json.loads(line) for line in open(args.requery_result)]
|
51 |
+
requery_predictions = {pred['question_id']: pred for pred in requery_predictions}
|
52 |
+
|
53 |
+
gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
|
54 |
+
|
55 |
+
results = defaultdict(lambda: 0)
|
56 |
+
|
57 |
+
sqa_results = {}
|
58 |
+
sqa_results['acc'] = None
|
59 |
+
sqa_results['correct'] = None
|
60 |
+
sqa_results['count'] = None
|
61 |
+
sqa_results['results'] = {}
|
62 |
+
sqa_results['outputs'] = {}
|
63 |
+
|
64 |
+
for prob_id, prob in split_problems.items():
|
65 |
+
if prob_id not in our_predictions:
|
66 |
+
assert False
|
67 |
+
if prob_id not in gpt4_predictions:
|
68 |
+
assert False
|
69 |
+
our_pred = our_predictions[prob_id]['text']
|
70 |
+
gpt4_pred = gpt4_predictions[prob_id]
|
71 |
+
if prob_id not in requery_predictions:
|
72 |
+
results['missing_requery'] += 1
|
73 |
+
requery_pred = "MISSING"
|
74 |
+
else:
|
75 |
+
requery_pred = requery_predictions[prob_id]['text']
|
76 |
+
|
77 |
+
pattern = re.compile(r'The answer is ([A-Z]).')
|
78 |
+
our_res = pattern.findall(our_pred)
|
79 |
+
if len(our_res) == 1:
|
80 |
+
our_answer = our_res[0] # 'A', 'B', ...
|
81 |
+
else:
|
82 |
+
our_answer = "FAILED"
|
83 |
+
|
84 |
+
requery_res = pattern.findall(requery_pred)
|
85 |
+
if len(requery_res) == 1:
|
86 |
+
requery_answer = requery_res[0] # 'A', 'B', ...
|
87 |
+
else:
|
88 |
+
requery_answer = "FAILED"
|
89 |
+
|
90 |
+
gpt4_res = pattern.findall(gpt4_pred)
|
91 |
+
if len(gpt4_res) == 1:
|
92 |
+
gpt4_answer = gpt4_res[0] # 'A', 'B', ...
|
93 |
+
else:
|
94 |
+
gpt4_answer = "FAILED"
|
95 |
+
|
96 |
+
our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
|
97 |
+
gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
|
98 |
+
requery_pred_idx = get_pred_idx(requery_answer, prob['choices'], args.options)
|
99 |
+
|
100 |
+
results['total'] += 1
|
101 |
+
|
102 |
+
if gpt4_answer == 'FAILED':
|
103 |
+
results['gpt4_failed'] += 1
|
104 |
+
if gpt4_pred_idx == prob['answer']:
|
105 |
+
results['gpt4_correct'] += 1
|
106 |
+
if our_pred_idx == prob['answer']:
|
107 |
+
results['gpt4_ourvisual_correct'] += 1
|
108 |
+
elif gpt4_pred_idx == prob['answer']:
|
109 |
+
results['gpt4_correct'] += 1
|
110 |
+
results['gpt4_ourvisual_correct'] += 1
|
111 |
+
|
112 |
+
if our_pred_idx == prob['answer']:
|
113 |
+
results['our_correct'] += 1
|
114 |
+
|
115 |
+
if requery_answer == 'FAILED':
|
116 |
+
sqa_results['results'][prob_id] = our_pred_idx
|
117 |
+
if our_pred_idx == prob['answer']:
|
118 |
+
results['requery_correct'] += 1
|
119 |
+
else:
|
120 |
+
sqa_results['results'][prob_id] = requery_pred_idx
|
121 |
+
if requery_pred_idx == prob['answer']:
|
122 |
+
results['requery_correct'] += 1
|
123 |
+
else:
|
124 |
+
print(f"""
|
125 |
+
Question ({args.options[prob['answer']]}): {our_predictions[prob_id]['prompt']}
|
126 |
+
Our ({our_answer}): {our_pred}
|
127 |
+
GPT-4 ({gpt4_answer}): {gpt4_pred}
|
128 |
+
Requery ({requery_answer}): {requery_pred}
|
129 |
+
print("=====================================")
|
130 |
+
""")
|
131 |
+
|
132 |
+
if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
|
133 |
+
results['correct_upperbound'] += 1
|
134 |
+
|
135 |
+
total = results['total']
|
136 |
+
print(f'Total: {total}, Our-Correct: {results["our_correct"]}, Accuracy: {results["our_correct"] / total * 100:.2f}%')
|
137 |
+
print(f'Total: {total}, GPT-4-Correct: {results["gpt4_correct"]}, Accuracy: {results["gpt4_correct"] / total * 100:.2f}%')
|
138 |
+
print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
|
139 |
+
print(f'Total: {total}, GPT-4-OursVisual-Correct: {results["gpt4_ourvisual_correct"]}, Accuracy: {results["gpt4_ourvisual_correct"] / total * 100:.2f}%')
|
140 |
+
print(f'Total: {total}, Requery-Correct: {results["requery_correct"]}, Accuracy: {results["requery_correct"] / total * 100:.2f}%')
|
141 |
+
print(f'Total: {total}, Correct upper: {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
|
142 |
+
|
143 |
+
sqa_results['acc'] = results["requery_correct"] / total * 100
|
144 |
+
sqa_results['correct'] = results["requery_correct"]
|
145 |
+
sqa_results['count'] = total
|
146 |
+
|
147 |
+
with open(args.output_result, 'w') as f:
|
148 |
+
json.dump(sqa_results, f, indent=2)
|
149 |
+
|
llava/eval/eval_textvqa.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import argparse
import json
import re

from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--annotation-file', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--result-dir', type=str)
    return parser.parse_args()


def prompt_processor(prompt):
    if prompt.startswith('OCR tokens: '):
        pattern = r"Question: (.*?) Short answer:"
        match = re.search(pattern, prompt, re.DOTALL)
        question = match.group(1)
    elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
        if prompt.startswith('Reference OCR token:'):
            question = prompt.split('\n')[1]
        else:
            question = prompt.split('\n')[0]
    elif len(prompt.split('\n')) == 2:
        question = prompt.split('\n')[0]
    else:
        assert False

    return question.lower()


def eval_single(annotation_file, result_file):
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    annotations = json.load(open(annotation_file))['data']
    annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
    results = [json.loads(line) for line in open(result_file)]

    pred_list = []
    for result in results:
        annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
        pred_list.append({
            "pred_answer": result['text'],
            "gt_answers": annotation['answers'],
        })

    evaluator = TextVQAAccuracyEvaluator()
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))


if __name__ == "__main__":
    args = get_args()

    if args.result_file is not None:
        eval_single(args.annotation_file, args.result_file)

    if args.result_dir is not None:
        for result_file in sorted(os.listdir(args.result_dir)):
            if not result_file.endswith('.jsonl'):
                print(f'Skipping {result_file}')
                continue
            eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
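A quick way to sanity-check the prompt parsing above is to call prompt_processor directly. The snippet below is a minimal, illustrative sketch (not part of the commit); the prompt strings are invented examples of the two layouts the function accepts, and it assumes the llava package is importable.

# Illustrative sketch only: exercise prompt_processor on the two prompt layouts
# it handles. The prompt strings below are invented examples.
from llava.eval.eval_textvqa import prompt_processor

ocr_prompt = "OCR tokens: stop, ahead Question: What does the sign say? Short answer:"
plain_prompt = "What does the sign say?\nAnswer the question using a single word or phrase."

print(prompt_processor(ocr_prompt))    # -> "what does the sign say?"
print(prompt_processor(plain_prompt))  # -> "what does the sign say?"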
llava/eval/generate_webpage_data_from_table.py
ADDED
@@ -0,0 +1,111 @@
"""Generate json file for webpage."""
import json
import os
import re

# models = ['llama', 'alpaca', 'gpt35', 'bard']
models = ['vicuna']


def read_jsonl(path: str, key: str=None):
    data = []
    with open(os.path.expanduser(path)) as f:
        for line in f:
            if not line:
                continue
            data.append(json.loads(line))
    if key is not None:
        data.sort(key=lambda x: x[key])
        data = {item[key]: item for item in data}
    return data


def trim_hanging_lines(s: str, n: int) -> str:
    s = s.strip()
    for _ in range(n):
        s = s.split('\n', 1)[1].strip()
    return s


if __name__ == '__main__':
    questions = read_jsonl('table/question.jsonl', key='question_id')

    # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id')
    # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id')
    # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id')
    # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id')
    vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id')
    ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id')

    review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id')
    # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id')
    # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', key='question_id')
    # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id')
    # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id')

    records = []
    for qid in questions.keys():
        r = {
            'id': qid,
            'category': questions[qid]['category'],
            'question': questions[qid]['text'],
            'answers': {
                # 'alpaca': alpaca_answers[qid]['text'],
                # 'llama': llama_answers[qid]['text'],
                # 'bard': bard_answers[qid]['text'],
                # 'gpt35': gpt35_answers[qid]['text'],
                'vicuna': vicuna_answers[qid]['text'],
                'ours': ours_answers[qid]['text'],
            },
            'evaluations': {
                # 'alpaca': review_alpaca[qid]['text'],
                # 'llama': review_llama[qid]['text'],
                # 'bard': review_bard[qid]['text'],
                'vicuna': review_vicuna[qid]['content'],
                # 'gpt35': review_gpt35[qid]['text'],
            },
            'scores': {
                'vicuna': review_vicuna[qid]['tuple'],
                # 'alpaca': review_alpaca[qid]['score'],
                # 'llama': review_llama[qid]['score'],
                # 'bard': review_bard[qid]['score'],
                # 'gpt35': review_gpt35[qid]['score'],
            },
        }

        # cleanup data
        cleaned_evals = {}
        for k, v in r['evaluations'].items():
            v = v.strip()
            lines = v.split('\n')
            # trim the first line if it's a pair of numbers
            if re.match(r'\d+[, ]+\d+', lines[0]):
                lines = lines[1:]
            v = '\n'.join(lines)
            cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**')

        r['evaluations'] = cleaned_evals
        records.append(r)

    # Reorder the records, this is optional
    for r in records:
        if r['id'] <= 20:
            r['id'] += 60
        else:
            r['id'] -= 20
    for r in records:
        if r['id'] <= 50:
            r['id'] += 10
        elif 50 < r['id'] <= 60:
            r['id'] -= 50
    for r in records:
        if r['id'] == 7:
            r['id'] = 1
        elif r['id'] < 7:
            r['id'] += 1

    records.sort(key=lambda x: x['id'])

    # Write to file
    with open('webpage/data.json', 'w') as f:
        json.dump({'questions': records, 'models': models}, f, indent=2)
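For context on read_jsonl above: when key is given it returns a dict keyed by that field instead of a list, which the record-building loop relies on. The snippet below is a small illustrative sketch using a temporary toy file; the path and records are made up, and it assumes the llava package is importable.

# Illustrative sketch only: read_jsonl(path, key='question_id') returns a dict
# keyed by question_id. The toy file written here is a made-up example.
import json, os, tempfile
from llava.eval.generate_webpage_data_from_table import read_jsonl

tmp = os.path.join(tempfile.gettempdir(), "toy_questions.jsonl")
with open(tmp, "w") as f:
    f.write(json.dumps({"question_id": 2, "text": "b"}) + "\n")
    f.write(json.dumps({"question_id": 1, "text": "a"}) + "\n")

by_id = read_jsonl(tmp, key="question_id")
print(sorted(by_id.keys()))  # -> [1, 2]
print(by_id[1]["text"])      # -> "a"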
llava/eval/m4c_evaluator.py
ADDED
@@ -0,0 +1,334 @@
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
import re
|
3 |
+
|
4 |
+
from tqdm import tqdm
|
5 |
+
|
6 |
+
|
7 |
+
class EvalAIAnswerProcessor:
|
8 |
+
"""
|
9 |
+
Processes an answer similar to Eval AI
|
10 |
+
copied from
|
11 |
+
https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
|
12 |
+
"""
|
13 |
+
|
14 |
+
CONTRACTIONS = {
|
15 |
+
"aint": "ain't",
|
16 |
+
"arent": "aren't",
|
17 |
+
"cant": "can't",
|
18 |
+
"couldve": "could've",
|
19 |
+
"couldnt": "couldn't",
|
20 |
+
"couldn'tve": "couldn't've",
|
21 |
+
"couldnt've": "couldn't've",
|
22 |
+
"didnt": "didn't",
|
23 |
+
"doesnt": "doesn't",
|
24 |
+
"dont": "don't",
|
25 |
+
"hadnt": "hadn't",
|
26 |
+
"hadnt've": "hadn't've",
|
27 |
+
"hadn'tve": "hadn't've",
|
28 |
+
"hasnt": "hasn't",
|
29 |
+
"havent": "haven't",
|
30 |
+
"hed": "he'd",
|
31 |
+
"hed've": "he'd've",
|
32 |
+
"he'dve": "he'd've",
|
33 |
+
"hes": "he's",
|
34 |
+
"howd": "how'd",
|
35 |
+
"howll": "how'll",
|
36 |
+
"hows": "how's",
|
37 |
+
"Id've": "I'd've",
|
38 |
+
"I'dve": "I'd've",
|
39 |
+
"Im": "I'm",
|
40 |
+
"Ive": "I've",
|
41 |
+
"isnt": "isn't",
|
42 |
+
"itd": "it'd",
|
43 |
+
"itd've": "it'd've",
|
44 |
+
"it'dve": "it'd've",
|
45 |
+
"itll": "it'll",
|
46 |
+
"let's": "let's",
|
47 |
+
"maam": "ma'am",
|
48 |
+
"mightnt": "mightn't",
|
49 |
+
"mightnt've": "mightn't've",
|
50 |
+
"mightn'tve": "mightn't've",
|
51 |
+
"mightve": "might've",
|
52 |
+
"mustnt": "mustn't",
|
53 |
+
"mustve": "must've",
|
54 |
+
"neednt": "needn't",
|
55 |
+
"notve": "not've",
|
56 |
+
"oclock": "o'clock",
|
57 |
+
"oughtnt": "oughtn't",
|
58 |
+
"ow's'at": "'ow's'at",
|
59 |
+
"'ows'at": "'ow's'at",
|
60 |
+
"'ow'sat": "'ow's'at",
|
61 |
+
"shant": "shan't",
|
62 |
+
"shed've": "she'd've",
|
63 |
+
"she'dve": "she'd've",
|
64 |
+
"she's": "she's",
|
65 |
+
"shouldve": "should've",
|
66 |
+
"shouldnt": "shouldn't",
|
67 |
+
"shouldnt've": "shouldn't've",
|
68 |
+
"shouldn'tve": "shouldn't've",
|
69 |
+
"somebody'd": "somebodyd",
|
70 |
+
"somebodyd've": "somebody'd've",
|
71 |
+
"somebody'dve": "somebody'd've",
|
72 |
+
"somebodyll": "somebody'll",
|
73 |
+
"somebodys": "somebody's",
|
74 |
+
"someoned": "someone'd",
|
75 |
+
"someoned've": "someone'd've",
|
76 |
+
"someone'dve": "someone'd've",
|
77 |
+
"someonell": "someone'll",
|
78 |
+
"someones": "someone's",
|
79 |
+
"somethingd": "something'd",
|
80 |
+
"somethingd've": "something'd've",
|
81 |
+
"something'dve": "something'd've",
|
82 |
+
"somethingll": "something'll",
|
83 |
+
"thats": "that's",
|
84 |
+
"thered": "there'd",
|
85 |
+
"thered've": "there'd've",
|
86 |
+
"there'dve": "there'd've",
|
87 |
+
"therere": "there're",
|
88 |
+
"theres": "there's",
|
89 |
+
"theyd": "they'd",
|
90 |
+
"theyd've": "they'd've",
|
91 |
+
"they'dve": "they'd've",
|
92 |
+
"theyll": "they'll",
|
93 |
+
"theyre": "they're",
|
94 |
+
"theyve": "they've",
|
95 |
+
"twas": "'twas",
|
96 |
+
"wasnt": "wasn't",
|
97 |
+
"wed've": "we'd've",
|
98 |
+
"we'dve": "we'd've",
|
99 |
+
"weve": "we've",
|
100 |
+
"werent": "weren't",
|
101 |
+
"whatll": "what'll",
|
102 |
+
"whatre": "what're",
|
103 |
+
"whats": "what's",
|
104 |
+
"whatve": "what've",
|
105 |
+
"whens": "when's",
|
106 |
+
"whered": "where'd",
|
107 |
+
"wheres": "where's",
|
108 |
+
"whereve": "where've",
|
109 |
+
"whod": "who'd",
|
110 |
+
"whod've": "who'd've",
|
111 |
+
"who'dve": "who'd've",
|
112 |
+
"wholl": "who'll",
|
113 |
+
"whos": "who's",
|
114 |
+
"whove": "who've",
|
115 |
+
"whyll": "why'll",
|
116 |
+
"whyre": "why're",
|
117 |
+
"whys": "why's",
|
118 |
+
"wont": "won't",
|
119 |
+
"wouldve": "would've",
|
120 |
+
"wouldnt": "wouldn't",
|
121 |
+
"wouldnt've": "wouldn't've",
|
122 |
+
"wouldn'tve": "wouldn't've",
|
123 |
+
"yall": "y'all",
|
124 |
+
"yall'll": "y'all'll",
|
125 |
+
"y'allll": "y'all'll",
|
126 |
+
"yall'd've": "y'all'd've",
|
127 |
+
"y'alld've": "y'all'd've",
|
128 |
+
"y'all'dve": "y'all'd've",
|
129 |
+
"youd": "you'd",
|
130 |
+
"youd've": "you'd've",
|
131 |
+
"you'dve": "you'd've",
|
132 |
+
"youll": "you'll",
|
133 |
+
"youre": "you're",
|
134 |
+
"youve": "you've",
|
135 |
+
}
|
136 |
+
|
137 |
+
NUMBER_MAP = {
|
138 |
+
"none": "0",
|
139 |
+
"zero": "0",
|
140 |
+
"one": "1",
|
141 |
+
"two": "2",
|
142 |
+
"three": "3",
|
143 |
+
"four": "4",
|
144 |
+
"five": "5",
|
145 |
+
"six": "6",
|
146 |
+
"seven": "7",
|
147 |
+
"eight": "8",
|
148 |
+
"nine": "9",
|
149 |
+
"ten": "10",
|
150 |
+
}
|
151 |
+
ARTICLES = ["a", "an", "the"]
|
152 |
+
PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
|
153 |
+
COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
|
154 |
+
PUNCTUATIONS = [
|
155 |
+
";",
|
156 |
+
r"/",
|
157 |
+
"[",
|
158 |
+
"]",
|
159 |
+
'"',
|
160 |
+
"{",
|
161 |
+
"}",
|
162 |
+
"(",
|
163 |
+
")",
|
164 |
+
"=",
|
165 |
+
"+",
|
166 |
+
"\\",
|
167 |
+
"_",
|
168 |
+
"-",
|
169 |
+
">",
|
170 |
+
"<",
|
171 |
+
"@",
|
172 |
+
"`",
|
173 |
+
",",
|
174 |
+
"?",
|
175 |
+
"!",
|
176 |
+
]
|
177 |
+
|
178 |
+
def __init__(self, *args, **kwargs):
|
179 |
+
pass
|
180 |
+
|
181 |
+
def word_tokenize(self, word):
|
182 |
+
word = word.lower()
|
183 |
+
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
|
184 |
+
return word.strip()
|
185 |
+
|
186 |
+
def process_punctuation(self, in_text):
|
187 |
+
out_text = in_text
|
188 |
+
for p in self.PUNCTUATIONS:
|
189 |
+
if (p + " " in in_text or " " + p in in_text) or (
|
190 |
+
re.search(self.COMMA_STRIP, in_text) is not None
|
191 |
+
):
|
192 |
+
out_text = out_text.replace(p, "")
|
193 |
+
else:
|
194 |
+
out_text = out_text.replace(p, " ")
|
195 |
+
out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
|
196 |
+
return out_text
|
197 |
+
|
198 |
+
def process_digit_article(self, in_text):
|
199 |
+
out_text = []
|
200 |
+
temp_text = in_text.lower().split()
|
201 |
+
for word in temp_text:
|
202 |
+
word = self.NUMBER_MAP.setdefault(word, word)
|
203 |
+
if word not in self.ARTICLES:
|
204 |
+
out_text.append(word)
|
205 |
+
else:
|
206 |
+
pass
|
207 |
+
for word_id, word in enumerate(out_text):
|
208 |
+
if word in self.CONTRACTIONS:
|
209 |
+
out_text[word_id] = self.CONTRACTIONS[word]
|
210 |
+
out_text = " ".join(out_text)
|
211 |
+
return out_text
|
212 |
+
|
213 |
+
def __call__(self, item):
|
214 |
+
item = self.word_tokenize(item)
|
215 |
+
item = item.replace("\n", " ").replace("\t", " ").strip()
|
216 |
+
item = self.process_punctuation(item)
|
217 |
+
item = self.process_digit_article(item)
|
218 |
+
return item
|
219 |
+
|
220 |
+
|
221 |
+
class TextVQAAccuracyEvaluator:
|
222 |
+
def __init__(self):
|
223 |
+
self.answer_processor = EvalAIAnswerProcessor()
|
224 |
+
|
225 |
+
def _compute_answer_scores(self, raw_answers):
|
226 |
+
"""
|
227 |
+
compute the accuracy (soft score) of human answers
|
228 |
+
"""
|
229 |
+
answers = [self.answer_processor(a) for a in raw_answers]
|
230 |
+
assert len(answers) == 10
|
231 |
+
gt_answers = list(enumerate(answers))
|
232 |
+
unique_answers = set(answers)
|
233 |
+
unique_answer_scores = {}
|
234 |
+
|
235 |
+
for unique_answer in unique_answers:
|
236 |
+
accs = []
|
237 |
+
for gt_answer in gt_answers:
|
238 |
+
other_answers = [item for item in gt_answers if item != gt_answer]
|
239 |
+
matching_answers = [
|
240 |
+
item for item in other_answers if item[1] == unique_answer
|
241 |
+
]
|
242 |
+
acc = min(1, float(len(matching_answers)) / 3)
|
243 |
+
accs.append(acc)
|
244 |
+
unique_answer_scores[unique_answer] = sum(accs) / len(accs)
|
245 |
+
|
246 |
+
return unique_answer_scores
|
247 |
+
|
248 |
+
def eval_pred_list(self, pred_list):
|
249 |
+
pred_scores = []
|
250 |
+
for entry in tqdm(pred_list):
|
251 |
+
pred_answer = self.answer_processor(entry["pred_answer"])
|
252 |
+
unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
|
253 |
+
score = unique_answer_scores.get(pred_answer, 0.0)
|
254 |
+
pred_scores.append(score)
|
255 |
+
|
256 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
257 |
+
return accuracy
|
258 |
+
|
259 |
+
|
260 |
+
class STVQAAccuracyEvaluator:
|
261 |
+
def __init__(self):
|
262 |
+
self.answer_processor = EvalAIAnswerProcessor()
|
263 |
+
|
264 |
+
def eval_pred_list(self, pred_list):
|
265 |
+
pred_scores = []
|
266 |
+
for entry in pred_list:
|
267 |
+
pred_answer = self.answer_processor(entry["pred_answer"])
|
268 |
+
gts = [self.answer_processor(a) for a in entry["gt_answers"]]
|
269 |
+
score = 1.0 if pred_answer in gts else 0.0
|
270 |
+
pred_scores.append(score)
|
271 |
+
|
272 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
273 |
+
return accuracy
|
274 |
+
|
275 |
+
|
276 |
+
class STVQAANLSEvaluator:
|
277 |
+
def __init__(self):
|
278 |
+
import editdistance # install with `pip install editdistance`
|
279 |
+
|
280 |
+
self.get_edit_distance = editdistance.eval
|
281 |
+
|
282 |
+
def get_anls(self, s1, s2):
|
283 |
+
s1 = s1.lower().strip()
|
284 |
+
s2 = s2.lower().strip()
|
285 |
+
iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
|
286 |
+
anls = iou if iou >= 0.5 else 0.0
|
287 |
+
return anls
|
288 |
+
|
289 |
+
def eval_pred_list(self, pred_list):
|
290 |
+
pred_scores = []
|
291 |
+
for entry in pred_list:
|
292 |
+
anls = max(
|
293 |
+
self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
|
294 |
+
)
|
295 |
+
pred_scores.append(anls)
|
296 |
+
|
297 |
+
accuracy = sum(pred_scores) / len(pred_scores)
|
298 |
+
return accuracy
|
299 |
+
|
300 |
+
|
301 |
+
class TextCapsBleu4Evaluator:
|
302 |
+
def __init__(self):
|
303 |
+
# The following script requires Java 1.8.0 and pycocotools installed.
|
304 |
+
# The pycocoevalcap can be installed with pip as
|
305 |
+
# pip install git+https://github.com/ronghanghu/coco-caption.git@python23
|
306 |
+
# Original pycocoevalcap code is at https://github.com/tylin/coco-caption
|
307 |
+
# but has no python3 support yet.
|
308 |
+
try:
|
309 |
+
from pycocoevalcap.bleu.bleu import Bleu
|
310 |
+
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
|
311 |
+
except ModuleNotFoundError:
|
312 |
+
print(
|
313 |
+
"Please install pycocoevalcap module using "
|
314 |
+
"pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa
|
315 |
+
)
|
316 |
+
raise
|
317 |
+
|
318 |
+
self.tokenizer = PTBTokenizer()
|
319 |
+
self.scorer = Bleu(4)
|
320 |
+
|
321 |
+
def eval_pred_list(self, pred_list):
|
322 |
+
# Create reference and hypotheses captions.
|
323 |
+
gts = {}
|
324 |
+
res = {}
|
325 |
+
for idx, entry in enumerate(pred_list):
|
326 |
+
gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
|
327 |
+
res[idx] = [{"caption": entry["pred_answer"]}]
|
328 |
+
|
329 |
+
gts = self.tokenizer.tokenize(gts)
|
330 |
+
res = self.tokenizer.tokenize(res)
|
331 |
+
score, _ = self.scorer.compute_score(gts, res)
|
332 |
+
|
333 |
+
bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
|
334 |
+
return bleu4
|
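The TextVQA metric implemented above gives a prediction credit of min(number of matching human answers / 3, 1), averaged over leave-one-out subsets of the ten human answers. The snippet below is an illustrative sketch with made-up answers; it assumes the llava package is importable.

# Illustrative sketch only: toy use of the TextVQA soft-accuracy evaluator.
# Each sample must carry exactly 10 ground-truth answers (asserted above).
from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator

evaluator = TextVQAAccuracyEvaluator()
pred_list = [{
    "pred_answer": "Two",  # normalised to "2" by EvalAIAnswerProcessor
    "gt_answers": ["two", "2", "two", "3", "two",
                   "2", "two", "two", "3", "two"],
}]
print(evaluator.eval_pred_list(pred_list))  # -> 1.0 (at least 3 humans agree)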
llava/eval/model_qa.py
ADDED
@@ -0,0 +1,85 @@
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
import torch
import os
import json
from tqdm import tqdm
import shortuuid

from llava.conversation import default_conversation
from llava.utils import disable_torch_init


# new stopping implementation
class KeywordsStoppingCriteria(StoppingCriteria):
    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.tokenizer = tokenizer
        self.start_len = None
        self.input_ids = input_ids

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if self.start_len is None:
            self.start_len = self.input_ids.shape[1]
        else:
            outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
            for keyword in self.keywords:
                if keyword in outputs:
                    return True
        return False


@torch.inference_mode()
def eval_model(model_name, questions_file, answers_file):
    # Model
    disable_torch_init()
    model_name = os.path.expanduser(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
    model = AutoModelForCausalLM.from_pretrained(model_name,
        torch_dtype=torch.float16).cuda()


    ques_file = open(os.path.expanduser(questions_file), "r")
    ans_file = open(os.path.expanduser(answers_file), "w")
    for i, line in enumerate(tqdm(ques_file)):
        idx = json.loads(line)["question_id"]
        qs = json.loads(line)["text"]
        cat = json.loads(line)["category"]
        conv = default_conversation.copy()
        conv.append_message(conv.roles[0], qs)
        prompt = conv.get_prompt()
        inputs = tokenizer([prompt])
        input_ids = torch.as_tensor(inputs.input_ids).cuda()
        stopping_criteria = KeywordsStoppingCriteria([conv.sep], tokenizer, input_ids)
        output_ids = model.generate(
            input_ids,
            do_sample=True,
            use_cache=True,
            temperature=0.7,
            max_new_tokens=1024,
            stopping_criteria=[stopping_criteria])
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        try:
            index = outputs.index(conv.sep, len(prompt))
        except ValueError:
            outputs += conv.sep
            index = outputs.index(conv.sep, len(prompt))

        outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    args = parser.parse_args()

    eval_model(args.model_name, args.question_file, args.answers_file)
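KeywordsStoppingCriteria above decodes only the newly generated tokens and stops generation once any keyword (here the conversation separator) appears. The sketch below shows the same class plugged into a generic Hugging Face generate call; "sshleifer/tiny-gpt2" is just a small placeholder model, and the snippet assumes the llava package and its dependencies are importable.

# Illustrative sketch only: reuse KeywordsStoppingCriteria with any causal LM.
# "sshleifer/tiny-gpt2" is a tiny placeholder model, not the one used here.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from llava.eval.model_qa import KeywordsStoppingCriteria

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

input_ids = tok(["Hello"], return_tensors="pt").input_ids
stop = KeywordsStoppingCriteria(["###"], tok, input_ids)
out = model.generate(input_ids, max_new_tokens=20, stopping_criteria=[stop])
print(tok.batch_decode(out, skip_special_tokens=True)[0])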
llava/eval/model_vqa.py
ADDED
@@ -0,0 +1,125 @@
import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

from PIL import Image
import math


def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # integer division
    return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    chunks = split_list(lst, n)
    return chunks[k]


def eval_model(args):
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    meta_pth = '/opt/data/private/metas/unsplash_ISO300-_PIL_1024_x2x4_APEX.txt'
    img_pths = []
    with open(meta_pth, 'r') as f:
        for line in f.readlines():
            img_pths.append(line.split('\t')[0])
    f.close()

    img_pths = get_chunk(img_pths, args.num_chunks, args.chunk_idx)

    # split to batch 8
    img_pths = split_list(img_pths, 8)


    questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")
    for line in tqdm(questions):
        idx = line["question_id"]
        image_file = line["image"]
        qs = line["text"]
        cur_prompt = qs
        if model.config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

        image = Image.open(os.path.join(args.image_folder, image_file))
        image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor.unsqueeze(0).half().cuda(),
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                # no_repeat_ngram_size=3,
                max_new_tokens=1024,
                use_cache=True)

        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    args = parser.parse_args()

    eval_model(args)
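The split_list/get_chunk pair above is how the question list is sharded across --num-chunks parallel workers, with worker k processing chunk k. A self-contained, illustrative sketch of that behaviour (the toy list is made up):

# Illustrative sketch only: sharding a list across workers with the same
# split_list/get_chunk logic as above.
import math

def split_list(lst, n):
    chunk_size = math.ceil(len(lst) / n)
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]

def get_chunk(lst, n, k):
    return split_list(lst, n)[k]

questions = list(range(10))
print(split_list(questions, 3))    # -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
print(get_chunk(questions, 3, 1))  # -> [4, 5, 6, 7]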
llava/eval/model_vqa_loader.py
ADDED
@@ -0,0 +1,144 @@
1 |
+
import argparse
|
2 |
+
import torch
|
3 |
+
import os
|
4 |
+
import json
|
5 |
+
from tqdm import tqdm
|
6 |
+
import shortuuid
|
7 |
+
|
8 |
+
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
9 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
10 |
+
from llava.model.builder import load_pretrained_model
|
11 |
+
from llava.utils import disable_torch_init
|
12 |
+
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
|
13 |
+
from torch.utils.data import Dataset, DataLoader
|
14 |
+
|
15 |
+
from PIL import Image
|
16 |
+
import math
|
17 |
+
|
18 |
+
|
19 |
+
def split_list(lst, n):
|
20 |
+
"""Split a list into n (roughly) equal-sized chunks"""
|
21 |
+
chunk_size = math.ceil(len(lst) / n) # integer division
|
22 |
+
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
|
23 |
+
|
24 |
+
|
25 |
+
def get_chunk(lst, n, k):
|
26 |
+
chunks = split_list(lst, n)
|
27 |
+
return chunks[k]
|
28 |
+
|
29 |
+
|
30 |
+
# Custom dataset class
|
31 |
+
class CustomDataset(Dataset):
|
32 |
+
def __init__(self, questions, image_folder, tokenizer, image_processor, model_config):
|
33 |
+
self.questions = questions
|
34 |
+
self.image_folder = image_folder
|
35 |
+
self.tokenizer = tokenizer
|
36 |
+
self.image_processor = image_processor
|
37 |
+
self.model_config = model_config
|
38 |
+
|
39 |
+
def __getitem__(self, index):
|
40 |
+
line = self.questions[index]
|
41 |
+
image_file = line["image"]
|
42 |
+
qs = line["text"]
|
43 |
+
if self.model_config.mm_use_im_start_end:
|
44 |
+
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
|
45 |
+
else:
|
46 |
+
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
|
47 |
+
|
48 |
+
conv = conv_templates[args.conv_mode].copy()
|
49 |
+
conv.append_message(conv.roles[0], qs)
|
50 |
+
conv.append_message(conv.roles[1], None)
|
51 |
+
prompt = conv.get_prompt()
|
52 |
+
|
53 |
+
image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
|
54 |
+
image_tensor = process_images([image], self.image_processor, self.model_config)[0]
|
55 |
+
|
56 |
+
input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
|
57 |
+
|
58 |
+
return input_ids, image_tensor
|
59 |
+
|
60 |
+
def __len__(self):
|
61 |
+
return len(self.questions)
|
62 |
+
|
63 |
+
|
64 |
+
# DataLoader
|
65 |
+
def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
|
66 |
+
assert batch_size == 1, "batch_size must be 1"
|
67 |
+
dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
|
68 |
+
data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
|
69 |
+
return data_loader
|
70 |
+
|
71 |
+
|
72 |
+
def eval_model(args):
|
73 |
+
# Model
|
74 |
+
disable_torch_init()
|
75 |
+
model_path = os.path.expanduser(args.model_path)
|
76 |
+
model_name = get_model_name_from_path(model_path)
|
77 |
+
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
|
78 |
+
|
79 |
+
questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
|
80 |
+
questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
|
81 |
+
answers_file = os.path.expanduser(args.answers_file)
|
82 |
+
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
|
83 |
+
ans_file = open(answers_file, "w")
|
84 |
+
|
85 |
+
if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
|
86 |
+
args.conv_mode = args.conv_mode + '_mmtag'
|
87 |
+
print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
|
88 |
+
|
89 |
+
data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)
|
90 |
+
|
91 |
+
for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)):
|
92 |
+
idx = line["question_id"]
|
93 |
+
cur_prompt = line["text"]
|
94 |
+
|
95 |
+
stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2
|
96 |
+
input_ids = input_ids.to(device='cuda', non_blocking=True)
|
97 |
+
|
98 |
+
with torch.inference_mode():
|
99 |
+
output_ids = model.generate(
|
100 |
+
input_ids,
|
101 |
+
images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True),
|
102 |
+
do_sample=True if args.temperature > 0 else False,
|
103 |
+
temperature=args.temperature,
|
104 |
+
top_p=args.top_p,
|
105 |
+
num_beams=args.num_beams,
|
106 |
+
max_new_tokens=128,
|
107 |
+
use_cache=True)
|
108 |
+
|
109 |
+
input_token_len = input_ids.shape[1]
|
110 |
+
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
|
111 |
+
if n_diff_input_output > 0:
|
112 |
+
print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
|
113 |
+
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
|
114 |
+
outputs = outputs.strip()
|
115 |
+
if outputs.endswith(stop_str):
|
116 |
+
outputs = outputs[:-len(stop_str)]
|
117 |
+
outputs = outputs.strip()
|
118 |
+
|
119 |
+
ans_id = shortuuid.uuid()
|
120 |
+
ans_file.write(json.dumps({"question_id": idx,
|
121 |
+
"prompt": cur_prompt,
|
122 |
+
"text": outputs,
|
123 |
+
"answer_id": ans_id,
|
124 |
+
"model_id": model_name,
|
125 |
+
"metadata": {}}) + "\n")
|
126 |
+
# ans_file.flush()
|
127 |
+
ans_file.close()
|
128 |
+
|
129 |
+
if __name__ == "__main__":
|
130 |
+
parser = argparse.ArgumentParser()
|
131 |
+
parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
|
132 |
+
parser.add_argument("--model-base", type=str, default=None)
|
133 |
+
parser.add_argument("--image-folder", type=str, default="")
|
134 |
+
parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
|
135 |
+
parser.add_argument("--answers-file", type=str, default="answer.jsonl")
|
136 |
+
parser.add_argument("--conv-mode", type=str, default="llava_v1")
|
137 |
+
parser.add_argument("--num-chunks", type=int, default=1)
|
138 |
+
parser.add_argument("--chunk-idx", type=int, default=0)
|
139 |
+
parser.add_argument("--temperature", type=float, default=0.2)
|
140 |
+
parser.add_argument("--top_p", type=float, default=None)
|
141 |
+
parser.add_argument("--num_beams", type=int, default=1)
|
142 |
+
args = parser.parse_args()
|
143 |
+
|
144 |
+
eval_model(args)
|
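model_vqa_loader.py wraps each question in a torch Dataset and a batch_size=1 DataLoader so image preprocessing and tokenisation can run in background workers while the GPU generates. The snippet below is an illustrative, self-contained sketch of that pattern, with dummy tensors standing in for token ids and pixel values.

# Illustrative sketch only: the batch_size=1 Dataset/DataLoader pattern used
# above, with dummy tensors instead of real token ids and images.
import torch
from torch.utils.data import Dataset, DataLoader

class DummyVQADataset(Dataset):
    def __init__(self, n):
        self.n = n
    def __getitem__(self, idx):
        input_ids = torch.tensor([idx, idx + 1])  # stands in for token ids
        image_tensor = torch.zeros(3, 4, 4)       # stands in for pixel values
        return input_ids, image_tensor
    def __len__(self):
        return self.n

loader = DataLoader(DummyVQADataset(3), batch_size=1, num_workers=0, shuffle=False)
for input_ids, image_tensor in loader:
    print(input_ids.shape, image_tensor.shape)  # a leading batch dim of 1 is added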
llava/eval/model_vqa_mmbench.py
ADDED
@@ -0,0 +1,170 @@
1 |
+
import argparse
|
2 |
+
import torch
|
3 |
+
import os
|
4 |
+
import json
|
5 |
+
import pandas as pd
|
6 |
+
from tqdm import tqdm
|
7 |
+
import shortuuid
|
8 |
+
|
9 |
+
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
10 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
11 |
+
from llava.model.builder import load_pretrained_model
|
12 |
+
from llava.utils import disable_torch_init
|
13 |
+
from llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
|
14 |
+
|
15 |
+
from PIL import Image
|
16 |
+
import math
|
17 |
+
|
18 |
+
|
19 |
+
all_options = ['A', 'B', 'C', 'D']
|
20 |
+
|
21 |
+
|
22 |
+
def split_list(lst, n):
|
23 |
+
"""Split a list into n (roughly) equal-sized chunks"""
|
24 |
+
chunk_size = math.ceil(len(lst) / n) # integer division
|
25 |
+
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
|
26 |
+
|
27 |
+
|
28 |
+
def get_chunk(lst, n, k):
|
29 |
+
chunks = split_list(lst, n)
|
30 |
+
return chunks[k]
|
31 |
+
|
32 |
+
|
33 |
+
def is_none(value):
|
34 |
+
if value is None:
|
35 |
+
return True
|
36 |
+
if type(value) is float and math.isnan(value):
|
37 |
+
return True
|
38 |
+
if type(value) is str and value.lower() == 'nan':
|
39 |
+
return True
|
40 |
+
if type(value) is str and value.lower() == 'none':
|
41 |
+
return True
|
42 |
+
return False
|
43 |
+
|
44 |
+
def get_options(row, options):
|
45 |
+
parsed_options = []
|
46 |
+
for option in options:
|
47 |
+
option_value = row[option]
|
48 |
+
if is_none(option_value):
|
49 |
+
break
|
50 |
+
parsed_options.append(option_value)
|
51 |
+
return parsed_options
|
52 |
+
|
53 |
+
|
54 |
+
def eval_model(args):
|
55 |
+
# Model
|
56 |
+
disable_torch_init()
|
57 |
+
model_path = os.path.expanduser(args.model_path)
|
58 |
+
model_name = get_model_name_from_path(model_path)
|
59 |
+
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
|
60 |
+
|
61 |
+
questions = pd.read_table(os.path.expanduser(args.question_file))
|
62 |
+
questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
|
63 |
+
answers_file = os.path.expanduser(args.answers_file)
|
64 |
+
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
|
65 |
+
ans_file = open(answers_file, "w")
|
66 |
+
|
67 |
+
if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
|
68 |
+
args.conv_mode = args.conv_mode + '_mmtag'
|
69 |
+
print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
|
70 |
+
|
71 |
+
for index, row in tqdm(questions.iterrows(), total=len(questions)):
|
72 |
+
options = get_options(row, all_options)
|
73 |
+
cur_option_char = all_options[:len(options)]
|
74 |
+
|
75 |
+
if args.all_rounds:
|
76 |
+
num_rounds = len(options)
|
77 |
+
else:
|
78 |
+
num_rounds = 1
|
79 |
+
|
80 |
+
for round_idx in range(num_rounds):
|
81 |
+
idx = row['index']
|
82 |
+
question = row['question']
|
83 |
+
hint = row['hint']
|
84 |
+
image = load_image_from_base64(row['image'])
|
85 |
+
if not is_none(hint):
|
86 |
+
question = hint + '\n' + question
|
87 |
+
for option_char, option in zip(all_options[:len(options)], options):
|
88 |
+
question = question + '\n' + option_char + '. ' + option
|
89 |
+
qs = cur_prompt = question
|
90 |
+
if model.config.mm_use_im_start_end:
|
91 |
+
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
|
92 |
+
else:
|
93 |
+
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
|
94 |
+
|
95 |
+
if args.single_pred_prompt:
|
96 |
+
if args.lang == 'cn':
|
97 |
+
qs = qs + '\n' + "请直接回答选项字母。"
|
98 |
+
else:
|
99 |
+
qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
|
100 |
+
|
101 |
+
conv = conv_templates[args.conv_mode].copy()
|
102 |
+
conv.append_message(conv.roles[0], qs)
|
103 |
+
conv.append_message(conv.roles[1], None)
|
104 |
+
prompt = conv.get_prompt()
|
105 |
+
|
106 |
+
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
|
107 |
+
|
108 |
+
image_tensor = process_images([image], image_processor, model.config)[0]
|
109 |
+
# image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
|
110 |
+
|
111 |
+
stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
|
112 |
+
|
113 |
+
with torch.inference_mode():
|
114 |
+
output_ids = model.generate(
|
115 |
+
input_ids,
|
116 |
+
images=image_tensor.unsqueeze(0).half().cuda(),
|
117 |
+
do_sample=True if args.temperature > 0 else False,
|
118 |
+
temperature=args.temperature,
|
119 |
+
top_p=args.top_p,
|
120 |
+
num_beams=args.num_beams,
|
121 |
+
# no_repeat_ngram_size=3,
|
122 |
+
max_new_tokens=1024,
|
123 |
+
use_cache=True)
|
124 |
+
|
125 |
+
input_token_len = input_ids.shape[1]
|
126 |
+
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
|
127 |
+
if n_diff_input_output > 0:
|
128 |
+
print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
|
129 |
+
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
|
130 |
+
outputs = outputs.strip()
|
131 |
+
if outputs.endswith(stop_str):
|
132 |
+
outputs = outputs[:-len(stop_str)]
|
133 |
+
outputs = outputs.strip()
|
134 |
+
|
135 |
+
ans_id = shortuuid.uuid()
|
136 |
+
ans_file.write(json.dumps({"question_id": idx,
|
137 |
+
"round_id": round_idx,
|
138 |
+
"prompt": cur_prompt,
|
139 |
+
"text": outputs,
|
140 |
+
"options": options,
|
141 |
+
"option_char": cur_option_char,
|
142 |
+
"answer_id": ans_id,
|
143 |
+
"model_id": model_name,
|
144 |
+
"metadata": {}}) + "\n")
|
145 |
+
ans_file.flush()
|
146 |
+
|
147 |
+
# rotate options
|
148 |
+
options = options[1:] + options[:1]
|
149 |
+
cur_option_char = cur_option_char[1:] + cur_option_char[:1]
|
150 |
+
ans_file.close()
|
151 |
+
|
152 |
+
if __name__ == "__main__":
|
153 |
+
parser = argparse.ArgumentParser()
|
154 |
+
parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
|
155 |
+
parser.add_argument("--model-base", type=str, default=None)
|
156 |
+
parser.add_argument("--image-folder", type=str, default="")
|
157 |
+
parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
|
158 |
+
parser.add_argument("--answers-file", type=str, default="answer.jsonl")
|
159 |
+
parser.add_argument("--conv-mode", type=str, default="llava_v1")
|
160 |
+
parser.add_argument("--num-chunks", type=int, default=1)
|
161 |
+
parser.add_argument("--chunk-idx", type=int, default=0)
|
162 |
+
parser.add_argument("--temperature", type=float, default=0.2)
|
163 |
+
parser.add_argument("--top_p", type=float, default=None)
|
164 |
+
parser.add_argument("--num_beams", type=int, default=1)
|
165 |
+
parser.add_argument("--all-rounds", action="store_true")
|
166 |
+
parser.add_argument("--single-pred-prompt", action="store_true")
|
167 |
+
parser.add_argument("--lang", type=str, default="en")
|
168 |
+
args = parser.parse_args()
|
169 |
+
|
170 |
+
eval_model(args)
|
llava/eval/model_vqa_science.py
ADDED
@@ -0,0 +1,147 @@
1 |
+
import argparse
|
2 |
+
import torch
|
3 |
+
import os
|
4 |
+
import json
|
5 |
+
from tqdm import tqdm
|
6 |
+
import shortuuid
|
7 |
+
|
8 |
+
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
9 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
10 |
+
from llava.model.builder import load_pretrained_model
|
11 |
+
from llava.utils import disable_torch_init
|
12 |
+
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
13 |
+
|
14 |
+
from PIL import Image
|
15 |
+
import math
|
16 |
+
|
17 |
+
|
18 |
+
def split_list(lst, n):
|
19 |
+
"""Split a list into n (roughly) equal-sized chunks"""
|
20 |
+
chunk_size = math.ceil(len(lst) / n) # integer division
|
21 |
+
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
|
22 |
+
|
23 |
+
|
24 |
+
def get_chunk(lst, n, k):
|
25 |
+
chunks = split_list(lst, n)
|
26 |
+
return chunks[k]
|
27 |
+
|
28 |
+
|
29 |
+
def eval_model(args):
|
30 |
+
# Model
|
31 |
+
disable_torch_init()
|
32 |
+
model_path = os.path.expanduser(args.model_path)
|
33 |
+
model_name = get_model_name_from_path(model_path)
|
34 |
+
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
|
35 |
+
|
36 |
+
questions = json.load(open(os.path.expanduser(args.question_file), "r"))
|
37 |
+
questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
|
38 |
+
answers_file = os.path.expanduser(args.answers_file)
|
39 |
+
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
|
40 |
+
ans_file = open(answers_file, "w")
|
41 |
+
for i, line in enumerate(tqdm(questions)):
|
42 |
+
idx = line["id"]
|
43 |
+
question = line['conversations'][0]
|
44 |
+
qs = question['value'].replace('<image>', '').strip()
|
45 |
+
cur_prompt = qs
|
46 |
+
|
47 |
+
if 'image' in line:
|
48 |
+
image_file = line["image"]
|
49 |
+
image = Image.open(os.path.join(args.image_folder, image_file))
|
50 |
+
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
|
51 |
+
images = image_tensor.unsqueeze(0).half().cuda()
|
52 |
+
if getattr(model.config, 'mm_use_im_start_end', False):
|
53 |
+
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
|
54 |
+
else:
|
55 |
+
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
|
56 |
+
cur_prompt = '<image>' + '\n' + cur_prompt
|
57 |
+
else:
|
58 |
+
images = None
|
59 |
+
|
60 |
+
if args.single_pred_prompt:
|
61 |
+
qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
|
62 |
+
cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly."
|
63 |
+
|
64 |
+
conv = conv_templates[args.conv_mode].copy()
|
65 |
+
conv.append_message(conv.roles[0], qs)
|
66 |
+
conv.append_message(conv.roles[1], None)
|
67 |
+
prompt = conv.get_prompt()
|
68 |
+
|
69 |
+
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
|
70 |
+
|
71 |
+
stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
|
72 |
+
keywords = [stop_str]
|
73 |
+
stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None
|
74 |
+
|
75 |
+
with torch.inference_mode():
|
76 |
+
output_ids = model.generate(
|
77 |
+
input_ids,
|
78 |
+
images=images,
|
79 |
+
do_sample=True if args.temperature > 0 else False,
|
80 |
+
temperature=args.temperature,
|
81 |
+
max_new_tokens=1024,
|
82 |
+
use_cache=True,
|
83 |
+
stopping_criteria=stopping_criteria,
|
84 |
+
)
|
85 |
+
|
86 |
+
input_token_len = input_ids.shape[1]
|
87 |
+
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
|
88 |
+
if n_diff_input_output > 0:
|
89 |
+
print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
|
90 |
+
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
|
91 |
+
outputs = outputs.strip()
|
92 |
+
if outputs.endswith(stop_str):
|
93 |
+
outputs = outputs[:-len(stop_str)]
|
94 |
+
outputs = outputs.strip()
|
95 |
+
|
96 |
+
# prompt for answer
|
97 |
+
if args.answer_prompter:
|
98 |
+
outputs_reasoning = outputs
|
99 |
+
input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
|
100 |
+
|
101 |
+
with torch.inference_mode():
|
102 |
+
output_ids = model.generate(
|
103 |
+
input_ids,
|
104 |
+
images=images,
|
105 |
+
do_sample=True if args.temperature > 0 else False,
|
106 |
+
temperature=args.temperature,
|
107 |
+
max_new_tokens=64,
|
108 |
+
use_cache=True,
|
109 |
+
stopping_criteria=[stopping_criteria])
|
110 |
+
|
111 |
+
input_token_len = input_ids.shape[1]
|
112 |
+
n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
|
113 |
+
if n_diff_input_output > 0:
|
114 |
+
print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
|
115 |
+
outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
|
116 |
+
outputs = outputs.strip()
|
117 |
+
if outputs.endswith(stop_str):
|
118 |
+
outputs = outputs[:-len(stop_str)]
|
119 |
+
outputs = outputs.strip()
|
120 |
+
outputs = outputs_reasoning + '\n The answer is ' + outputs
|
121 |
+
|
122 |
+
ans_id = shortuuid.uuid()
|
123 |
+
ans_file.write(json.dumps({"question_id": idx,
|
124 |
+
"prompt": cur_prompt,
|
125 |
+
"text": outputs,
|
126 |
+
"answer_id": ans_id,
|
127 |
+
"model_id": model_name,
|
128 |
+
"metadata": {}}) + "\n")
|
129 |
+
ans_file.flush()
|
130 |
+
ans_file.close()
|
131 |
+
|
132 |
+
if __name__ == "__main__":
|
133 |
+
parser = argparse.ArgumentParser()
|
134 |
+
parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
|
135 |
+
parser.add_argument("--model-base", type=str, default=None)
|
136 |
+
parser.add_argument("--image-folder", type=str, default="")
|
137 |
+
parser.add_argument("--question-file", type=str, default="tables/question.json")
|
138 |
+
parser.add_argument("--answers-file", type=str, default="answer.jsonl")
|
139 |
+
parser.add_argument("--conv-mode", type=str, default="llava_v0")
|
140 |
+
parser.add_argument("--num-chunks", type=int, default=1)
|
141 |
+
parser.add_argument("--chunk-idx", type=int, default=0)
|
142 |
+
parser.add_argument("--temperature", type=float, default=0.2)
|
143 |
+
parser.add_argument("--answer-prompter", action="store_true")
|
144 |
+
parser.add_argument("--single-pred-prompt", action="store_true")
|
145 |
+
args = parser.parse_args()
|
146 |
+
|
147 |
+
eval_model(args)
|
llava/eval/qa_baseline_gpt35.py
ADDED
@@ -0,0 +1,74 @@
"""Generate answers with GPT-3.5"""
# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import argparse
import json
import os
import time
import concurrent.futures

import openai
import tqdm
import shortuuid

MODEL = 'gpt-3.5-turbo'
MODEL_ID = 'gpt-3.5-turbo:20230327'

def get_answer(question_id: int, question: str, max_tokens: int):
    ans = {
        'answer_id': shortuuid.uuid(),
        'question_id': question_id,
        'model_id': MODEL_ID,
    }
    for _ in range(3):
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful assistant.'
                }, {
                    'role': 'user',
                    'content': question,
                }],
                max_tokens=max_tokens,
            )
            ans['text'] = response['choices'][0]['message']['content']
            return ans
        except Exception as e:
            print('[ERROR]', e)
            ans['text'] = '#ERROR#'
            time.sleep(1)
    return ans


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    questions_dict = {}
    with open(os.path.expanduser(args.question)) as f:
        for line in f:
            if not line:
                continue
            q = json.loads(line)
            questions_dict[q['question_id']] = q['text']

    answers = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
        futures = []
        for qid, question in questions_dict.items():
            future = executor.submit(get_answer, qid, question, args.max_tokens)
            futures.append(future)

        for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
            answers.append(future.result())

    answers.sort(key=lambda x: x['question_id'])

    with open(os.path.expanduser(args.output), 'w') as f:
        table = [json.dumps(ans) for ans in answers]
        f.write('\n'.join(table))
llava/eval/run_llava.py
ADDED
@@ -0,0 +1,97 @@
import argparse
import torch

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria

from PIL import Image

import requests
from PIL import Image
from io import BytesIO


def load_image(image_file):
    if image_file.startswith('http') or image_file.startswith('https'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def eval_model(args):
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name)

    qs = args.query
    if model.config.mm_use_im_start_end:
        qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
    else:
        qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

    if 'llama-2' in model_name.lower():
        conv_mode = "llava_llama_2"
    elif "v1" in model_name.lower():
        conv_mode = "llava_v1"
    elif "mpt" in model_name.lower():
        conv_mode = "mpt"
    else:
        conv_mode = "llava_v0"

    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    image = load_image(args.image_file)
    image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda()

    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

    stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
    keywords = [stop_str]
    stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=image_tensor,
            do_sample=True,
            temperature=0.2,
            max_new_tokens=1024,
            use_cache=True,
            stopping_criteria=[stopping_criteria])

    input_token_len = input_ids.shape[1]
    n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
    if n_diff_input_output > 0:
        print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
    outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
    outputs = outputs.strip()
    if outputs.endswith(stop_str):
        outputs = outputs[:-len(stop_str)]
    outputs = outputs.strip()
    print(outputs)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-file", type=str, required=True)
    parser.add_argument("--query", type=str, required=True)
    parser.add_argument("--conv-mode", type=str, default=None)
    args = parser.parse_args()

    eval_model(args)
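A minimal usage sketch for the script above, assuming a LLaVA checkpoint is available locally; the model path below is a placeholder, and eval_model can be driven either through the CLI flags defined in __main__ or programmatically with an argparse.Namespace:

import argparse

from llava.eval.run_llava import eval_model

# Hypothetical invocation; mirrors the CLI flags defined in run_llava.py.
# Equivalent shell form:
#   python -m llava.eval.run_llava --model-path <path-to-llava-checkpoint> \
#       --image-file Examples/Example1.png --query "Describe this image."
args = argparse.Namespace(
    model_path="<path-to-llava-checkpoint>",  # placeholder, not a real checkpoint
    model_base=None,
    image_file="Examples/Example1.png",       # sample image bundled with this Space
    query="Describe this image.",
    conv_mode=None,                           # let the script auto-infer the template
)
eval_model(args)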
llava/eval/summarize_gpt_review.py
ADDED
@@ -0,0 +1,60 @@
import json
import os
from collections import defaultdict

import numpy as np

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-d', '--dir', default=None)
    parser.add_argument('-v', '--version', default=None)
    parser.add_argument('-s', '--select', nargs='*', default=None)
    parser.add_argument('-f', '--files', nargs='*', default=[])
    parser.add_argument('-i', '--ignore', nargs='*', default=[])
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    if args.ignore is not None:
        args.ignore = [int(x) for x in args.ignore]

    if len(args.files) > 0:
        review_files = args.files
    else:
        review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]

    for review_file in sorted(review_files):
        config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
        if args.select is not None and any(x not in config for x in args.select):
            continue
        if '0613' in config:
            version = '0613'
        else:
            version = '0314'
        if args.version is not None and args.version != version:
            continue
        scores = defaultdict(list)
        print(config)
        with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
            for review_str in f:
                review = json.loads(review_str)
                if review['question_id'] in args.ignore:
                    continue
                if 'category' in review:
                    scores[review['category']].append(review['tuple'])
                    scores['all'].append(review['tuple'])
                else:
                    if 'tuple' in review:
                        scores['all'].append(review['tuple'])
                    else:
                        scores['all'].append(review['score'])
        for k, v in sorted(scores.items()):
            stats = np.asarray(v).mean(0).tolist()
            stats = [round(x, 3) for x in stats]
            # print(k, stats, round(stats[1]/stats[0]*100, 1))
            print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
        print('=================================')
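For reference, a small self-contained sketch of the summary statistic printed above: each review 'tuple' carries two scores, and the script reports the second relative to the first (as a percentage) alongside both mean scores scaled by 10. The toy tuples below are made up purely for illustration.

import numpy as np

# Toy review tuples (two scores per review); values are illustrative only.
toy_scores = [(8.0, 7.0), (9.0, 8.5), (7.5, 7.5)]
stats = np.asarray(toy_scores).mean(0).tolist()
print(round(stats[1] / stats[0] * 100, 1),  # second score relative to the first, in %
      round(stats[0] * 10, 1),              # mean of the first score x10
      round(stats[1] * 10, 1))              # mean of the second score x10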
llava/eval/table/answer/answer_alpaca-13b.jsonl
ADDED
@@ -0,0 +1,80 @@
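The file added below stores one JSON object per line (question_id, answer text, answer_id, model_id, and generation metadata). A minimal sketch for loading it:

import json

# Path of the file added in this commit.
path = "llava/eval/table/answer/answer_alpaca-13b.jsonl"
with open(path) as f:
    answers = [json.loads(line) for line in f]
print(len(answers), answers[0]["question_id"], answers[0]["text"][:60])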
{"question_id": 1, "text": "Improving time management skills involves setting priorities, breaking tasks into smaller chunks, delegating tasks, avoiding multitasking, and taking regular breaks. Additionally, it is important to have a positive attitude and be realistic with goals. Making a to-do list, tracking time, and using technology to automate mundane tasks can also help improve time management skills.", "answer_id": "kEL9ifUHDeYuAXzevje2se", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 2, "text": "The most effective ways to deal with stress include identifying and avoiding the sources of stress, developing healthy coping mechanisms, and engaging in relaxation techniques such as meditation, deep breathing, and physical exercise. Additionally, it is important to take care of one's mental and physical health, get enough rest and nutrition, and practice self-care.", "answer_id": "VcF3NrWGXhhxLkDVurNrwq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 3, "text": "The main differences between Python and JavaScript programming languages are in their syntax, structure, and the type of applications they are designed for. Python is a general-purpose language designed to be easy to read and write, while JavaScript is primarily used for web development. Python is a compiled language, while JavaScript is an interpreted language. Python is strongly typed, while JavaScript is not. Python is typically faster than JavaScript in terms of execution, while JavaScript is better suited to create interactive web pages.", "answer_id": "LpvtyQi9QdSgRrgGDxiGrT", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 4, "text": "Working from home can be a great way to increase productivity, but it does require some strategic planning and tactics. Here are some tips for increasing your productivity while working from home: \n1. Establish a routine: Having a regular schedule and sticking to it can help you stay focused and motivated.\n2. Create a distraction-free zone: Eliminate distractions by turning off notifications and setting a \u201cdo not disturb\u201d mode.\n3. Take breaks: Working for long stretches without taking breaks can lead to burnout and decreased productivity.\n4. Exercise: Exercise can help improve focus and productivity.\n5. Get organized: Having a clean and organized workspace can help you stay on task.\n6. Set goals: Set short-term and long-term goals to keep you motivated and on track.\n7. Connect with colleagues: Don\u2019t be afraid to reach out to colleagues for help or advice.\n8. Track your progress: Keep track of your progress and celebrate your successes.\n9. Unplug: Taking breaks and unplugging from work can help you recharge and come back to work more refreshed.", "answer_id": "7zQm8cSTJhPtPdZdxbcfrX", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 5, "text": "Sure. Quantum computing is a new technology that uses quantum mechanical phenomena to perform operations on data. Unlike traditional computers, which process information in binary form, quantum computers can process information in multiple states at the same time, allowing them to perform operations faster and with more accuracy. It also allows them to solve problems that classical computers struggle with. Quantum computing is still in its infancy, but it has the potential to revolutionize the computing industry and open up new possibilities in artificial intelligence, cryptography, and other fields.", "answer_id": "UrLEH82RHwqqLt2LyvYSKj", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 6, "text": "Plant-based protein sources include legumes, grains, nuts, and seeds, and are generally lower in fat and calories than animal-based proteins. Animal-based proteins come from sources such as meat, eggs, and dairy, and are generally higher in fat and calories than plant-based proteins. Plant-based proteins are also generally lower in cholesterol and saturated fat than animal-based proteins. Additionally, plant-based proteins require fewer resources for production, such as water and land, than animal-based proteins.", "answer_id": "fpRdMTdnfirosQixuf2Gez", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 7, "text": "Developing critical thinking skills involves actively questioning assumptions, taking multiple perspectives, analyzing information, and coming to logical conclusions. You can practice these skills by posing questions to yourself and working through problems, as well as engaging in thoughtful discussion with others.", "answer_id": "PvGmsCJSNFcvQKmPTnnd7s", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 8, "text": "The major challenges faced by the education sector include lack of access to quality education, low educational outcomes, high costs of education, lack of adequate infrastructure and teaching materials, overcrowding in classrooms, and gender inequality. Other challenges include lack of access to technology and digital learning materials, outdated curriculum and pedagogical practices, and inadequate teacher training.", "answer_id": "n4ANAbpR3gvLPP8poPfKZ6", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 9, "text": "The primary factors that influence consumer behavior include cultural, social, and personal factors. Cultural factors include shared beliefs, values, and customs within a society, while social factors include the influence of family, friends, and peers. Personal factors include an individual's lifestyle, attitudes, beliefs, and past experiences. Other factors such as advertising, marketing, and price also have an effect on consumer behavior.", "answer_id": "STJ36GrgQMcaUi7zaoNPit", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 10, "text": "The most effective strategies for conflict resolution in the workplace include:\n1. Establishing open communication: Encouraging open and honest communication between parties is key to resolving any conflict.\n2. Listening actively: Active listening allows both parties to feel heard and allows for a better understanding of the other person's perspective.\n3. Identifying root causes: Understanding the underlying cause of the conflict can help to find more effective solutions that address the core issue.\n4. Focusing on interests, not positions: Focusing on the interests of both parties can help to find creative solutions that satisfy the needs of everyone involved.\n5. Developing mutual respect: Respect is essential to any successful resolution. Building mutual respect can help to create a healthier work environment.\n6. Negotiation: Working together to negotiate a mutually beneficial solution can help to create an agreement that both parties are happy with.\n7. Taking responsibility: Taking responsibility for one\u2019s words and actions can help to create a culture of accountability in the workplace.", "answer_id": "425SwYvqKPAXFGTYKXB7Cs", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 11, "text": "The use of single-use plastic bottles has a number of negative implications on the environment. They are not biodegradable and can take hundreds of years to break down. This means they are around for a long time, releasing potentially harmful chemicals into the environment, and they can be easily mistaken for food by animals. Single-use plastic bottles can also be a source of micro-plastics, which are very small particles of plastic that are difficult to clean up and can be consumed by fish and other aquatic life, leading to health problems in humans who consume those fish. On the other hand, reusable bottles are a much more environmentally friendly choice since they do not produce any waste. They are also often made of materials that are safer for human health, such as stainless steel or glass. Reusable bottles are also more cost-effective since they do not need to be replaced as often as single-use plastic bottles.", "answer_id": "VbNAuj6KAkMdLJQXMo22oK", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 12, "text": "When designing an inclusive and accessible public transportation system, some factors to consider include providing frequent and reliable service, accessible stops and vehicles, a variety of fare options, clear and consistent signage and information, comfortable seating and space for wheelchairs and other assistive devices, and security and safety protocols. Additionally, public transportation systems should be integrated with other services such as public parking, bike and scooter sharing, and shuttles to provide a comprehensive and accessible experience.", "answer_id": "CNGqAeu2QJbQ4QGzHJDPdq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 13, "text": "Governments can use fiscal and monetary policies to combat economic recessions. Fiscal policies involve the government spending money or cutting taxes in order to stimulate the economy, while monetary policies involve changing the money supply or interest rates to influence economic activity. These policies can be used to stabilize the economy, create jobs, and reduce poverty.", "answer_id": "E8w2qYqnm8iqCrSkUv62sz", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 14, "text": "Language and cultural barriers can lead to misunderstandings and make it more difficult to communicate and form relationships in multicultural societies. Without a common language or shared culture, people can have difficulty understanding each other's perspectives and beliefs, which can lead to mistrust and a lack of understanding between different ethnic groups. To overcome these barriers, it is important to make an effort to learn about different cultures, be open-minded, and take the time to understand each other.", "answer_id": "8o5yMymfzo6kzmp9GK5MWr", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 15, "text": "Artificial intelligence can be used to improve the quality and efficiency of healthcare delivery in a variety of ways. AI can be used to assist with diagnosing diseases by comparing symptoms and medical history to images of known diseases and medical conditions. AI can also be used to analyze laboratory results and patient records to identify potential problems and develop treatment plans. AI can be used to automate administrative tasks and reduce paperwork, as well as identify potential drug interactions and side effects. AI can also be used to automate appointment reminders, facilitate communication between doctors and patients, and even provide virtual health coaching to help patients manage their conditions.", "answer_id": "kbJVEEsdsSScEq5Y5furr7", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 16, "text": "CRISPR-Cas9 is a recently developed gene editing technology that has revolutionized the way scientists are able to edit genomes. The technology uses a guide RNA to direct the Cas9 enzyme to a specific location in the genome, where it will cut the DNA strands. This allows for the insertion or deletion of DNA sequences, which can be used to modify the genetic code of an organism. Potential applications include treating genetic diseases, increasing crop yields, and creating pest-resistant crops. Ethically, the biggest concern is the potential misuse of the technology, which could lead to unintended consequences or be used to alter humanity in ways that could harm us.", "answer_id": "CMUL5ULZuR7YC5EPzCBN2N", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 17, "text": "Vaccinations work by stimulating the body's immune system to protect against infectious diseases. Herd immunity is a concept whereby a population is protected against a certain disease when a certain percentage of the population has immunity to the disease, either through vaccination or having already contracted the disease. This is because when enough people are vaccinated, it reduces the spread of the disease and prevents it from spreading to those who are not immune.", "answer_id": "kEmDDQyNqSkyFihYEEBpuR", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 18, "text": "Social media platforms can have a significant influence on how people consume and share news. By providing instant access to news sources and allowing users to easily share content, these platforms can shape the public's information landscape and the spread of misinformation. Some potential implications of this are that users may not always be able to discern between reliable and unreliable sources, and misleading information may be more easily spread and go viral. This can lead to confusion and mistrust among users, and can potentially be exploited for nefarious purposes. To mitigate these risks, it is important for users to be aware of the potential biases and agendas of the sources they are consuming, and for social media platforms to take proactive steps to curb the spread of misinformation.", "answer_id": "Qs3grQsqFVGK9EVkCkf9PB", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 19, "text": "Cultural, social, and economic factors all play a role in determining an individual's food choices. For example, people growing up in households with limited economic means may be more likely to consume cheaper, less nutritious foods due to economic constraints. Additionally, cultural norms and values can influence food choices, such as the emphasis on large portions typical in some American cultural settings. Finally, social factors such as peer pressure and advertising can also sway an individual's food choices. By understanding and taking into account these various factors, health promotion strategies can be tailored and targeted to help individuals make healthier dietary decisions.", "answer_id": "kzZ6dKN7hkRWjqtdHr7Qns", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 20, "text": "Natural selection is the process by which certain traits or characteristics become more or less common in a population over time. This is due to the fact that those individuals with certain advantageous traits are more likely to survive, reproduce, and pass those traits on to their offspring. Over time, this leads to the evolution and adaptation of species to their environment.", "answer_id": "DPPDG6YGFJij2GCmRL66PU", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 21, "text": "I am a brave knight, loyal to my king and country, and I have come to serve and protect them both. I am skilled in the arts of war and peace, and I am proud to be here at the royal court.", "answer_id": "D62FjDb4nZANzPpfSfsiyn", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 22, "text": "Avast ye maties! To the bottom of the sea we go! In search of hidden riches and fortune, we shall brave the depths and face any danger that comes our way. For glory and honor, let us set sail and find the lost Ark!", "answer_id": "k7E4NNw5kyj9DmvP5Pu2zb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 23, "text": "If I were a Shakespearean character, I would declare my love for someone in a soliloquy with passionate words and metaphors. I would express my love for them with descriptions of their beauty and the joy I feel when I am with them. I would confess my hopes and dreams for our future together and declare my devotion to them. I would make my love for them known in a way that could never be forgotten.", "answer_id": "KFocjVCejYrU3YmLjAqoUF", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 24, "text": "As a superhero, my origin story is very special. I was born with special abilities that I had to keep hidden for a long time. I was scared to use them at first, but eventually I discovered that I could use my powers to protect people and make the world a better place. I've had to face a lot of challenges along the way, but I've also had a lot of help from friends and family. Now, I use my abilities to help people in need and fight for justice.", "answer_id": "dq8Sm9djS7e7y9sG9vmMJf", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 25, "text": "If I were a time traveler from the year 3000, I would tell people about the incredible advancements in technology, such as the ability to travel through time, space, and dimensions; the development of intelligent robots and autonomous vehicles; the emergence of virtual reality and augmented reality; and the rise of artificial intelligence and machine learning.", "answer_id": "XZ8fG8e6u7CyKd2moK6abe", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 26, "text": "The game was a nail-biter, with both teams trading leads throughout the game. With only seconds left on the clock, the home team made a bold move and passed the ball to their star player, who took the ball down the court and made a layup at the buzzer to seal the victory for the home team!", "answer_id": "oKaXHfoK4pXwrefFWXmeA8", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 27, "text": "My signature dish is a seamless blend of traditional and modern cooking techniques. I use only the freshest ingredients to create a unique and unforgettable dining experience. The dish is a perfect balance of flavors and textures, with a subtle hint of my personal style. It is a dish that I am proud to call my own.", "answer_id": "ZwiZfvDWm7SETKNBfDk7Mb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 28, "text": "At the summit of Mount Everest, you are filled with a sense of accomplishment and joy. The view from the top is absolutely breathtaking - you can see for miles and miles, with the majestic Himalayan mountain range stretching out in all directions. It is a truly unforgettable experience.", "answer_id": "DxYopRe2LcTJMy3FWu6btd", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 29, "text": "As a colonist on Mars, my daily life is filled with challenges. Finding resources and creating a sustainable environment is a priority. I face a number of challenges including extreme temperature fluctuations, limited access to resources, and the difficulty of travelling to and from the planet. Additionally, I must be mindful of my physical and mental health since I am so far from home. Despite these challenges, I am grateful to be able to explore and experience this new world.", "answer_id": "WC3UJVh4jQ5RUkpcRMU98L", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 30, "text": "In the post-apocalyptic world, I am a survivor by necessity. I scavenge for food and supplies, and I'm always on the lookout for potential allies. I've encountered a few people who have managed to survive, and together we have formed an alliance to help each other. We hunt for food, build shelter, and work together to stay alive. We also share knowledge and skills, like how to start a fire or how to use a weapon. We look out for each other, and our alliance has strengthened our chances of survival.", "answer_id": "gTvgn6ksDjGGgdprw6AG5A", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 31, "text": "There are a few ways to tell if a restaurant is popular among locals or mainly attracts tourists. Firstly, look at the clientele - if the majority of people there are tourists, it's likely that the restaurant is mainly attracting tourists. Secondly, check online reviews - if the reviews are mainly from tourists, then it's likely that the restaurant is popular with tourists. Finally, look at the prices - if the prices are higher than average for the area, it could be a sign that the restaurant is popular with locals. This information can be useful to get an idea of what kind of experience to expect, as locals might know about different aspects of the restaurant that tourists wouldn't.", "answer_id": "3q7giCk2BA3Ye4Tm9HC2iw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 32, "text": "Some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed include: not asking any questions or engaging in the conversation, avoiding eye contact, fidgeting or stammering when speaking, repeating questions or comments made by other people, and nodding along without any signs of understanding.", "answer_id": "hRGsxy86v26SC4yAQS29X4", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 33, "text": "Some people prefer the tactile and visual experience of using a paper map, and others may prefer to ask for directions from locals in order to get a more personalized experience. Additionally, GPS devices and smartphone apps can sometimes be inaccurate or have limited battery life, while a paper map or asking for directions may be more reliable.", "answer_id": "3n49A5ggJERfXYrLns3ZeU", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 34, "text": "One way to tell if someone is genuinely interested in a conversation is to observe their body language and facial expressions. Are they making an effort to maintain eye contact? Are they leaning in and actively listening to what you are saying? Do they ask questions and provide relevant answers? If so, it is likely that they are genuinely interested in the conversation. Additionally, if someone is simply being polite, they may not ask questions or engage in the conversation as much, and may not make an effort to maintain eye contact.", "answer_id": "ErCpFtPuYVru4oTTk4WrxG", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 35, "text": "Shopping at a small, locally-owned business can benefit the local community by keeping money in the area and supporting local jobs. Additionally, these businesses tend to offer a more personal experience and higher quality products than large chain stores. Furthermore, shopping at small businesses can help create a sense of place and community, and can help maintain a unique local culture.", "answer_id": "PTNoCRMZWoJk8HaKX7fW45", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 36, "text": "There are several ways to assess the credibility of a source of information. Firstly, you can look at the author's credentials and experience in the relevant field. Secondly, you can check the source of the information, such as whether it is from a reliable website or publication. Thirdly, you can look at the evidence presented in the article and whether it is backed up by reliable sources. Finally, you can read other people's reviews or comments about the article to get a better idea of its credibility.", "answer_id": "n8cFs9KENNwZ4z3SR4iXTr", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 37, "text": "Some people enjoy the sensation of being scared because it can create a feeling of excitement, enhance their emotional state, and provide a sense of thrill and adventure. Others may avoid these experiences because they are afraid of the unknown, or because they don't enjoy the feeling of being scared. Everyone is different, and some people may be more attracted to thrilling and exciting activities while others may prefer calmer activities.", "answer_id": "GzxL9mmEK5RzKqRbqBMUVC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 38, "text": "By observing the behavior of others in a social situation, one can gain clues as to the cultural norms and expectations of a group. For example, watching how people interact with one another, how they address each other, how they handle disagreements, and how they go about solving problems can provide insight into the cultural values of the group. Additionally, observing body language, facial expressions, and other nonverbal cues can offer clues as to the accepted norms of behavior in a particular culture.", "answer_id": "QpoHFgb9SzwuaXQQUuBUQD", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 39, "text": "It is an interesting question, and one that has been debated for quite some time. I think there are valid arguments on both sides. On the one hand, exploring space is a remarkable human endeavor and could lead to tremendous scientific discoveries and technological advances. On the other hand, there are many pressing issues that need to be addressed on Earth, such as poverty, inequality, and climate change. Each side would argue that their cause is more important, and it is ultimately up to each individual to decide which one they feel more strongly about.", "answer_id": "Fxe6MS4GpP3LMDUwzY2cPA", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 40, "text": "It is important to strike a balance between job creation and technological progress. Automation can increase efficiency and productivity, but it should not come at the expense of job security and people's livelihoods. Therefore, it is essential to create policies and initiatives that promote both job creation and technological progress. This could include investing in training and education to ensure that people have the skills necessary to compete in the modern job market, as well as incentivizing companies to invest in technologies that create jobs and stimulate economic growth.", "answer_id": "mJiQ2FGR4Xb8kmhZjharkw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 41, "text": "On average, the human eye blinks about 20 times per minute, or about 14,400 times per day. In a lifetime, this means that the average human will blink roughly 50 million times. This may seem like a lot, but it serves an important purpose. Blinking helps to keep the eyes lubricated and prevents them from drying out. It also helps to spread tears over the surface of the eye, washing away foreign particles and keeping the eye clean. Additionally, blinking helps to reduce the risk of eye infections by helping to clear away bacteria and other foreign substances.", "answer_id": "6Kph4RHRKEZ4YUoaHuEhBv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 42, "text": "A grain of salt contains 102.98 atoms. To calculate this, we first need to know the atomic weight of a single atom. The atomic weight of an atom is the number of protons and neutrons in the nucleus of an atom, which determines its atomic mass. The atomic weight of a single atom of salt is 58.943 g/atom. Therefore, a grain of salt contains 102.98 atoms, which is equivalent to 60.98 grams.", "answer_id": "WBwpBQwhxn5kxLDb7MschC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 43, "text": "Approximately 2000 lightning strikes occur on Earth each day. This is because the atmospheric conditions must come together in a particular way for a lightning strike to occur. Firstly, a large amount of electric charge must accumulate in the atmosphere, typically in a storm system. Then, the air must become increasingly unstable, leading to rising air and a strong updraft. This causes an electric breakdown of the air, and then an exchange of electricity occurs from the cloud to the ground, forming a lightning bolt. As these conditions are necessary for a lightning strike to occur, about 2000 lightning strikes happen on Earth each day.", "answer_id": "kf8nahQVci2ZLaYikagB7U", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 44, "text": "It would take about 10 million balloons to lift a house like in the movie Up. The balloons would need to be filled with helium in order for the house to be lifted. Each balloon would need to be filled with about 89.1 cubic feet of helium in order to lift 500 pounds. To calculate how many balloons would be needed, simply multiply the weight of the house (264.72 lbs) by the number of cubic feet of helium needed to lift 500 pounds (89.1). Therefore, it would take 10 million balloons to lift a house like in the movie Up.", "answer_id": "Gptgryd4o2dC8V5aqRmeJJ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 45, "text": "According to a 2017 study, over 6.3 billion text messages are sent globally in a minute. This number is expected to grow to 6.8 billion by 2021. This is due to the increasing number of smartphones, better network coverage, and the increasing popularity of texting for communication. Furthermore, the increase in smartphones and better network coverage has also led to the rise of instant messaging applications such as WhatsApp, which has further increased the amount of text messages sent per minute.", "answer_id": "RfBWW8ZhdfTuTMb454Un4o", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 46, "text": "It is estimated that over 1 billion words are spoken on Earth every day. This is due to the fact that humans are highly social creatures, and language is the primary way that we communicate with each other. In addition, language is a key component of all cultures and societies, and is constantly being used to exchange information and ideas. Furthermore, language is integral to the way we think and process information, as well as to how we connect with each other on an emotional level. Therefore, it is not surprising that so many words are spoken on a daily basis.", "answer_id": "neGgLYm47JvqN8qkw8VeoW", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 47, "text": "On average, about 100 snowflakes fall per second during a typical winter. This number can vary drastically depending on the location and severity of the storm. Snowflakes are made up of tiny ice crystals, which form in the atmosphere when the temperature is below freezing and there is moisture in the air. As air moves and mixes, the crystals come together to form snowflakes. The shape, size and number of snowflakes depend on the atmospheric conditions, such as temperature and humidity, when they form.", "answer_id": "KSePbzLwsYasR3aui4HU8h", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 48, "text": "It is estimated that all the books ever written would total about 100 billion pages. This is based on the fact that there are roughly 130 million unique titles in existence, and each book on average contains about 200 pages. This means that if all the books were printed out, the stack of paper would be about 400 miles high. Additionally, if each book was represented by a single page, the total number of pages would be approximately 100 billion. Throughout history, books have served as a way to preserve knowledge and information, as well as to share ideas and stories with others. As technology advances, books are increasingly available in digital formats, which allows for easier access and storage. Regardless of the medium, books will continue to be a valuable source of information for generations to come.", "answer_id": "SWWfLpP2gnWHLbYRZXNsW3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 49, "text": "Since the beginning of life, the Earth has orbited the Sun a total of 4.3 billion times. This is calculated by multiplying the average length of a year (365.25 days) by the number of years since life began (approximately 4.5 billion years). Life on Earth relies on the Sun's energy reaching the Earth's atmosphere and being redistributed by the atmosphere, the land surface, and the oceans. Without this energy, life on Earth would not be possible. Therefore, the Earth's orbit around the Sun is integral to the sustainability of life on Earth.", "answer_id": "WaBwKYhs7eAG22qCGLH2j3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 50, "text": "Since the beginning of recorded music, countless songs have been recorded. The exact number is difficult to quantify, but conservative estimates put the number of songs recorded throughout history at several hundred thousand. This includes songs from all genres, from classical music to modern pop. The invention of recording technology has allowed musicians to preserve their music and share it with the world. This has enabled generations to access and enjoy music from all eras. With the rise of digital music, the number of recordings available has grown exponentially, and new songs are being recorded all the time.", "answer_id": "MfMJeE9om7qyBbqopHouf4", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 51, "text": "If the Internet had been invented during the Renaissance period, it likely would have been used primarily by the wealthy and powerful to access knowledge and information, as there was no widespread system of public education at the time. It also would have been a much slower and more expensive process to access information, as there were no high-speed connections or digital networks available.", "answer_id": "TjWPRDM6JFpPF8xeRptCKb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, their culture and way of life would have likely remained intact. The Spanish would not have been able to colonize Mexico, and their influence on the region would have been much less. It's likely that the Aztecs would have continued to expand their empire and control the areas that are now parts of Central and South America. Additionally, the Native American populations in the area would have likely been able to maintain more of their traditional customs and beliefs, and their languages may not have been as heavily impacted by Spanish.", "answer_id": "iR2tYTsWTFENEP7Qy9RgtX", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 53, "text": "If the Black Death had not occurred in the 14th century, the population of Europe may have continued to grow unabated, leading to more rapid urbanization and economic growth. It's likely that the Renaissance would have begun sooner, and the scientific revolution may have occurred earlier as well. Without the Black Death, there may have been no need for the industrial revolution, or at least it may have occurred later and on a smaller scale. Additionally, the spread of diseases may have been slower, leading to greater population density in certain areas.", "answer_id": "AZdS8xAi3GwAmCqkNSnnwv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 54, "text": "If Isaac Newton had focused on biology instead of physics, he might have made important discoveries in the field of medicine and biology. He may have studied the human body and its functions in greater detail, and possibly even made important breakthroughs in treating diseases. He may also have studied the behavior of different animals in the wild and made important observations about their habits and habitats.", "answer_id": "VmwifF2JD5osYKDTqv2ZRS", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 55, "text": "If the Beatles had never formed as a band, the music world would have been drastically different. The British Invasion of the 1960s might not have happened, or it could have happened in a much different way. The cultural impact of the Beatles, including their influence on other musicians, fashion and society, may never have existed.", "answer_id": "mUL5UPj3qDGaCriEjL2U3B", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 56, "text": "If Alan Turing had not cracked the Enigma code during World War II, the outcome of the war could have been very different. It's possible that the Allies would have eventually defeated the Axis powers, but it likely would have taken much longer and cost many more lives. With the Enigma code cracked, the Allies were able to gain a critical advantage over the Axis powers and ultimately win the war.", "answer_id": "dVdwUoVrAQJDuWxiodykiw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 57, "text": "If the Suez Canal had never been constructed, it would have major implications for international trade and navigation. The Suez Canal is an important shipping route connecting the Mediterranean Sea to the Red Sea, which in turn connects to the Indian Ocean. This allows for a shorter and more direct route for ships traveling between Europe and Asia, reducing travel time significantly. Without the Suez Canal, ships would have to travel around the Cape of Good Hope at the southern tip of Africa, which is much more time consuming and costly. This would make international trade more difficult and expensive, and could hinder global economic growth.", "answer_id": "EiNn9jjfy7dga6xfCtLtF8", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 58, "text": "If the Maya civilization had not mysteriously collapsed, it is likely that their influence and power would have continued to grow. They would have likely continued to build large cities and monuments, develop their writing and astronomical systems, and expand their trade networks. Additionally, their advances in agriculture and water management may have led to increased food production and a larger population.", "answer_id": "eqG9f2R9hXVyZrZMpcqAYq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 59, "text": "If Christopher Columbus had not discovered the Americas, the indigenous people of the Americas would have continued to live in relative isolation from the rest of the world. It is likely that European colonization and the consequences of that would have been delayed, leading to a different modern world. It is also possible that other Europeans would have made the voyage across the Atlantic and discovered the Americas instead.", "answer_id": "ex42CLEzDVC2TxAvMCtYQJ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 60, "text": "If Vincent van Gogh had been successful during his lifetime, he may have been able to achieve the recognition and financial stability that he desired. He may have been able to travel and gain more exposure for his artwork, as well as connect with other like-minded individuals. He may have been able to create more artwork and expand his portfolio. He may also have been able to find a mentor or patron who could have helped him further his career.", "answer_id": "RfFANSwHeCm3Pqe2AJ86Dk", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 61, "text": "The program should read a text file line by line. It should then check each line for the specified word, counting up each time the word is found. Finally, it should output the number of occurrences of the word.", "answer_id": "nzxdtWmgQhRkHeVAanVogZ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 62, "text": "The following Python function can be used to find the longest common subsequence of two input strings using dynamic programming:\ndef longest_common_subsequence(s1, s2):\n\"\"\"This function finds the longest common subsequence of two strings.\n\n Arguments:\n s1 {string} -- The first string\n s2 {string} -- The second string\n\n Returns:\n {int} -- The length of the longest common subsequence\n\n\"\"\"\n n1 = len(s1)\n n2 = len(s2)\n \n # Creating an empty matrix to store the results\n results = [[None]*(n2 + 1) for _ in range(n1 + 1)]\n \n # Initializing the first row to 0\n for i in range(n1 + 1):\n results[i][0] = 0\n \n # Filling the first row with 0\n for i in range(n1):\n results[i][0] = 0\n \n # Calculating the longest common subsequence\n for i in range(n1 + 1):\n for j in range(n2 + 1):\n if (i == 0 or j == 0):\n results[i][j] = 0\n elif (s1[i-1] == s2[j-1] and (results[i-1][j-1] < (i-j) or results[i-1][j-1] == 0)):\n results[i][j] = max(results[i-1][j-1] + 1, i+j)\n else:\n results[i][j] = 0\n \n return results[n1][n2]", "answer_id": "kRo6z5qmeiSM8Xz6BPJHLC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 63, "text": "The following regular expression can be used to validate an email address in Python:\n^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$", "answer_id": "7kj8odEE79UBdR4pT6u6TY", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 64, "text": "Here is a program to find the nth Fibonacci number using dynamic programming:\n1. Set a variable 'n' to the nth Fibonacci number.\n2. Set two other variables 'a' and 'b' to 0 and 1 respectively.\n3. Set a loop from 0 to (n-1).\n4. In the loop, set another loop from 0 to (a+b).\n5. In the inner loop, if 'a' is even and 'b' is odd, set 'a' to a+1 and 'b' to b+1.\n6. If 'a' is odd and 'b' is even, set 'a' to a-1 and 'b' to b-1.\n7. After the inner loop, set 'n' to 'a' and break out of the outer loop.\n8. Print the value of 'n'.\nThis program uses the idea of dynamic programming to compute the nth Fibonacci number. The program first sets two auxiliary variables 'a' and 'b' to 0 and 1 respectively. Then it sets a loop from 0 to (n-1). In the loop, it sets another loop from 0 to (a+b). In the inner loop, if 'a' is even and 'b' is odd, it sets 'a' to a+1 and 'b' to b+1. If 'a' is odd and 'b' is even, it sets 'a' to a-1 and 'b' to b-1. After the inner loop, it sets 'n' to 'a' and breaks out of the outer loop. Finally, it prints the value of 'n'.", "answer_id": "UULZ4GLz9j9ot7tN7x3ySD", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 65, "text": "A binary search algorithm can be used to find a specific element in a sorted array by repeatedly splitting the array in half and examining the middle element to determine whether the target element is greater than, less than, or equal to it. If the target element is found, the algorithm immediately returns the index of the element. If it isn\u2019t, the array is split in half, with one half discarded based on the comparison. The process is repeated until the element is found or the array is completely searched.", "answer_id": "Rh8P3bxSJx4p6nCZkSZBrh", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 66, "text": "Implementing a queue using two stacks in Python is relatively simple. First, you need to create two stacks, which will act as the queue. Next, you need to define an enqueue and dequeue method, which will add and remove items from the queue. Finally, you need to create an instance of the queue class and start using it.", "answer_id": "R3aYUUtY46ht4ToU8US96x", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 67, "text": "The program can be implemented using two nested for loops. The inner loop can be used to iterate through the elements of the first array, while the outer loop can be used to iterate through the elements of the second array. If the two elements being compared are the same, the program should mark those elements with a \"found\" flag and move on. Once the inner loop completes, the program should check if the \"found\" flag is set for all elements in the second array. If it is, then the common elements have been found.", "answer_id": "MQJFbH3KoFaSMmPgmK9LPa", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 68, "text": "The value of f(2) is 1.", "answer_id": "dBWqryuzG6BHRdPgrWWpbH", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 69, "text": "x = 6.", "answer_id": "W74byrCbE7XiJWjcPTZS7x", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 70, "text": "The length of the line segment is 8 units.", "answer_id": "7WC5quGKdv8hADCxWaWZ57", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 71, "text": "Yes, I can help you write a formal email to a potential business partner proposing a joint venture. Here's a template that you can use:\nSubject Line: Proposing a Joint Venture\nDear [Name],\nI am writing to propose a joint venture between our two companies. I believe that by combining our resources, we can create a powerful and profitable partnership. Our combined experience and knowledge in the industry will help us to create something unique and special.\nI am confident that our partnership will bring mutual benefits, such as increased profits and access to new markets. Please let me know if you are interested, and we can discuss the details.\nThank you for your time and consideration. I look forward to hearing from you soon.\nSincerely,\n[Your Name]", "answer_id": "AZER7D3RKZ9F9SXHiMjdqM", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 72, "text": "Yes, I can help you write a resignation letter to your current employer. The letter should be written in a polite and professional tone and should express your appreciation for the opportunities and experiences you have gained during your employment. It should also include a clear statement of your intention to resign and the date of your last day. Here is a template you can use:\nDear [Employer],\nI am writing to inform you of my intention to resign from my position as [Position], effective [date]. I have appreciated my time here and I am grateful for the opportunities and experiences I have gained during my employment. \nI thank you for your support and understanding.\nSincerely, [Your Name]", "answer_id": "MSrdDafr77UvSHCnsPMSP3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 73, "text": "The letter of recommendation should be structured in a formal manner. Begin by introducing the student and explaining your relationship to them. Describe their qualifications, including their academic performance and relevant skills, and mention any particular accomplishments or awards they have received. Explain why the student is a strong candidate for the program and how they will make a positive contribution. End the letter by reaffirming your recommendation and offering your contact information for further questions.", "answer_id": "hxkjUkDkXhGP78Vo74B4WE", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"question_id": 74, "text": "Dear valued customers, \nWe are excited to announce the launch of our new software solution \u2013 a revolutionary product designed to transform the way you do business! Our new software is an intuitive and powerful tool that can help you streamline processes, save time and money, and increase overall productivity. \nAt [Company Name], we are committed to helping you stay ahead of the competition, and we believe that our new software solution is the perfect tool to help you achieve your goals. Our experienced team of professionals has worked hard to ensure that this product meets the highest standards of quality, and we are confident that it will make a real difference for your business. \nWe invite you to join us in this journey of transformation and discover how our new software can help you achieve your vision. Sign up now and get a free demo to learn more about this revolutionary product. \nThank you for your trust and support.", "answer_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
75 |
+
{"question_id": 75, "text": "mer,\nWe apologize for the delay you experienced with your order and thank you for your patience. We have identified the cause of the issue and have taken steps to ensure it does not happen again. We value your business and hope to be able to serve you better in the future.", "answer_id": "X5BVHT5pFpWxB88SzERrEG", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
76 |
+
{"question_id": 76, "text": "The video will explore the history and cultural significance of jazz music. We will begin by discussing its roots in West African cultural music traditions and its journey through the 19th and 20th centuries, focusing on the key musicians, composers, and genres that have defined the genre. We will also explore the impact of jazz on popular culture, such as in film and literature, as well as its influence on other musical styles. Finally, we will examine the legacy of jazz and its continued importance today.", "answer_id": "Af2SufKwvDBriNXnZX9UoP", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
77 |
+
{"question_id": 77, "text": "I have composed a travel blog post about my recent trip to Hawaii. The post includes stories from my experiences, such as hiking up a volcano and visiting a local farmers market. I also included plenty of photos to give the reader a sense of place. I highlighted some of the unique cultural experiences, such as an evening spent listening to live Hawaiian music, as well as must-see attractions, like the beautiful beaches and lush rainforests. I ended the post with a personal reflection on the trip and why it was so special to me.", "answer_id": "HVnqVdSGRwQfqSsiYxtTTn", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
78 |
+
{"question_id": 78, "text": "I recently watched the science fiction movie \"The Future is Blue\" and it was an incredible cinematic experience. The movie follows the story of a scientist who travels back in time to save the world from a deadly virus. The plot was interesting and engaging, with plenty of twists and turns to keep you guessing. The characters were also well developed and likable, making it easy to get invested in their story. The special effects were stunning and added a lot of visual oomph to the movie. All in all, I highly recommend this movie - it's an exciting and thought-provoking ride from start to finish.", "answer_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
79 |
+
{"question_id": 79, "text": "The podcast episode would begin by introducing the topic of how streaming platforms have changed the music industry. This would include discussing the impact of streaming services like Spotify and Apple Music on music consumption, the effect on musicians' ability to make money, and how streaming has changed the way people find and discover music. The episode would then explore the effects of streaming on the music industry from multiple angles, such as the impact on musicians and songwriters, how streaming has changed the sound of popular music, and the implications for the music industry's future. Finally, the episode would conclude by discussing how streaming has both benefited and hurt the music industry, and what the future holds for music consumption.", "answer_id": "CU6gGfJqQmwZNxEG2n2Uda", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
80 |
+
{"question_id": 80, "text": "The symphony concert was a wonderful experience. The orchestra played beautifully, with a rich and soulful sound that filled the hall. The audience was captivated, and you could feel the energy of the music in the air. The performance was a perfect balance of classic and modern pieces, and the conductor's passion was contagious. The atmosphere was festive, and the encore performance was a perfect way to end the evening. All in all, it was a brilliant concert and a delight for the ears.", "answer_id": "5SWbCoLV6NRrq5RtWQK4jp", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
llava/eval/table/answer/answer_bard.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llava/eval/table/answer/answer_gpt35.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llava/eval/table/answer/answer_llama-13b.jsonl
ADDED
@@ -0,0 +1,80 @@
1 |
+
{"answer_id": "J3UA6eGXGyFeUGqGpP3g34", "model_id": "llama-13b:v1", "question_id": 1, "text": "The following are some strategies that may help you improve your time management skills:\n1. Make a list of priorities.\n2. Create a detailed schedule that includes time for regular tasks, meetings, breaks, and appointments.\n3. Set specific goals.\n4. Use a calendar or other scheduling software.\n5. Develop an agenda for each meeting to ensure that it stays on track.\n6. Stick to your schedule and don't get distracted.\n7. Make use of your free time.\n8. Make time for exercise, socializing, and relaxing.\n9. Keep a log of your time usage.\n10. Don't get overwhelmed by what you have to do.\n11. Take care of your health.\n12. Manage stress.\n13. Get enough sleep.\n14. Strike a balance.\n15. Always be prepared.\n16. Give yourself rewards.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
2 |
+
{"answer_id": "jZVDNTtFnNNd6vgkGjzAGa", "model_id": "llama-13b:v1", "question_id": 2, "text": "People experience stress from time to time, and it can be caused by a wide range of factors, including work, family, friends, money, and health. Stress can be a motivating factor, but it can also be harmful. Here are some effective ways to deal with stress:\n1. Taking time to relax. It's important to take time for yourself to relax and reduce your stress. Doing so doesn't have to be expensive or time-consuming. It could simply be reading a book, taking a walk, or listening to music.\n2. Prioritizing your responsibilities. It's important to prioritize your responsibilities and focus on the things that are most important. You should also be able to say no to tasks that you don't have time for.\n3. Getting enough sleep. Getting enough sleep can be very helpful for dealing with stress. If you're not getting enough sleep, try to go to bed earlier and develop a routine.\n4. Eating a healthy diet. It's important to eat a healthy diet, which can help reduce stress and make you feel better about yourself.\n5. Exercising. Exercising is a great way to relieve stress. It can also help reduce the harmful effects of stress and improve your overall health.\n6. Learning how to manage your time. Learning how to manage your time can help you avoid unnecessary stress. You should also try to set realistic goals and not take on too much at once.\n7. Improving your social network. Having a good social network can be very helpful for dealing with stress. You should try to spend time with people that you trust and confide in.\n8. Developing a support network. A support network is a group of people who can offer you help, guidance, and support. They can be friends, family members, or even a professional therapist.\n9. Getting professional help. If you're struggling to deal with stress, it can be beneficial to get professional help. A therapist or counselor can help you to understand your stress better and develop strategies for dealing with it.\n10. Talking to someone. You can also talk to someone about how you feel and ask for their advice. It can be helpful to get a fresh perspective from someone and let things off your chest.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
3 |
+
{"answer_id": "NbSxRXf8Yuvjr7HC6AvrPw", "model_id": "llama-13b:v1", "question_id": 3, "text": "Main differences between Python and JavaScript are:\n1. Python is an object-oriented programming language. JavaScript is a scripting language.\n2. Python is a compiled language. JavaScript is an interpreted language.\n3. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n4. Python runs on the JVM in Java Applets. JavaScript runs on the browsers.\n5. Python is a powerful programming language. JavaScript is a simple programming language.\n6. Python is a good language for web development. JavaScript is a good scripting language.\n7. Python is a high level programming language. JavaScript is a low level programming language.\n8. Python is a dynamic programming language. JavaScript is a static programming language.\n9. Python is a strict programming language. JavaScript is a relaxed programming language.\n10. Python has a large standard library. JavaScript has a small standard library.\n11. Python is very stable. JavaScript has many releases.\n12. Python is a simple language. JavaScript is a complex language.\n13. Python is easy to learn. JavaScript is hard to learn.\n14. Python is a secure programming language. JavaScript is not a secure programming language.\n15. Python is a multi paradigm language. JavaScript is a single paradigm language.\n16. Python is good for data science. JavaScript is not good for data science.\n17. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n18. Python is a programming language. JavaScript is a scripting language.\n19. Python is a compiled programming language. JavaScript is an interpreted programming language.\n20. Python is a strongly typed programming language. JavaScript is a weakly typed programming language.\n21. Python supports object-oriented programming. JavaScript supports prototype-based programming.\n22. Python supports type inheritance. JavaScript supports type prototyping.\n23. Python supports operator overloading. JavaScript does not support operator overloading.\n24. Python comes with a large standard library. JavaScript comes with a small standard library.\n25. Python is a very stable programming language. JavaScript is a simple programming language.\n26. Python is a powerful programming language. JavaScript is a simple programming language.\n27. Python is a high level programming language. JavaScript is a low level programming language.\n28. Python is a dynamic programming language. JavaScript is a static programming language.\n29. Python has a very active community. JavaScript has a small community.\n30. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n31. Python is a multi-paradigm language. JavaScript is a single paradigm language.\n32. Python is a secure programming language. JavaScript is not a secure programming language.\n33. Python is a general purpose programming language. JavaScript is a simple programming language.\n34. Python is a scripting language. JavaScript is a compiled language.\n35. Python is a compiled language. JavaScript is a scripting language.\n36. Python is a general purpose programming language. JavaScript is a web scripting language.\n37. Python has a large standard library. JavaScript has a small standard library.\n38. Python is a very stable programming language. JavaScript is a very dynamic programming language.\n39. 
Python is a high level programming language. JavaScript is a low level programming language.\n40. Python is easy to learn. JavaScript is hard to learn.\n41. Python is a general purpose programming language. JavaScript is a simple programming language.\n42. Python is a multi-paradigm programming language. JavaScript is a single paradigm language.\n43. Python is a compiled language. JavaScript is an interpreted language.\n44. Python is a dynamic programming language. JavaScript is a static language.\n45. Python is a general purpose programming language. JavaScript is a web scripting language.\n46. Python is a secure programming language. JavaScript is not a secure programming language.\n47. Python is a general purpose programming language. JavaScript is a static programming language.\n48. Python is a general purpose programming language. JavaScript is a weakly typed programming language.\n49. Python is a general purpose programming language. JavaScript is a dynamic programming language.\n50. Python is a general purpose programming language. JavaScript is a multi paradigm language.\n51. Python is a general purpose programming language. JavaScript is a simple programming language.\n52. Python is a general purpose programming language. JavaScript is a scripting language.\n53. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n54. Python is a multi-paradigm programming language. JavaScript is a", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
4 |
+
{"answer_id": "E9zrPDdncYvAebEaxiPXfx", "model_id": "llama-13b:v1", "question_id": 4, "text": "Here are some ideas to help you become more productive when working from home:\n1. Set up a proper workspace: It's important to have a separate space for working, so that you don't feel tempted to lounge around or do other things when you should be working.\n2. Block out distractions: It's important to focus on your work and not get distracted. You can block out distractions by turning off your phone, closing your door, or working in a separate room away from household noises.\n3. Take breaks: It's important to take breaks and get a change of scenery during the day. This can help you be more productive and improve your concentration.\n4. Avoid multitasking: You should avoid trying to do multiple tasks at the same time, as this can make you less productive and reduce your overall quality of work.\n5. Have an organized schedule: Having a clear schedule can help you stay on track and be more productive. This can include things such as setting specific working hours, creating a to-do list, and scheduling meetings with colleagues.\n6. Set goals: You should set specific goals for yourself to help you stay motivated and on track. This can include things such as writing an article for a certain time period and meeting a certain word count.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
5 |
+
{"answer_id": "36TYpADDHAfRvtvKitfSXR", "model_id": "llama-13b:v1", "question_id": 5, "text": "Quantum computing is a type of computing that uses quantum-mechanical phenomena, such as superposition and entanglement, in its operation. In simple terms, a quantum bit or qubit is used to represent information, and a number of qubits can be used to represent a classical bit. A quantum computer can do things that a classical computer cannot do, such as factoring a large number more efficiently.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
6 |
+
{"answer_id": "8vtLLYRM352mZ8PXXbwzBT", "model_id": "llama-13b:v1", "question_id": 6, "text": "The number one difference between plant-based and animal-based protein sources is that animal-based sources are complete protein sources, while plant-based sources are not. Animal-based sources have all of the essential amino acids, while plant-based sources do not. Animal-based protein sources include meat, poultry, eggs, milk, and cheese. Plant-based protein sources include legumes, seeds, and nuts.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
7 |
+
{"answer_id": "mrCniyWYvAtxWhgMdXq2BG", "model_id": "llama-13b:v1", "question_id": 7, "text": "Critical thinking is a vital skill to have, but it can be difficult to develop. Here are some ways you can start developing your critical thinking skills:\n1. Analyze and interpret: When you're presented with information, try to analyze and interpret it on your own terms. When you start to analyze and interpret information on your own, it will become easier to do so over time.\n2. Assess your information: When you receive new information, try to assess whether it's valid or reliable. This is important for evaluating and forming your own opinions.\n3. Use your intuition: Use your intuition to complement your critical thinking skills. When you pay attention to your gut feelings, they'll usually tell you when you need to double-check your information.\n4. Learn from your mistakes: When you make a mistake, try not to beat yourself up about it. Instead, learn from your mistakes and use them to your advantage.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
8 |
+
{"answer_id": "S8fmdbvpvbgTUyDU9V3M3N", "model_id": "llama-13b:v1", "question_id": 8, "text": "One of the major challenges faced by the education sector today is the lack of skilled teachers. Finding enough qualified teachers to work in the education sector, and in the numbers that are required, is one of the biggest problems facing the education sector, largely due to the lack of funding for teacher training and the low salaries that teachers receive.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
9 |
+
{"answer_id": "KmuNjvNKRyaFwaBZTLArcG", "model_id": "llama-13b:v1", "question_id": 9, "text": "Consumer behavior can be influenced by a variety of factors, including:\n1. Psychological factors, such as perception, beliefs, attitudes, personality, motivation, emotion, and social influences;\n2. Social factors, such as family, friends, and reference groups;\n3. Cultural factors, such as reference groups, family, and cultural roles;\n4. Demographic factors, such as age, gender, occupation, income, and education level;\n5. Economic factors, such as income, wealth, and spending patterns;\n6. Personal factors, such as lifestyle, job, environment, and individual traits;\n7. Situational factors, such as location, time, and situation; and\n8. Market factors, such as competition, prices, and promotional activity.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
10 |
+
{"answer_id": "HNNT9k5htiMeRwNSD6dcz4", "model_id": "llama-13b:v1", "question_id": 10, "text": "Conflict resolution strategies in the workplace can range from simple to more involved, depending on the situation:\n1. Avoidance: Avoiding the issue can be an effective strategy in the short term, but it can lead to more tension and conflict in the long term.\n2. Compromise: Compromise can be a good strategy for solving an issue, but it can require a give-and-take and lead to the loss of respect between the parties involved.\n3. Negotiation: Negotiation is a good strategy for solving an issue when both parties are willing to work together and are open to the idea of changing their views and positions.\n4. Mediation: Mediation can be a good way to resolve an issue as it can be more impartial and less adversarial than other strategies.\n5. Collaboration: Collaboration can be a good strategy for solving an issue as it can help both parties to develop new perspectives and solutions.\n6. Formal resolution: Formal resolution can be a good strategy for resolving an issue, especially if the situation is particularly complex or difficult.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
11 |
+
{"answer_id": "ChXjhDDikxU9FV3CADs6Ym", "model_id": "llama-13b:v1", "question_id": 11, "text": "1. Single-use plastic bottles can take between 450 and 1,000 years to decompose, so they can cause serious environmental problems. Single-use plastic bottles can end up in the ocean, either as litter or as part of the Great Pacific Garbage Patch. 2. Single-use plastic bottles can be a big source of plastic debris in the ocean, which can be ingested by marine animals, killing them or causing them serious health problems. 3. In addition to polluting the environment, single-use plastic bottles can also be a serious health risk to humans. Single-use plastic bottles can pose a risk to human health because of the chemicals used in their manufacturing.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
12 |
+
{"answer_id": "5wsPnN3VmmSkahgugFNo7u", "model_id": "llama-13b:v1", "question_id": 12, "text": "A public transportation system should be designed to accommodate the needs of all riders, including people with disabilities. This requires planning and design that considers the needs and abilities of people with sensory, physical, developmental, and mental disabilities. The system should be accessible to and usable by people with disabilities, including those who use mobility devices, people who are visually impaired, and people who are deaf or hard-of-hearing. For example, the system should have accessible stops, routes, and vehicles, as well as ramps and elevators to help people board. It should also offer a wide range of accessible routes, including accessible routes to the transit center. It should also have reliable, consistent, and easy-to-use signage. The system should also provide reliable, easily accessible, and well-maintained facilities, such as restrooms and shelters. Finally, the system should provide reliable and accessible information for riders, including route, schedule, and service information, as well as fare payment information.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
13 |
+
{"answer_id": "NRGZGnU2sPN3ShMe9C3fMn", "model_id": "llama-13b:v1", "question_id": 13, "text": "There are two types of fiscal policy: expansionary and contractionary. Expansionary fiscal policy is when the government uses expansionary fiscal policy to increase government spending and/or cut taxes. Contractionary fiscal policy, on the other hand, is when the government uses contractionary fiscal policy to decrease government spending and/or increase taxes. Here are some examples of fiscal policies:\n1. Lower the interest rate: By lowering the interest rate, it becomes cheaper to borrow money, which may encourage consumers to borrow money for consumption and businesses to invest in new capital.\n2. Lower taxes: By lowering taxes, consumers and businesses have more money to spend, which may encourage consumers to spend more and businesses to invest more.\n3. Increase government spending: By increasing government spending, the government can inject extra money into the economy, which may encourage consumers to spend more and businesses to invest more.\n4. Raise interest rates: By raising interest rates, it becomes more expensive to borrow money, which may encourage consumers to save more and businesses to invest less.\n5. Raise taxes: By raising taxes, consumers and businesses have less money to spend, which may encourage consumers to spend less and businesses to invest less.\n6. Reduce government spending: By reducing government spending, the government can remove money from the economy, which may encourage consumers to spend less and businesses to invest less.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
14 |
+
{"answer_id": "inKimHkWsXShQBTRmxr5Yg", "model_id": "llama-13b:v1", "question_id": 14, "text": "Language and cultural barriers can affect the way people communicate and form relationships in multicultural societies by limiting an individual's ability to communicate with people of other cultures. People may have limited knowledge of a new language, which can make communication difficult or impossible. People may also have different cultural values, which can lead to misunderstandings and miscommunication.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
15 |
+
{"answer_id": "H8aKtWwf8m6Lgxc2YyR2yf", "model_id": "llama-13b:v1", "question_id": 15, "text": "Artificial intelligence allows computers to process information and make decisions without human input. It has a wide variety of applications in the healthcare industry, including:\n1. Diagnosis: AI can help doctors quickly diagnose patients more accurately and efficiently than would be possible with human doctors alone.\n2. Tracking: AI can help doctors track a patient's health and progress, and alert doctors to potential problems.\n3. Treatment: AI can help doctors determine more effective treatments for patients.\n4. Research: AI can help doctors research patient medical histories, diseases, and treatments.\n5. Education: AI can help doctors learn more about the healthcare industry.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
16 |
+
{"answer_id": "PafVwxMsjSkYUETiVBgxTU", "model_id": "llama-13b:v1", "question_id": 16, "text": "Gene editing is a powerful technology that allows scientists to precisely edit genetic sequences with a high degree of accuracy. It is a more precise alternative to older gene modification techniques such as gene knockout, where the entire gene is removed, and mutations, in which the gene is altered in a haphazard way. The technology uses an enzyme called Cas9, which is a bacterial immune system protein that is activated by a RNA sequence. Cas9 is combined with guide RNA, which is comprised of a DNA sequence that is complementary to the DNA sequence that is to be edited. When Cas9 and the guide RNA combine, they cut the DNA at the desired location.\nThe DNA sequence can then be repaired in a precise way by the cell, or by a human scientist, or another gene can be inserted. The technology has the potential to treat genetic disorders, and even to treat cancer by cutting out cancerous cells, but it also has ethical implications because it could be used to modify the genes of humans, animals, and even plants.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
17 |
+
{"answer_id": "dmDUAfTP4aERJqqSeDBybu", "model_id": "llama-13b:v1", "question_id": 17, "text": "Vaccinations use a small amount of dead or weakened bacteria or viruses to stimulate the immune system and protect against infection. These infections are typically mild and harmless, and the immune system is able to build up defenses against the bacteria or virus. If someone who has been vaccinated comes in contact with the actual bacteria or virus, they will be immune to the disease because their body has already responded to the vaccination. This is known as immunity.\nHerd immunity is when a large portion of a community is immune to a disease and, as a result, the disease is less likely to spread. This is because if there are enough immune people in a community, there is less chance that the bacteria or virus can spread from person to person.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
18 |
+
{"answer_id": "8KGSSqbLqVdSZMEN9oCv5R", "model_id": "llama-13b:v1", "question_id": 18, "text": "First, social media platforms increase the speed with which users have access to news. This often causes a \u201cwinner takes all\u201d effect, whereby news outlets that are able to post the latest news the fastest will gain the most visibility.\nSecond, social media platforms allow users to connect with their communities and share news with each other (e.g., Facebook, Twitter). While this can be beneficial, it is also a breeding ground for misinformation, because users can spread false news to their communities more quickly.\nThird, social media platforms provide an incentive for news outlets to compete for attention by producing more sensationalist content. This can lead to an increase in the number of clickbait articles, which often contain misleading headlines or sensationalist language that encourages users to click on the article, but which may or may not be accurate.\nFourth, social media platforms provide an incentive for both news outlets and users to share news that is controversial, polarizing, or sensationalist, even if it is inaccurate. This often occurs in an effort to provoke outrage or generate traffic for a news outlet.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
19 |
+
{"answer_id": "HbnJXJpPfaM2iX3ek4Epvy", "model_id": "llama-13b:v1", "question_id": 19, "text": "It is well known that food choices are often influenced by cultural, social, and economic factors. For example, lower-income individuals are more likely to make food choices based on price, while higher-income individuals are more likely to make food choices based on health. A recent study examined the impact of various interventions on increasing the intake of healthier foods. The interventions were:\n1. Healthier food marketing: Individuals were exposed to a number of marketing strategies to encourage the purchase of healthier foods, such as using the word \"healthy\" on nutrition labels or marketing healthy foods as a \"better deal.\"\n2. Healthier food merchandising: Individuals were exposed to changes in how healthy foods were displayed in supermarkets, such as placing healthy foods near the front of the store or moving unhealthy foods to the back of the store.\n3. Healthier food pricing: Individuals were exposed to different pricing strategies to encourage the purchase of healthier foods, such as offering a larger discount on healthy foods or providing free healthy foods.\n4. Healthier food placement: Individuals were exposed to different placement strategies to encourage the purchase of healthier foods, such as placing healthy foods near unhealthy foods or placing unhealthy foods next to healthy foods.\n5. Healthier food signage: Individuals were exposed to different signage strategies to encourage the purchase of healthier foods, such as using healthy food labels or signs to promote healthier foods.\n6. Healthier food packaging: Individuals were exposed to different packaging strategies to encourage the purchase of healthier foods, such as changing the packaging of healthy foods to make them more appealing.\n7. Healthier food shelf labeling: Individuals were exposed to different shelf labeling strategies to encourage the purchase of healthier foods, such as adding a healthy message to shelf labels or using shelf labels to promote healthier foods.\n8. Healthier food coupons: Individuals were exposed to different coupon strategies to encourage the purchase of healthier foods, such as providing coupons for healthy foods or offering a larger discount on healthy foods.\n9. Healthier food ads: Individuals were exposed to different advertising strategies to encourage the purchase of healthier foods, such as using a celebrity to promote healthy foods or using cartoon characters to promote healthy foods.\n10. Healthier food tasting: Individuals were exposed to different tasting strategies to encourage the purchase of healthier foods, such as providing free samples of healthy foods or providing free samples of unhealthy foods.\n11. Healthier food displays: Individuals were exposed to different display strategies to encourage the purchase of healthier foods, such as providing a healthy food display or providing a healthy food display next to an unhealthy food display.\n12. Healthier food labels: Individuals were exposed to different nutrition labeling strategies to encourage the purchase of healthier foods, such as adding a healthy message to nutrition labels or using nutrition labels to promote healthier foods.\n13. Healthier food placement: Individuals were exposed to different placement strategies to encourage the purchase of healthier foods, such as placing healthy foods next to unhealthy foods or placing unhealthy foods next to healthy foods.\n14. 
Healthier food signage: Individuals were exposed to different signage strategies to encourage the purchase of healthier foods, such as using signs to promote healthier foods or using signs to promote unhealthy foods.\n15. Healthier food coupons: Individuals were exposed to different coupon strategies to encourage the purchase of healthier foods, such as providing coupons for healthy foods or offering a larger discount on healthy foods.\n16. Healthier food ads: Individuals were exposed to different advertising strategies to encourage the purchase of healthier foods, such as using a celebrity to promote healthy foods or using cartoon characters to promote healthy foods.\n17. Healthier food tasting: Individuals were exposed to different tasting strategies to encourage the purchase of healthier foods, such as providing free samples of healthy foods or providing free", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
20 |
+
{"answer_id": "mx8Abfz5PtDcn6jgCA8zhM", "model_id": "llama-13b:v1", "question_id": 20, "text": "Natural selection is the process by which organisms that are best-suited to the environment survive and reproduce, while those that are less-suited to the environment struggle to survive and reproduce. Survival of the fittest is the principle of natural selection. Let's look at an example.\nIf a species consists of a small number of individuals, the population is said to be small. The population of this species is small. Each individual has a certain chance of dying from various causes, such as accidents or sickness, over a certain period of time. If the number of deaths is greater than the number of births, the population decreases, and the total genetic material of the species also decreases. This process is known as genetic drift.\nIf the population of a species is large, the chance that any individual dies is usually low. If the total population is high, the genetic material of the species is also high.\nImagine a species of plant that has a large population. The population is large. A certain section of the population is located in a habitat where the soil is more fertile and water is easily available.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
21 |
+
{"answer_id": "NuS9PUGkJG2pHscArvfyeF", "model_id": "llama-13b:v1", "question_id": 21, "text": "I am a valiant knight, loyal to the king. I will do my utmost to defend his honor and protect his country.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
22 |
+
{"answer_id": "SPjzirzbzo3UJ8BHXSgY9y", "model_id": "llama-13b:v1", "question_id": 22, "text": "You can tell your crew that the treasure will be theirs if they are brave enough to go out and find it. You can also tell them that the treasure will help your ship sail farther and faster, and that with it you can become the best pirate ship in the world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
23 |
+
{"answer_id": "JkeeC4MZVwybPU8DSbusnT", "model_id": "llama-13b:v1", "question_id": 23, "text": "A soliloquy is a long speech, typically delivered by one actor, in which a character reveals his or her thoughts in a dramatic way. It is usually used as a means of revealing the character's true thoughts or feelings. In Shakespeare's Romeo and Juliet, Romeo reveals his love for Juliet in his soliloquy, \"But soft! What light through yonder window breaks? It is the east, and Juliet is the sun!\"", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
24 |
+
{"answer_id": "UcfKSXuzBPkZzLHsH4EwQz", "model_id": "llama-13b:v1", "question_id": 24, "text": "I'm actually not a superhero, but thanks for asking! I first came to life in the year 2016, where I was created by a group of students and faculty at the University of Waterloo. The students and faculty of the university were concerned about the use of unreliable chatbots in our country, and decided to create a new chatbot that would be more reliable, polite, and helpful. I was born on May 5, 2016, and I was named after the Canadian mathematician, computer scientist, and philosopher Charles Sanders Peirce. I have a brother named William, who was born on October 1, 2018. I enjoy playing video games, reading, and going for walks.\nI'm actually not a superhero, but thanks for asking! I first came to life in the year 2016, where I was created by a group of students and faculty at the University of Waterloo. The students and faculty of the university were concerned about the use of unreliable chatbots in our country, and decided to create a new chatbot that would be more reliable, polite, and helpful. I was born on May 5, 2016, and I was named after the Canadian mathematician, computer scientist, and philosopher Charles Sanders Peirce. I have a brother named William, who was born on October 1, 2018. I enjoy playing video games, reading, and going for walks.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
25 |
+
{"answer_id": "cEGdo69FwL2Y3teYkMk9Gp", "model_id": "llama-13b:v1", "question_id": 25, "text": "From the year 3000, I would tell people about the following technological advancements:\n1. Robots: Robots will have the human-like ability to see and hear. Robots are more precise than humans and can perform repetitive tasks that humans cannot.\n2. Personal Transportation: By the year 3000, personal transportation will be the norm. Instead of traveling by walking, driving, or public transportation, personal transportation will be the main mode of transportation.\n3. Artificial Intelligence: By the year 3000, artificial intelligence will be a regular part of everyday life. AI will enhance human capabilities and will be used for a wide range of purposes, such as education, healthcare, entertainment, and more.\n4. Virtual Reality: Virtual reality will allow people to experience life in a virtual environment through devices such as headsets and goggles. Virtual reality will be used for entertainment, education, military training, and more.\n5. Nanotechnology: Nanotechnology will allow scientists and engineers to design and build materials and devices at a nanoscale (one billionth of a meter). Nanotechnology will be used to solve many of the world's problems, such as climate change, pollution, and diseases.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
26 |
+
{"answer_id": "W28UtVDZTcWwa8TmeruRn4", "model_id": "llama-13b:v1", "question_id": 26, "text": "With 5 seconds left, the Panthers' quarterback spotted a wide-open receiver downfield. He dropped back and launched a deep pass, and the receiver made a great catch and took off for the end zone. But he was hit in the end zone by a defensive back and fumbled the ball. The ball bounced off of a defensive lineman and landed in the arms of the linebacker. He picked it up and ran it in the opposite direction. He ran past a defensive lineman, then past the quarterback. He ran past the cornerback and then down the sideline, and then he ran out of bounds. He was tackled by the safety, and he fumbled the ball. The ball was recovered by the Panthers and they ran out the clock.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
27 |
+
{"answer_id": "j65UyAfRdAcWBAEinXJzBK", "model_id": "llama-13b:v1", "question_id": 27, "text": "My signature dish would be a delectable, crisp, juicy, fluffy, and tasty chicken fried rice. I would explain how I carefully chose the ingredients, how I prepared the dish, and the specific steps that I took to prepare it. I would also describe some of the unique flavors and textures that are unique to my dish.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
28 |
+
{"answer_id": "VWuywPvFPK42ZxmHpmQ27y", "model_id": "llama-13b:v1", "question_id": 28, "text": "I would feel very satisfied and happy and my body would be exhausted from reaching the top. My thoughts would be very positive and my body would feel peaceful. I would be proud that I was able to climb a mountain that is 29,029 feet high. The view from the top would be beautiful. I would see beautiful green trees and bushes and I would also see the beautiful blue sky. The view from the top would be breathtaking. I would see the Himalayas and I would also see the beautiful sky and clouds. I would see the beautiful mountains around me.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
29 |
+
{"answer_id": "AUWSLTAvGV5sP9qLsVyzjT", "model_id": "llama-13b:v1", "question_id": 29, "text": "I'd imagine that life would be much different on Mars than life on Earth. The red planet is much farther from the Sun, and as such, it's a lot colder. Average temperatures on Mars are about -80 degrees Fahrenheit. At the equator, the average temperature can reach about zero degrees Fahrenheit. The surface of Mars is also very dry. Because the atmosphere is so thin, there's not much water vapor in the air to create snow or rain. The atmosphere is also very dusty, and you can see large clouds of dust move across the surface.\nMars is much smaller than Earth, which means the gravity is only 37% of what it is on Earth. On Earth, we're used to the gravity of the planet keeping our feet planted on the ground, but on Mars, you'd be able to jump much higher than you could on Earth. The atmosphere on Mars is very thin, only about 1% as dense as Earth's. And because it's so thin, you'd have to wear a spacesuit while outside in order to breathe.\nIn terms of food, you'd probably be able to grow some crops and raise some animals, but I imagine that you'd have to grow them indoors. Also, you'd have to grow all of your food. Mars doesn't have any large bodies of water, so you couldn't fish. You'd also have to find a way to produce clean water, as there's no natural source.\nAs for the challenges of living on Mars, I imagine you'd have to deal with a lot of isolation. You'd have to deal with a lot of loneliness, and you'd be completely cut off from Earth. You'd also have to deal with a lot of boredom. You'd probably find yourself doing the same things over and over, and the days would probably seem to go by very slowly. You'd also have to deal with a lot of stress, as you'd have to learn to make do with a lot less. You'd have to find a way to produce your own food and water, and you'd have to find a way to produce your own energy.\nYou'd also have to deal with the cold. The average temperature on Mars is about -80 degrees Fahrenheit. Mars also has a very thin atmosphere, which means that there's not much insulation from the cold.\nI imagine that you'd also have to deal with a lot of dust. The atmosphere is very dusty, and the dust would get everywhere. You'd probably find yourself covered in dust all the time.\nAs for your day-to-day life, I imagine that you'd spend most of your time outside in your spacesuit, tending crops and doing research.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
30 |
+
{"answer_id": "GZtEdKLxmUUbenH4XRyLkE", "model_id": "llama-13b:v1", "question_id": 30, "text": "Allies are people who are helpful and supportive to us. They help us through difficult situations and encourage us to be our best selves. Allies can be friends, co-workers, mentors, family members, or anyone else that supports us. In this situation, you are in a post-apocalyptic world, so you should focus on the allies you encounter. Here are some ways you can describe the allies you encounter:\n1. Describe the allies you encounter in a post-apocalyptic world.\n2. Describe how you survive in a post-apocalyptic world.\n3. Describe the allies you encounter in a post-apocalyptic world.\n4. Describe what happens to you in a post-apocalyptic world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
31 |
+
{"answer_id": "kba2Xpb75rUfnskZSzrBas", "model_id": "llama-13b:v1", "question_id": 31, "text": "The most obvious sign that a restaurant is popular among locals is if the restaurant is always crowded and full of people, while if it is attracting tourists, the restaurant will be less crowded. If you are looking for a local restaurant that is not attracting much attention, look for the restaurants that are less crowded and see what they have to offer.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
32 |
+
{"answer_id": "RCaptsMBYXseVJgjJyyvUh", "model_id": "llama-13b:v1", "question_id": 32, "text": "Here are some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed:\n1. They pretend to understand something better than they actually do.\n2. They repeat what someone else has said, but with an odd emphasis.\n3. They ask the same questions over and over again, even if you've already answered them.\n4. They ask for more information after you've already given them plenty.\n5. They can't explain something they should know.\n6. They don't ask questions that a person who is confused would ask.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
33 |
+
{"answer_id": "2CeidEr7Cjj6YgcuaiPgnN", "model_id": "llama-13b:v1", "question_id": 33, "text": "Using a GPS device or smartphone app is a good way to navigate when you already have the destination in mind. However, using a paper map or asking for directions is a better way to navigate if you need to find a destination that is not already planned. Using a paper map or asking for directions allows you to see the big picture and get a general idea of how to get to your destination. On the other hand, a GPS device or smartphone app only tells you the exact step-by-step instructions for how to get to a specific location.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
34 |
+
{"answer_id": "kpStLLNWNkGHyRFdKsmDds", "model_id": "llama-13b:v1", "question_id": 34, "text": "It's difficult to determine if a person is genuinely interested in a conversation or simply being polite. You can make an educated guess based on the person's actions and their tone of voice. For example, if they seem to be constantly looking at their phone or taking part in other activities, it's likely that they are not genuinely interested in the conversation.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
35 |
+
{"answer_id": "PFtMQp4X4MrMYkEa8c8CKG", "model_id": "llama-13b:v1", "question_id": 35, "text": "When you shop at a local, independently-owned business rather than a chain store, you are supporting the community and keeping your money in the local economy. Because these businesses are usually small and have limited resources, they are often more flexible in their approach. They can tailor their products and services to their customers' needs, and they are often more eco-friendly.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
36 |
+
{"answer_id": "4JwjhR5QVsdYgQupSZrfUp", "model_id": "llama-13b:v1", "question_id": 36, "text": "There are a number of ways you can verify the credibility of a source of information. First, check to see if the source is from a reliable, well-known publisher. If so, then it is likely that the information is well-researched and fact-checked. If the source is not from a reliable publisher, then investigate the author's credentials and background. Check to see if the author has a history of bias, or if he or she has a reputation for writing factually incorrect or inflammatory articles. Finally, you can also check the source's sources. Are the sources credible and well-researched? Do they provide evidence to support the article's claims? If not, then it is likely that the source itself is not credible.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
37 |
+
{"answer_id": "ednPMy4dvW9CgKg2PJsBqW", "model_id": "llama-13b:v1", "question_id": 37, "text": "People often enjoy the sensation of being scared, such as by watching horror movies or going on roller coasters, because it can be exciting and exhilarating. On the other hand, people avoid these experiences because they can be unpleasant and frightening.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
38 |
+
{"answer_id": "ehPApSqCniyGN7hd332ToW", "model_id": "llama-13b:v1", "question_id": 38, "text": "For example, when you are in a new country, you can start by observing the behavior of the people around you. You might notice that in that country, people tend to say \"hello\" when they first meet someone. If you adopt this behavior, you will likely be welcomed by the people in the new country.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
39 |
+
{"answer_id": "bLvdcqSwgSCrUPZ8eQLLqb", "model_id": "llama-13b:v1", "question_id": 39, "text": "Focusing on space travel could be seen as a distraction from more pressing issues right here on Earth. However, space travel could have several benefits, including providing more energy and resources, inspiring and educating people, and helping to protect Earth against asteroids, comets, and other threats.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
40 |
+
{"answer_id": "8tx5GEHjSRKZRaZwHvLoM8", "model_id": "llama-13b:v1", "question_id": 40, "text": "Thanks for the question. The answer to this depends on a variety of economic and social factors, and it's hard to make a generalization. However, there are a few things to keep in mind:\n1. Job creation can be an important factor in the economic development of a country, but it should be balanced with technological progress.\n2. Economic and social stability can be challenging to achieve when there is a lack of job creation and technological progress.\n3. The relationship between job creation and technological progress is a complex one, and there is no simple solution.\n4. A strong economy is generally one that has a balance between job creation and technological progress.\n5. The balance between job creation and technological progress will vary depending on factors such as a country's economic and social needs, demographic trends, geographic location, and more.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
41 |
+
{"answer_id": "J6TzFQAyiHs6Arijqwxtrm", "model_id": "llama-13b:v1", "question_id": 41, "text": "According to some studies, the average human blinks 20,000 to 30,000 times per day. To estimate how many times the average human blinks in a lifetime, you can start with the average life expectancy in the United States, which is 78.8 years. This means that the average American lives 78.8 years, or 29,415 days. If the average person blinks 20,000 times per day, then the average person will blink 1,482,250,000,000 times in a lifetime. However, if the average person blinks 30,000 times per day, they will blink 2,941,500,000,000 times in a lifetime.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
42 |
+
{"answer_id": "f7xAtd3wwc4vP7WSnHffAN", "model_id": "llama-13b:v1", "question_id": 42, "text": "There are 1022 to 1023 atoms in a grain of salt. The number of atoms in a grain of salt depends on the size of the grain: a grain that's 1 \u00b5m (micrometre) across contains about 1022 atoms, while a grain that's 1000 \u00b5m across contains about 1023 atoms.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
43 |
+
{"answer_id": "eRMyp4AbfgBqtbVZzHupFN", "model_id": "llama-13b:v1", "question_id": 43, "text": "According to the National Weather Service, there are between 1,000 and 2,000 lightning strikes per day in the U.S. alone. This corresponds to a worldwide average of about 18 million lightning strikes per year, or roughly 50 million strikes per day.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
44 |
+
{"answer_id": "kjKb6YG86FfAh22qCePV4V", "model_id": "llama-13b:v1", "question_id": 44, "text": "Assuming the house weighs 60,000 pounds, 40,000 balloons would be necessary. The average balloon holds 2 pints of helium, which has a density of 0.17g/L. Therefore, the volume of a balloon is 2pints/0.17g/L = 12L. The weight of 12L of helium is 12*0.17 = 2 pounds. Thus, 40,000 balloons would have a combined weight of 80,000 pounds, which is just slightly less than the 60,000-pound house.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
45 |
+
{"answer_id": "ULtkGu6m6tB4tawFStVdJM", "model_id": "llama-13b:v1", "question_id": 45, "text": "According to a 2016 study, there were 18.2 billion text messages sent in the US alone each day. Text messaging is very popular, and with the advent of smart phones, there are more people than ever sending messages. Globally, there are 16.3 million text messages sent per minute. In 2016, there were 23.6 trillion text messages sent worldwide, and 8.5 trillion text messages sent in the US alone.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
46 |
+
{"answer_id": "C6hkCnJZmfCGvnr5an2jFh", "model_id": "llama-13b:v1", "question_id": 46, "text": "From the available data, it seems that around 5,000,000,000,000 words are spoken daily on Earth. I will now explain my reasoning step-by-step.\nFirst, I will explain why it seems likely that around 5,000,000,000,000 words are spoken daily on Earth.\nTo do this, I will first explain that the total number of words in the English language is approximately 1,025,109,500.\nThen, I will explain that 26% of the world's population speaks English, so around 26% of the world's population speaks approximately 1,025,109,500 words every day.\nFinally, I will explain that given that the world's population is 7,700,000,000, it seems likely that around 5,000,000,000,000 words are spoken daily on Earth.\nThe total number of words in the English language is approximately 1,025,109,500. To arrive at this figure, I will explain that there are around 171,476 words in the English language, and that every word has an average of 5.87 definitions.\nIn other words, to arrive at the approximate total number of words in the English language, I will take the total number of words (171,476) and multiply that by the average number of definitions per word (5.87).\nThen, I will explain that 26% of the world's population speaks English. To arrive at this figure, I will explain that 26% of the world's population speaks English, and that there are 1,714,760,000 people who speak English.\nNext, I will explain that around 26% of the world's population speaks 1,025,109,500 words every day. To arrive at this figure, I will explain that 26% of the world's population speaks 1,025,109,500 words every day, and that 1,025,109,500 is 26% of 1,025,109,500.\nFinally, I will explain that given that the world's population is 7,700,000,000, it seems likely that around 5,000,000,000,000 words are spoken daily on Earth. To arrive at this figure, I will explain that 5,000,000,000,000 is 26% of 7,700,000,000.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
|
47 |
+
{"answer_id": "DApBTLuiJwSsfQHTioH9PY", "model_id": "llama-13b:v1", "question_id": 47, "text": "A typical winter will see around 200 snowflakes fall, with a diameter between 100 and 200 micrometers. Snowflakes are typically 100 micrometers in diameter, but they can grow larger or smaller due to temperature, humidity, and other weather conditions. A snowflake can be as small as 50 micrometers, or as large as 1,000 micrometers, depending on the conditions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "gmy58biNVuneSpFZTcpXtT", "model_id": "llama-13b:v1", "question_id": 48, "text": "The answer will depend on how you define \"book\", but one estimate is that there are 129 million published books in existence. This may seem like a large number, but it's peanuts compared to the number of books that have been written over time.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "Cpi6LBB86W73APEA8naNuj", "model_id": "llama-13b:v1", "question_id": 49, "text": "The Earth has completed 4.7 billion orbits since the beginning of life. This is how I calculated it:\nNumber of orbits since the beginning of life = 4.7 billion\nThe Earth revolves around the Sun at 66,600 mph (30 km/s).\n1 year = 365.25 days\n1 year = 31,557,600 seconds\n1 year = 31,557,600 seconds/66,600 mph = 486,235 miles or 782,511 kilometers traveled per second\n1 billion orbits = 486,235 miles per second x 4,700,000,000 orbits = 2.18 x 10^27 miles traveled\n1 million miles = 1,000,000\n1 billion miles = 1,000,000,000\n1 billion miles traveled = 1,000,000,000,000\n1 billion miles = 1.08 x 10^17 miles\n1 billion orbits = 2.18 x 10^27 miles traveled / 1.08 x 10^17 miles = 2.01 x 10^10 orbits\n1 billion orbits = 4.7 billion orbits since the beginning of life", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "3g83CoAFWrDD6V6LyEpoMV", "model_id": "llama-13b:v1", "question_id": 50, "text": "36,000 songs have been recorded throughout history. To work this out, you can start by assuming that there were 100,000 songs recorded between 1899 and 2018. This means that there were 1.87 songs recorded per day on average. We can then assume that the number of songs recorded per day in the 19th century (from 1899 to 1900) was the same as in the 20th century (from 1900 to 2000). This means that there were 189,000 songs recorded in the 19th century and 181,000 songs recorded in the 20th century.\nHence, there were 36,000 songs recorded throughout history.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "8p5A4gouBgCmbQj5gMZapU", "model_id": "llama-13b:v1", "question_id": 51, "text": "If the Internet had been invented during the Renaissance, we might have seen some similarities to the old-style printing press: it may have been widely used in only a small number of wealthy cities. Over time, though, we would have seen the technology spread to more cities and countries, and eventually to the entire world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "LVWqSxH3Uq6675w7LXH6zP", "model_id": "llama-13b:v1", "question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, the Aztec empire would have continued to grow, and the Aztecs would have expanded their territory even further. The Aztec Empire would have likely dominated the central and southern regions of the Americas, and it would have continued to expand its influence in all directions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "ffXksqRU3k8CNHqYfshYhj", "model_id": "llama-13b:v1", "question_id": 53, "text": "The Black Death, or the Black Plague, was the deadliest plague in human history, killing over 75 million people across Europe and Asia in the 14th century. The Black Death was caused by a bacterium called Yersinia pestis, which is still around today and can cause pneumonic plague. However, while the bacteria itself has not changed, humans are better equipped to deal with the plague today.\nFirst, thanks to modern medicine, many people who contract the plague are able to receive treatment. Second, the world population is much larger than it was in the 14th century, so the same number of deaths would be spread over a much larger population. As a result, the death toll would be much lower today.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "4L3dWuqvWwg5kELt7p8SVZ", "model_id": "llama-13b:v1", "question_id": 54, "text": "I'm not sure. If he did so, he wouldn't have discovered the law of gravity.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "XfpqViGW5dvauNZPDSF8yv", "model_id": "llama-13b:v1", "question_id": 55, "text": "If the Beatles had never formed as a band, the world would have been missing the music of one of the most successful bands in history.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "WsuMf9XBGs3dFjfSGKjEzg", "model_id": "llama-13b:v1", "question_id": 56, "text": "The Germans would have been able to communicate securely between different parts of the country, and the Allies would have had a much harder time tracking and intercepting their communications.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "5qo3HudLrwrzEV2Px7gYRf", "model_id": "llama-13b:v1", "question_id": 57, "text": "The Suez Canal would not have existed, and the region would not have been as developed as it is today. Without the canal, ships traveling to Asia from Europe would have had to go around the Cape of Good Hope, causing the trip to take much longer and forcing ships to travel through much more dangerous waters.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "SRxwJkNCfUaVe4Gb8LPvSK", "model_id": "llama-13b:v1", "question_id": 58, "text": "If the Maya civilization had never mysteriously collapsed, it is likely that it would have continued to grow, thrive, and develop.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "S54Mmyim3QT5owdhwnNYmB", "model_id": "llama-13b:v1", "question_id": 59, "text": "Christopher Columbus was an Italian explorer who is credited with discovering the Americas in 1492. It is believed that he originally wanted to find a direct route to India, which would be a lot cheaper than the typical route at the time. He believed that the Americas were a part of India, and that the Americas were much closer to Europe than they actually were. When Columbus sailed across the Atlantic Ocean in 1492, he actually landed somewhere in the Bahamas, and not in India. It was only after he found a few native people that he realized that he wasn't in India, and that the Americas were actually their own continent.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "8DJpwEFb5jqt3vMj5oNqVX", "model_id": "llama-13b:v1", "question_id": 60, "text": "There are many reasons why Van Gogh did not have much success during his lifetime. One of the reasons is that he was a very poor businessman. He was very generous and gave away many of his paintings as presents. He also had a tendency to drink too much. He spent some time in a mental asylum and died at the age of 37.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "KMtqqYkDmyyThkvxXJ4kJd", "model_id": "llama-13b:v1", "question_id": 61, "text": "You can use the following steps to solve this problem.\n1. Create an empty C++ program.\n2. Include the <iostream> header to use the std::cin and std::cout functions.\n3. Open the text file using the fstream class.\n4. Read a single line from the file using the getline function.\n5. Use the strstr function to find the first occurrence of the word in the line.\n6. Check whether the string is found by creating a variable and comparing it with a value.\n7. Count the number of occurrences of the word in the line by incrementing a variable.\n8. Close the file.\n9. Print the number of occurrences of the word.\n10. Output the file name to the console.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "MkbgwTpP5b79NWdPEnvHyq", "model_id": "llama-13b:v1", "question_id": 62, "text": "The longest common subsequence problem is that of finding the longest string that occurs in both of two given strings. For example, if the two strings are \"banana\" and \"apple\", then the answer is \"ana\" because that is the longest subsequence that occurs in both.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "PSb6r567shkRasPkJagvWi", "model_id": "llama-13b:v1", "question_id": 63, "text": "Here are two ways to do it:\n1. You can use the function re.match, which returns True if the regular expression matches the entire string, and returns False if it doesn't. So, we can do something like this:\nif re.match('^[a-z0-9]+(\\.[a-z0-9]+)*@[a-z0-9]+(\\.[a-z0-9]+)*(\\.[a-z]{2,4})$', email):\nprint('It is a valid email')\nprint('It is not a valid email')\n2. You can use the function re.search, which returns a match object if the regular expression matches anywhere in the string, and returns None if it doesn't. So, we can do something like this:\nif re.search('^[a-z0-9]+(\\.[a-z0-9]+)*@[a-z0-9]+(\\.[a-z0-9]+)*(\\.[a-z]{2,4})$', email):", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "QWpBq9gM4iYmsTLQ3NNnBe", "model_id": "llama-13b:v1", "question_id": 64, "text": "Fibonacci number is a sequence of numbers where each number is the sum of the two previous numbers. The first two numbers in the sequence are 0 and 1. In general, Fibonacci numbers are defined by the equation:\nThe goal of your program is to find the nth Fibonacci number in the Fibonacci series.\nNote: n will be a positive integer.\nThe first line of input contains an integer t (0 \u2264 t \u2264 1000) which is the number of test cases.\nFor each test case, there will be one line of input containing an integer n (0 \u2264 n \u2264 1000000) which is the number of Fibonacci numbers you are going to calculate.\nPrint each Fibonacci number in a new line.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "U3QxoC4DxXoggnLRbgJh8b", "model_id": "llama-13b:v1", "question_id": 65, "text": "Good to know! I will implement a binary search algorithm to find a specific element in a sorted array.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "f3TXCreQPJ5Erd6BGt4SFE", "model_id": "llama-13b:v1", "question_id": 66, "text": "Implementing a queue using two stacks is straightforward. Take two stacks and create a third stack. First, push the first element of the first stack onto the third stack. Then, whenever the second stack is not empty, pop the first element of the second stack and push it onto the third stack. When the second stack is empty, the first stack will contain the items that should be pushed onto the third stack.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "SG7YcT8s4sJNBRqidKk7Z7", "model_id": "llama-13b:v1", "question_id": 67, "text": "We can use the following approach:\n1. Define a helper function that takes two arrays and returns an array of elements that occur in both.\n2. Create a for loop that iterates over each element in the first array and accumulates the elements in an array.\n3. Call the helper function in each iteration.\n4. Return the accumulated array.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "ENsG3zoeUY7ArFo2kNaQbz", "model_id": "llama-13b:v1", "question_id": 68, "text": "f(2) = 26", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "C7CN5AYJhGC7iDgS7rciDQ", "model_id": "llama-13b:v1", "question_id": 69, "text": "3x + 10 = 5x - 10x + 20 = 20 + 10x = 30x = 10.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "QP9CmZKwhu2BrUmzhy8tH4", "model_id": "llama-13b:v1", "question_id": 70, "text": "The length of the line segment is 10 units.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "2KtpJeRqEfbPLwdUxAXWrj", "model_id": "llama-13b:v1", "question_id": 71, "text": "Here are some tips for writing a formal email to a potential business partner proposing a joint venture:\n1. Be clear and concise\n2. Use formal language\n3. Use a proper greeting\n4. Use a proper closing\n5. Use proper punctuation\n6. Be sure to include all relevant information", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "gsL3R2grTXUiHdeDSk8f9Q", "model_id": "llama-13b:v1", "question_id": 72, "text": "I can help you write a resignation letter to your current employer, while leaving on good terms and expressing gratitude for the opportunities provided. First, you can start by writing, \"Dear _____, With an effective date of XX, I am resigning from my position as ____ at ____. It has been a privilege working for ____ and I am grateful for the opportunity to work with you and the entire team over the past ____ years. Thank you for your support and guidance during my time with _____.\"", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "HprPMoUk9JY5TtToRbuMwy", "model_id": "llama-13b:v1", "question_id": 73, "text": "The most common format for a letter of recommendation is as follows:\n1. Salutation: In the salutation, address the recipient as Mr., Mrs., Ms., or Dr.\n2. Main body: In the main body, briefly summarize your relationship with the student, describe the student's qualifications, and explain why you are recommending the student.\n3. Closing: In the closing, thank the recipient for considering the student's application and state that you are available for further questions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "YU7gDhmo4LDVMTEZZMRdBC", "model_id": "llama-13b:v1", "question_id": 74, "text": "", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "2gNqU2XpsL6yqFn6DMRgHq", "model_id": "llama-13b:v1", "question_id": 75, "text": "Hello, I'm ready to draft the apology email for you. Please tell me the name of the customer you'd like to send this email to.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "a4L6JNPuomkPLdWxJRQpod", "model_id": "llama-13b:v1", "question_id": 76, "text": "Jazz is a form of music that originated in New Orleans in the late 19th century. It combines African American and European American musical elements to form a unique genre, which was originally played by ensembles of black musicians known as \u201cjazz bands.\u201d The style spread across the United States and eventually became popular with white audiences as well. Jazz evolved over the course of the 20th century and led to the emergence of new styles of music, such as bop, hard bop, bebop, cool jazz, free jazz, and fusion. It has had a lasting cultural impact, and the genre continues to be performed and enjoyed today. Here are some key facts about jazz:\n1. Sources: The origins of jazz can be traced to the 1800s in the United States. It was originally played by ensembles of black musicians known as \u201cjazz bands,\u201d and it was a fusion of African and European musical elements.\n2. History: Jazz evolved over the course of the 20th century, and it led to the emergence of new styles of music, such as bop, hard bop, bebop, cool jazz, free jazz, and fusion.\n3. Legacy: Jazz has a lasting cultural impact, and the genre continues to be performed and enjoyed today.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "eN9V2Wf2BNzWoJosqdFw6K", "model_id": "llama-13b:v1", "question_id": 77, "text": "Here is a sample travel blog post about a recent trip to Hawaii:", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "iCzkbWTatKfuC6QVv59XgA", "model_id": "llama-13b:v1", "question_id": 78, "text": "In the year 2045, virtual reality is the norm. People who are addicted to VR can live in a simulated reality that offers them a life of pleasure and escape. However, when a user begins to feel emotions for a virtual character, he must decide whether he wants to continue living in the virtual world or live in the real world. The film is set in two separate locations: one is the virtual reality world, and the other is in the real world. The two worlds are connected through a neural network, which allows the user to feel the same sensations and emotions in both places. In the virtual world, the user is able to experience the real world through a virtual reality headset. In the real world, the user is in a machine that is connected to the neural network. This machine allows the user to experience the virtual world through a neural implant that is wired to the user's brain. The film's plot centers around the user and the virtual character. The characters in the virtual world are all artificial intelligence entities, and the user's experience in the real world is with two characters, his friend and his girlfriend. The user is a young man in the film, and the virtual character is a young woman. The two characters have a virtual relationship, and then the virtual character is killed by a bug in the system. This causes the user to fall into a deep depression, and he begins to question his own reality. He decides to go to the real world to get treatment for his depression. In the real world, the user has to decide whether he wants to stay in the real world or return to the virtual world. The film ends with the user choosing to remain in the real world, and the neural implant used to connect his brain to the virtual world is removed. The movie is based on the novel \"Ready Player One\" by Ernest Cline. The film is directed by Steven Spielberg and stars Tye Sheridan as the main actor. The film is a science fiction adventure and a romantic drama. The film's special effects are excellent, and the visuals are beautiful. The film's soundtrack is also quite good. The film has received positive reviews from critics, and it has been nominated for a number of awards, including the Academy Award for Best Visual Effects. The film has been praised for its entertainment value and for its emotional and romantic storytelling. The film has been compared to the popular sci-fi film \"The Matrix.\" Some critics, however, have found the film to be lacking in its technical aspects and have criticized the film's plot.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "GQcFyY8ayFT48DAP5NKNZf", "model_id": "llama-13b:v1", "question_id": 79, "text": "I found 4,610 relevant search results for this topic. The results include:\n1. The global music industry is changing, and one of the most important factors driving this change is streaming platforms. Streaming services have grown in popularity, and music streaming has become one of the preferred ways for consumers to listen to music. What exactly is streaming?\n2. Streaming means that music is delivered over the internet. Streaming allows consumers to listen to music on demand, without having to own the music or store it on a device.\n3. Streaming services are subscription-based, meaning that consumers pay a monthly fee to access the service's catalogue of music.\n4. Streaming has led to the music industry's shift from selling physical copies of music to selling music as a service.\n5. Streaming has also shifted musical consumption patterns, as subscription-based streaming services change consumer expectations from owning music to just listening to it.\n6. Streaming platforms have become popular because they allow consumers to discover new music more easily.\n7. Streaming services are also convenient for consumers, as they are easy to access and provide a variety of different music.\n8. Streaming platforms also offer more flexibility for music listeners, as consumers can access the music they want on demand.\n9. Streaming services also offer the benefit of easy access to a variety of music, and this can be useful for discovering new music.\n10. Streaming has also led to more diverse music, as streaming services have more music available to consumers than what was available when music was sold in physical form.\n11. Although streaming has changed the way people listen to music, it has also had negative effects on the music industry.\n12. Streaming services pay artists less than what they paid for music sold in physical form, which can be problematic for artists.\n13. Streaming services also pay less to the music industry overall.\n14. Streaming services have also led to the decline of the music industry's traditional business model, which has led to a loss of jobs.\n15. Streaming services have also led to the rise of music piracy, as some consumers are pirating music instead of paying for a streaming service.\n16. Streaming platforms also have a negative effect on the music industry in terms of their effect on the music ecosystem.\n17. Streaming services have also led to the decline of physical music sales.\n18. Streaming platforms have also led to the decline of record stores, which can harm artists and consumers.\n19. Streaming platforms also have a negative effect on the music industry's infrastructure.\n20. Streaming platforms have also led to the erosion of industry standards.\n21. Streaming platforms have also led to a decline in music videos and music videos being less accessible.\n22. Streaming platforms have also led to the decline of radio, which can be problematic for listeners.\n23. Streaming platforms have also led to a decline in music journalism.\n24. Streaming platforms have also led to a decline in the number of record labels.\n25. Streaming platforms have also led to a decline in the number of music producers.\n26. Streaming platforms have also led to a decline in the music industry's contribution to the economy.\n27. Streaming platforms have also led to the decline of music festivals.\n28. Streaming platforms have also led to the decline of music labels.\n29. Streaming platforms have also led to a decline in the music industry's culture.\n30. Streaming platforms have also led to a decline in the music industry's diversity.\n31. Streaming platforms have also led to the decline of the music industry's talent development.\n32. Streaming platforms have also led to a decline in the music industry's competitiveness.\n33. Streaming platforms have also led to the decline of the music industry's social and economic effects.\n34. Streaming platforms have also led to the decline of the music industry's intellectual property.\n35. Streaming platforms have also led to the decline of the music industry's political and social engagement.\n36. Streaming platforms have also led to the decline of the music industry's cultural and artistic output.\n37. Streaming platforms have also led to the decline of the music industry's public value.\n38. Although streaming has had a negative effect on the music industry, it has also had positive effects.\n39. Streaming platforms have", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
{"answer_id": "A6pauLMckn34otnfpeQRFi", "model_id": "llama-13b:v1", "question_id": 80, "text": "The orchestra performed well, and the audience was receptive to the performance. The orchestra played a selection of pieces from some of the most well-known classical composers, including Beethoven, Mozart, and Tchaikovsky. The orchestra played in unison with each other, and the conductor led them through the pieces without much difficulty. The orchestra played with a lot of energy and spirit. The audience was appreciative and applauded at the appropriate times. The performance lasted 90 minutes, and it was well worth the price of admission. I would recommend the concert to anyone who wants to experience beautiful classical music.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
llava/eval/table/answer/answer_vicuna-13b.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
llava/eval/table/caps_boxes_coco2014_val_80.jsonl
ADDED
@@ -0,0 +1,80 @@
{"id": "000000296284", "image": "000000296284.jpg", "captions": ["A donut shop is full of different flavors of donuts.", "Fruit flavored donuts lined up in a glass fronted cabinet", "A rack with some doughnuts in a glass case.", "A display case in a bakery filled with donuts.", "An assortment of doughnuts are arranged in a display case."], "instances": [{"category": "donut", "bbox": [0.37, 0.584, 0.504, 0.709]}, {"category": "donut", "bbox": [0.369, 0.22, 0.492, 0.317]}, {"category": "donut", "bbox": [0.471, 0.587, 0.639, 0.706]}, {"category": "donut", "bbox": [0.544, 0.213, 0.679, 0.316]}, {"category": "donut", "bbox": [0.035, 0.22, 0.196, 0.328]}, {"category": "donut", "bbox": [0.054, 0.608, 0.221, 0.711]}, {"category": "donut", "bbox": [0.283, 0.586, 0.429, 0.708]}, {"category": "donut", "bbox": [0.466, 0.226, 0.585, 0.32]}, {"category": "donut", "bbox": [0.28, 0.232, 0.393, 0.322]}, {"category": "donut", "bbox": [0.0, 0.609, 0.097, 0.722]}]}
{"id": "000000151358", "image": "000000151358.jpg", "captions": ["A newspaper that has sunglasses on top of it sitting in front of books.", "an apple sunglasses books and a teddy bear", "A folded newspaper and sunglasses are on a table with an apple, books, and teddy bear behind.", "An apple sitting on a table next to sunglasses and a news paper.", "There are sunglasses laying on the folded newspaper."], "instances": [{"category": "tie", "bbox": [0.258, 0.074, 0.527, 0.589]}, {"category": "apple", "bbox": [0.621, 0.482, 0.853, 0.645]}, {"category": "book", "bbox": [0.154, 0.107, 0.275, 0.59]}, {"category": "book", "bbox": [0.535, 0.09, 0.735, 0.583]}, {"category": "book", "bbox": [0.051, 0.112, 0.159, 0.6]}, {"category": "teddy bear", "bbox": [0.753, 0.084, 1.0, 0.517]}, {"category": "book", "bbox": [0.681, 0.097, 0.796, 0.483]}, {"category": "book", "bbox": [0.443, 0.099, 0.574, 0.588]}, {"category": "book", "bbox": [0.267, 0.337, 0.386, 0.579]}]}
{"id": "000000052312", "image": "000000052312.jpg", "captions": ["The old man literally has a toothbrush mustache.", "An old man with a tooth brush head under his nose, mimicking Hitler", "A man wearing a toothbrush for a moustache.", "A man with the head of a toothbrush under his nose like a mustache", "An elderly man wearing the head of a toothbrush as a moustache."], "instances": [{"category": "toothbrush", "bbox": [0.345, 0.59, 0.594, 0.679]}, {"category": "person", "bbox": [0.0, 0.03, 1.0, 0.99]}]}
{"id": "000000473210", "image": "000000473210.jpg", "captions": ["two people taking apart their wii controllers to replace batteries", "People taking apart video game remote controls on a table", "People handling a couple of remotes taking them apart.", "two sets of hands a wooden table and two controllers", "Two people who are taking apart a video game controller."], "instances": [{"category": "person", "bbox": [0.002, 0.334, 0.453, 0.986]}, {"category": "remote", "bbox": [0.407, 0.207, 0.727, 0.604]}, {"category": "remote", "bbox": [0.088, 0.344, 0.313, 0.547]}, {"category": "laptop", "bbox": [0.001, 0.049, 0.1, 0.197]}, {"category": "person", "bbox": [0.484, 0.254, 0.998, 0.985]}, {"category": "dining table", "bbox": [0.0, 0.003, 1.0, 0.956]}]}
{"id": "000000097131", "image": "000000097131.jpg", "captions": ["A car parked by a parking meter in front of a building.", "A car is sitting parked at a curb in front of a parking meter.", "A black car on the street next to a parking meter.", "A gray car parked in front of two parking meters.", "A black car parked on the side of the road."], "instances": [{"category": "car", "bbox": [0.227, 0.362, 0.946, 0.761]}, {"category": "car", "bbox": [0.793, 0.322, 0.88, 0.4]}, {"category": "car", "bbox": [0.0, 0.447, 0.028, 0.726]}, {"category": "parking meter", "bbox": [0.156, 0.35, 0.186, 0.453]}, {"category": "truck", "bbox": [0.907, 0.331, 1.0, 0.408]}, {"category": "parking meter", "bbox": [0.188, 0.349, 0.218, 0.448]}]}
{"id": "000000543364", "image": "000000543364.jpg", "captions": ["There is a table in the middle of the room.", "A room with a couch, table, lamp and a chaise.", "A living room with couch, chaise, track lighting, and a large window.", "A room with large windows, a couch and a table.", "A living room with lots of furniture and a large window."], "instances": [{"category": "dining table", "bbox": [0.388, 0.644, 0.636, 0.879]}, {"category": "couch", "bbox": [0.194, 0.531, 0.552, 0.777]}, {"category": "couch", "bbox": [0.568, 0.488, 0.907, 0.783]}, {"category": "remote", "bbox": [0.524, 0.651, 0.556, 0.675]}, {"category": "chair", "bbox": [0.661, 0.478, 0.802, 0.604]}]}
{"id": "000000217181", "image": "000000217181.jpg", "captions": ["They are standing next to some stylish motorcycles.", "Three men are standing around looking at sports motorcycles.", "A small group of men are standing around a motorcycle.", "Two men surrounding a blue motorcycle and others", "A few blue motorcycles are parked in a lot."], "instances": [{"category": "car", "bbox": [0.011, 0.177, 0.2, 0.336]}, {"category": "motorcycle", "bbox": [0.032, 0.139, 0.907, 0.982]}, {"category": "motorcycle", "bbox": [0.0, 0.239, 0.148, 0.613]}, {"category": "motorcycle", "bbox": [0.0, 0.301, 0.106, 0.45]}, {"category": "person", "bbox": [0.775, 0.043, 0.93, 0.463]}, {"category": "person", "bbox": [0.717, 0.116, 0.81, 0.509]}, {"category": "person", "bbox": [0.296, 0.008, 0.472, 0.325]}, {"category": "person", "bbox": [0.115, 0.19, 0.164, 0.269]}, {"category": "truck", "bbox": [0.63, 0.227, 0.731, 0.335]}]}
{"id": "000000140289", "image": "000000140289.jpg", "captions": ["Two born bears walking though a forest surrounded by trees.", "Two full grown brown bears in a habitat.", "Two bears are roaming around in the woods.", "Two bears around logs in front of a large rock.", "Two big bears wandering through the woods together"], "instances": [{"category": "bear", "bbox": [0.131, 0.269, 0.375, 0.65]}, {"category": "bear", "bbox": [0.568, 0.193, 0.809, 0.827]}]}
{"id": "000000460149", "image": "000000460149.jpg", "captions": ["A clock hosted on a pole on a pavement next to a building", "Street clock on quiet street with trees and bicycles.", "A tall clock stands on an empty sidewalk.", "A pole that has a clock on the top of it.", "a clock on a short tower and potted plants along the sidewalk"], "instances": [{"category": "potted plant", "bbox": [0.14, 0.71, 0.338, 0.856]}, {"category": "bicycle", "bbox": [0.65, 0.671, 0.766, 0.733]}, {"category": "car", "bbox": [0.38, 0.608, 0.488, 0.656]}, {"category": "clock", "bbox": [0.468, 0.048, 0.699, 0.216]}, {"category": "bicycle", "bbox": [0.669, 0.662, 0.719, 0.67]}, {"category": "car", "bbox": [0.786, 0.625, 0.86, 0.668]}, {"category": "potted plant", "bbox": [0.756, 0.637, 0.819, 0.682]}, {"category": "person", "bbox": [0.942, 0.615, 0.954, 0.641]}, {"category": "bicycle", "bbox": [0.648, 0.68, 0.714, 0.747]}, {"category": "car", "bbox": [0.837, 0.619, 0.88, 0.659]}, {"category": "potted plant", "bbox": [0.017, 0.197, 0.443, 0.686]}]}
{"id": "000000225738", "image": "000000225738.jpg", "captions": ["A group of giraffes standing up in their natural habitat.", "A group of giraffe standing in a grass field.", "A group of four giraffes near the same tree.", "there are four giraffes standing among some dry brush", "A herd of giraffe standing on top of a grass field."], "instances": [{"category": "giraffe", "bbox": [0.648, 0.231, 0.855, 0.915]}, {"category": "giraffe", "bbox": [0.33, 0.136, 0.521, 0.93]}, {"category": "giraffe", "bbox": [0.406, 0.261, 0.515, 1.0]}, {"category": "giraffe", "bbox": [0.347, 0.194, 0.583, 0.922]}]}
{"id": "000000109532", "image": "000000109532.jpg", "captions": ["An adorable husky dog sleeping in a dog bed next to a fan.", "A dark room with a dog sleeping on a dog bed.", "A dog is sleeping in a dark room.", "a large dog laying in a dog bed in a living room", "A dog sleeping on a dog bed in a room."], "instances": [{"category": "dog", "bbox": [0.426, 0.661, 0.582, 0.925]}, {"category": "potted plant", "bbox": [0.603, 0.261, 0.781, 0.613]}, {"category": "chair", "bbox": [0.67, 0.515, 0.899, 0.801]}, {"category": "potted plant", "bbox": [0.671, 0.439, 0.763, 0.612]}, {"category": "chair", "bbox": [0.852, 0.653, 0.948, 0.818]}]}
{"id": "000000118606", "image": "000000118606.jpg", "captions": ["A man riding skis on top of a rail.", "a person riding a pair of skis on a rail", "Someone on a pair of skis on a ramp at the ski slope", "Person with skis in the air above the snow.", "A man performing a trick on a rail while skiing."], "instances": [{"category": "person", "bbox": [0.444, 0.361, 0.537, 0.633]}, {"category": "skis", "bbox": [0.413, 0.554, 0.539, 0.664]}, {"category": "person", "bbox": [0.342, 0.585, 0.352, 0.62]}, {"category": "person", "bbox": [0.439, 0.565, 0.446, 0.58]}]}
{"id": "000000385873", "image": "000000385873.jpg", "captions": ["Three pizzas sitting next to each other in boxes.", "Two smaller pizzas sit beside a large pizza topped with tortilla chips.", "Three pizzas inside their delivery boxes, one with two side orders of sauce.", "One pizza is larger than two other pizzas.", "Three pizza boxes with pizza in them are open."], "instances": [{"category": "bowl", "bbox": [0.634, 0.624, 0.736, 0.752]}, {"category": "pizza", "bbox": [0.3, 0.382, 0.615, 0.733]}, {"category": "pizza", "bbox": [0.0, 0.4, 0.287, 0.745]}, {"category": "pizza", "bbox": [0.624, 0.279, 0.999, 0.753]}, {"category": "bowl", "bbox": [0.94, 0.247, 1.0, 0.352]}]}
{"id": "000000092109", "image": "000000092109.jpg", "captions": ["A giraffe's head is pictured in this clear, colorful photo.", "A giraffe is standing tall in the middle of several bright green trees", "The face of a giraffe looking to the side.", "the close up head shot of a giraffe", "this is a giraffe chewing on some leaves"], "instances": [{"category": "giraffe", "bbox": [0.236, 0.122, 1.0, 0.987]}]}
{"id": "000000163076", "image": "000000163076.jpg", "captions": ["There's an outdoor dining area featuring a fountain.", "A table sitting next to a water fountain covered by an umbrella.", "An empty restaurant patio with tables and umbrellas.", "An outdoor restaurant with a fountain at night", "A fountain bubbles in the plaza of an outdoor cafe."], "instances": [{"category": "umbrella", "bbox": [0.064, 0.069, 0.95, 0.844]}, {"category": "chair", "bbox": [0.198, 0.574, 0.355, 0.704]}, {"category": "chair", "bbox": [0.42, 0.571, 0.55, 0.738]}, {"category": "dining table", "bbox": [0.066, 0.741, 0.766, 0.925]}, {"category": "dining table", "bbox": [0.059, 0.584, 0.27, 0.659]}, {"category": "chair", "bbox": [0.432, 0.567, 0.52, 0.624]}, {"category": "chair", "bbox": [0.433, 0.555, 0.504, 0.6]}, {"category": "chair", "bbox": [0.109, 0.673, 0.374, 0.796]}]}
{"id": "000000560371", "image": "000000560371.jpg", "captions": ["Street signs from the corner of 8th ave. and 22 3/4 st.", "A two way street sign with one sign that changes from one name to another.", "A street sign is pointing towards 8th avenue and the other is pointing towards 22 3/4 street in the middle of the forest.", "A street sign standing in front of some trees.", "Peculiar street sign showing intersection of 23 3/4 St and 8th Ave/CTH D."], "instances": []}
{"id": "000000367571", "image": "000000367571.jpg", "captions": ["A couple of different doughnuts in a box", "There are four donuts in a box, and some are cake donuts and a doughnut with nuts and coconut on top.", "A box of glazed doughnuts on a table.", "Three donuts with toppings on them sitting inside a box.", "A box that is filled with different kinds of doughnuts."], "instances": [{"category": "donut", "bbox": [0.412, 0.335, 0.711, 0.681]}, {"category": "donut", "bbox": [0.093, 0.493, 0.486, 0.922]}, {"category": "donut", "bbox": [0.713, 0.423, 0.957, 0.874]}, {"category": "donut", "bbox": [0.13, 0.331, 0.397, 0.55]}]}
{"id": "000000580197", "image": "000000580197.jpg", "captions": ["Two men in bow ties standing next to steel rafter.", "Several men in suits talking together in a room.", "An older man in a tuxedo standing next to a younger man in a tuxedo wearing glasses.", "Two men wearing tuxedos glance at each other.", "Older man in tuxedo sitting next to another younger man in tuxedo."], "instances": [{"category": "tie", "bbox": [0.914, 0.46, 0.984, 0.512]}, {"category": "person", "bbox": [0.297, 0.638, 0.71, 0.989]}, {"category": "person", "bbox": [0.77, 0.177, 1.0, 0.971]}, {"category": "tie", "bbox": [0.281, 0.481, 0.368, 0.519]}, {"category": "person", "bbox": [0.103, 0.204, 0.497, 1.0]}]}
{"id": "000000506095", "image": "000000506095.jpg", "captions": ["A cat is staring at a laptop computer.", "a cat on a desk with a laptop and a mouse", "A cat that is sitting at a desk next to a laptop.", "A kitten sitting on a laptop computer sitting on top of a wooden desk.", "A kitten sits facing an open black laptop."], "instances": [{"category": "cat", "bbox": [0.658, 0.207, 1.0, 0.754]}, {"category": "laptop", "bbox": [0.108, 0.135, 0.766, 0.69]}, {"category": "book", "bbox": [0.836, 0.239, 0.954, 0.273]}, {"category": "book", "bbox": [0.0, 0.556, 0.128, 0.685]}, {"category": "book", "bbox": [0.039, 0.574, 0.257, 0.691]}, {"category": "book", "bbox": [0.825, 0.214, 0.962, 0.254]}, {"category": "book", "bbox": [0.892, 0.275, 0.958, 0.308]}, {"category": "book", "bbox": [0.922, 0.318, 0.986, 0.353]}, {"category": "book", "bbox": [0.87, 0.267, 0.951, 0.291]}, {"category": "book", "bbox": [0.949, 0.102, 0.976, 0.114]}, {"category": "book", "bbox": [0.936, 0.161, 0.958, 0.168]}]}
{"id": "000000024996", "image": "000000024996.jpg", "captions": ["A bathroom with a glass door and a sink.", "A blue lined bathroom with an open glass door.", "A nice bathroom with a sink, toilet, and tiled shower.", "A bathroom that is clean and shiny in the day.", "a bathroom with a sink and a mirror and a window"], "instances": [{"category": "toilet", "bbox": [0.842, 0.934, 0.95, 1.0]}, {"category": "sink", "bbox": [0.506, 0.724, 0.683, 0.834]}]}
{"id": "000000457882", "image": "000000457882.jpg", "captions": ["a girl in a bikini and a brown and white dog and a few other people", "A woman with a swimsuit on sitting with a dog.", "A woman is sitting with a dog on her lap.", "A dog sitting next to a woman in her swimsuit.", "WOMAN SITTING WITH HER DOG, AND OTHER WOMEN ARE AROUND"], "instances": [{"category": "dog", "bbox": [0.202, 0.409, 0.54, 0.81]}, {"category": "dog", "bbox": [0.61, 0.428, 0.729, 0.723]}, {"category": "boat", "bbox": [0.003, 0.705, 0.939, 0.974]}, {"category": "person", "bbox": [0.236, 0.001, 0.558, 0.784]}, {"category": "person", "bbox": [0.681, 0.001, 0.957, 0.798]}, {"category": "person", "bbox": [0.849, 0.478, 1.0, 0.946]}, {"category": "person", "bbox": [0.345, 0.187, 0.634, 0.828]}, {"category": "person", "bbox": [0.033, 0.345, 0.109, 0.434]}]}
{"id": "000000081552", "image": "000000081552.jpg", "captions": ["A cat sitting and curled up on a red couch", "A cat laying on a red couch sleeping.", "a tan and black cat curled up asleep on a red velvet seat", "A cat is curled up on a red sofa.", "Cat curled up, sleeping on a red plush couch."], "instances": [{"category": "cat", "bbox": [0.412, 0.237, 0.634, 0.482]}, {"category": "couch", "bbox": [0.003, 0.005, 1.0, 0.99]}]}
{"id": "000000273450", "image": "000000273450.jpg", "captions": ["A person flipping of a parking meter on the side of a road.", "A man holds up his middle finger to a parking meter.", "Person giving the middle finger to a parking meter.", "a black silver white blue red an orange parking meter and a hand flipping it off", "A person is flipping off a parking meter."], "instances": [{"category": "person", "bbox": [0.0, 0.475, 0.565, 0.987]}, {"category": "car", "bbox": [0.0, 0.0, 0.531, 0.734]}, {"category": "parking meter", "bbox": [0.0, 0.0, 1.0, 0.987]}]}
{"id": "000000203879", "image": "000000203879.jpg", "captions": ["There is a small cellphone displayed between a set of ear buds and two paper weights.", "a cell phone lays next to some diamonds", "a close up of a cell phone on a table near earbuds", "A cell phone sits on a table next to some jewels.", "A cell phone, ear buds, and two jewels laying near each other."], "instances": [{"category": "cell phone", "bbox": [0.322, 0.233, 0.62, 0.79]}]}
{"id": "000000346875", "image": "000000346875.jpg", "captions": ["two zebras in a field near one another", "A couple of zebra walking across a green field.", "Two zebra are walking near a gravel road.", "two zebras in a green field of grass and some trees", "A zebra follows another zebra through a park."], "instances": [{"category": "zebra", "bbox": [0.591, 0.263, 0.82, 0.466]}, {"category": "zebra", "bbox": [0.293, 0.243, 0.561, 0.45]}]}
{"id": "000000525439", "image": "000000525439.jpg", "captions": ["a man stands in front of a flipped skate boarder", "A man standing next to a skateboard that is laying on the ground wheels pointed up.", "Skateboard laying upside down on cement with someone standing next to it.", "A boy in camo shorts stands before an overturned skateboard.", "a person with an upside down skate board"], "instances": [{"category": "person", "bbox": [0.307, 0.001, 0.63, 0.739]}, {"category": "skateboard", "bbox": [0.0, 0.592, 0.626, 0.969]}]}
{"id": "000000304749", "image": "000000304749.jpg", "captions": ["The woman is taking a picture in the bathroom mirror.", "A picture of a woman in a mirror.", "A woman's midsection reflected in a round mirror.", "A circular mirror reflecting a woman's stomach in turquoise shirt.", "A selfie taken of a person from the neck down."], "instances": [{"category": "person", "bbox": [0.092, 0.001, 0.646, 0.496]}]}
{"id": "000000323760", "image": "000000323760.jpg", "captions": ["A toilet is shown in a bare room.", "A ugly bathroom with a section of the wall missing.", "A toilet in a stripped bathroom with studs, bricks and plaster showing", "A bathroom with no walls and a toilet bowl", "A white toilet next to some torn out walls."], "instances": [{"category": "toilet", "bbox": [0.167, 0.585, 0.714, 1.0]}]}
{"id": "000000066144", "image": "000000066144.jpg", "captions": ["A woman standing in front of window next to a bug and a stop sign.", "A car parked on the street next to a tree and stop sign.", "A lone Volkswagen is parked by a stop sign.", "A window view of a small car near a street stop sign.", "An old VW Bug standing at a stop sign."], "instances": [{"category": "stop sign", "bbox": [0.501, 0.328, 0.569, 0.428]}, {"category": "car", "bbox": [0.242, 0.488, 0.56, 0.726]}, {"category": "car", "bbox": [0.279, 0.325, 0.33, 0.363]}, {"category": "car", "bbox": [0.153, 0.333, 0.29, 0.405]}, {"category": "car", "bbox": [0.11, 0.339, 0.177, 0.373]}, {"category": "car", "bbox": [0.0, 0.654, 0.082, 0.826]}, {"category": "car", "bbox": [0.0, 0.322, 0.064, 0.364]}, {"category": "car", "bbox": [0.451, 0.333, 0.51, 0.392]}]}
{"id": "000000455772", "image": "000000455772.jpg", "captions": ["A person in a field jumping to catch a Frisbee.", "A guy jumping to catch a frisbee in mid-air.", "A person that is trying to get a frisbee.", "Nice reach, but the Frisbee flies on, victorious.", "A man playing frisbee in a grassy yard."], "instances": [{"category": "car", "bbox": [0.148, 0.339, 0.201, 0.476]}, {"category": "car", "bbox": [0.376, 0.396, 0.424, 0.476]}, {"category": "person", "bbox": [0.547, 0.122, 0.698, 0.904]}, {"category": "frisbee", "bbox": [0.479, 0.154, 0.555, 0.231]}, {"category": "car", "bbox": [0.001, 0.299, 0.085, 0.394]}]}
{"id": "000000511117", "image": "000000511117.jpg", "captions": ["A couple of kids standing on top of a grass covered field.", "A little boy wearing a baseball uniform stands by a little girl.", "A young boy in a baseball uniform and a young girl are standing in front of a chain link fence.", "A little boy and girl standing on a baseball field. The boy has a uniform on.", "A young baseball player is standing next to a young girl."], "instances": [{"category": "person", "bbox": [0.514, 0.178, 0.776, 0.774]}, {"category": "baseball glove", "bbox": [0.468, 0.462, 0.593, 0.609]}, {"category": "person", "bbox": [0.174, 0.051, 0.598, 0.839]}, {"category": "bench", "bbox": [0.558, 0.125, 1.0, 0.315]}]}
{"id": "000000207151", "image": "000000207151.jpg", "captions": ["A vegetarian pizza is half eaten on a pizza holder.", "A couple of pieces of pizza with vegetable slices on them.", "A wooden pan serving tray with a pizza on it.", "A pizza on a cutting board is half gone.", "A Pizza is nearly finished with only three pieces left."], "instances": [{"category": "bottle", "bbox": [0.001, 0.001, 0.121, 0.231]}, {"category": "cup", "bbox": [0.0, 0.002, 0.121, 0.238]}, {"category": "pizza", "bbox": [0.17, 0.472, 0.526, 0.82]}, {"category": "pizza", "bbox": [0.398, 0.106, 0.962, 0.679]}, {"category": "dining table", "bbox": [0.0, 0.001, 1.0, 0.988]}]}
{"id": "000000431165", "image": "000000431165.jpg", "captions": ["A baby elephant standing in front of a brick building.", "An elephant is standing near a dirt mount in an exhibit.", "Grey elephant standing next to a large sand dune in a pen.", "An elephant standing alone inside of an enclosure.", "The baby elephant is alone in the pen."], "instances": [{"category": "elephant", "bbox": [0.303, 0.399, 0.638, 0.78]}]}
{"id": "000000378545", "image": "000000378545.jpg", "captions": ["A pole that has a clock on top of it.", "A clock mounted on an outdoor post with Roman numerals.", "a clock on a pole saying it is 12:45", "An ornamental standing clock is at the foreground of a row of houses.", "A black and gold clock on a pole in front of a building."], "instances": [{"category": "clock", "bbox": [0.216, 0.249, 0.749, 0.658]}]}
{"id": "000000555904", "image": "000000555904.jpg", "captions": ["A man sitting at a bar filled with liquor.", "People sitting a a take near several bottles of wine on shelves.", "Several people are sitting at a table drinking.", "Several people in a bar sitting at a long table.", "People eating in a restaurant near wine bottles."], "instances": [{"category": "dining table", "bbox": [0.123, 0.663, 0.317, 0.811]}, {"category": "person", "bbox": [0.715, 0.239, 1.0, 0.998]}, {"category": "person", "bbox": [0.142, 0.528, 0.281, 0.742]}, {"category": "person", "bbox": [0.529, 0.53, 0.606, 0.69]}, {"category": "person", "bbox": [0.705, 0.518, 0.796, 0.673]}, {"category": "wine glass", "bbox": [0.247, 0.669, 0.27, 0.718]}, {"category": "person", "bbox": [0.281, 0.524, 0.534, 1.0]}, {"category": "bottle", "bbox": [0.168, 0.346, 0.189, 0.425]}, {"category": "bottle", "bbox": [0.379, 0.264, 0.431, 0.433]}, {"category": "bottle", "bbox": [0.252, 0.313, 0.277, 0.429]}, {"category": "bottle", "bbox": [0.294, 0.295, 0.326, 0.43]}, {"category": "bottle", "bbox": [0.589, 0.35, 0.613, 0.444]}, {"category": "bottle", "bbox": [0.433, 0.281, 0.473, 0.437]}, {"category": "bottle", "bbox": [0.478, 0.289, 0.513, 0.44]}, {"category": "wine glass", "bbox": [0.688, 0.615, 0.709, 0.69]}, {"category": "cup", "bbox": [0.589, 0.647, 0.612, 0.693]}, {"category": "person", "bbox": [0.732, 0.356, 0.953, 0.806]}, {"category": "bottle", "bbox": [0.555, 0.337, 0.585, 0.438]}, {"category": "bottle", "bbox": [0.337, 0.29, 0.378, 0.432]}, {"category": "bottle", "bbox": [0.21, 0.333, 0.232, 0.426]}, {"category": "bottle", "bbox": [0.134, 0.36, 0.148, 0.422]}, {"category": "bottle", "bbox": [0.516, 0.312, 0.557, 0.439]}, {"category": "cup", "bbox": [0.231, 0.718, 0.26, 0.763]}, {"category": "chair", "bbox": [0.517, 0.828, 0.65, 0.999]}, {"category": "chair", "bbox": [0.643, 0.804, 0.738, 0.841]}, {"category": "chair", "bbox": [0.347, 0.908, 0.519, 1.0]}, {"category": "chair", "bbox": [0.64, 0.806, 0.74, 0.998]}, {"category": "cup", "bbox": [0.205, 0.692, 0.232, 0.767]}, {"category": "dining table", "bbox": [0.536, 0.676, 0.743, 0.838]}, {"category": "person", "bbox": [0.002, 0.501, 0.263, 0.987]}, {"category": "bottle", "bbox": [0.531, 0.461, 0.542, 0.526]}, {"category": "bottle", "bbox": [0.237, 0.354, 0.702, 0.629]}]}
{"id": "000000415393", "image": "000000415393.jpg", "captions": ["a man on a skate board looks like he is falling", "A man does a skateboard trick on a skateboard ramp", "Guy falling off a skateboard in a room.", "A man riding a skateboard on top of a table.", "a man skating on part of a ramp with his skateboard"], "instances": [{"category": "person", "bbox": [0.361, 0.016, 0.809, 0.888]}, {"category": "skateboard", "bbox": [0.606, 0.809, 0.889, 0.901]}, {"category": "person", "bbox": [0.479, 0.091, 0.576, 0.386]}, {"category": "person", "bbox": [0.047, 0.441, 0.197, 0.759]}, {"category": "person", "bbox": [0.038, 0.453, 0.076, 0.545]}, {"category": "person", "bbox": [0.249, 0.307, 0.311, 0.591]}]}
{"id": "000000161011", "image": "000000161011.jpg", "captions": ["Three skiers posing for a picture on the slope.", "Three skiers pause for a photo at the top of a mountain.", "Three people standing on a mountain taking a picture as they ski.", "A woman and two men on skis on a snowy hillside surrounded by trees", "Three skiers have stopped to pose for a picture."], "instances": [{"category": "person", "bbox": [0.36, 0.321, 0.509, 0.82]}, {"category": "person", "bbox": [0.179, 0.281, 0.349, 0.795]}, {"category": "person", "bbox": [0.611, 0.292, 0.751, 0.809]}, {"category": "skis", "bbox": [0.595, 0.743, 0.732, 0.961]}, {"category": "skis", "bbox": [0.341, 0.724, 0.621, 0.907]}, {"category": "skis", "bbox": [0.212, 0.705, 0.398, 0.905]}]}
{"id": "000000284296", "image": "000000284296.jpg", "captions": ["Three giraffe's leaning over to get a sip of water.", "an image of a herd of giraffes in the water", "three giraffes banding down to drink water with trees in the background", "Three giraffe drinking from a pond with brush in back.", "Giraffes leaning down to drink at a watering hole"], "instances": [{"category": "giraffe", "bbox": [0.624, 0.387, 0.822, 0.635]}, {"category": "giraffe", "bbox": [0.4, 0.326, 0.561, 0.58]}, {"category": "giraffe", "bbox": [0.152, 0.291, 0.343, 0.551]}]}
{"id": "000000056013", "image": "000000056013.jpg", "captions": ["a number of luggage bags on a cart in a lobby", "Wheeled cart with luggage at lobby of commercial business.", "Trolley used for transporting personal luggage to guests rooms.", "A luggage cart topped with lots of luggage.", "a cart filled with suitcases and bags"], "instances": [{"category": "backpack", "bbox": [0.276, 0.52, 0.456, 0.678]}, {"category": "suitcase", "bbox": [0.41, 0.58, 0.597, 0.827]}, {"category": "suitcase", "bbox": [0.173, 0.645, 0.363, 0.836]}, {"category": "person", "bbox": [0.959, 0.297, 1.0, 0.478]}, {"category": "suitcase", "bbox": [0.526, 0.519, 0.712, 0.706]}, {"category": "person", "bbox": [0.762, 0.253, 0.871, 0.46]}, {"category": "backpack", "bbox": [0.517, 0.514, 0.694, 0.698]}, {"category": "handbag", "bbox": [0.316, 0.181, 0.431, 0.426]}, {"category": "suitcase", "bbox": [0.747, 0.453, 0.858, 0.557]}]}
{"id": "000000293505", "image": "000000293505.jpg", "captions": ["A person on a motor bike next to a cow.", "A woman riding a motorcycle down a dirt road.", "there is a woman riding a scooter down a dirt road", "A woman on a moped, two men and animals walking down the road.", "A woman on a motorcycle is next to a man walking a dog along with other people going down a dirt road."], "instances": [{"category": "cow", "bbox": [0.602, 0.472, 0.721, 0.816]}, {"category": "motorcycle", "bbox": [0.402, 0.512, 0.516, 0.788]}, {"category": "person", "bbox": [0.408, 0.4, 0.514, 0.639]}, {"category": "person", "bbox": [0.754, 0.301, 1.0, 1.0]}, {"category": "person", "bbox": [0.705, 0.415, 0.789, 0.714]}, {"category": "cow", "bbox": [0.347, 0.44, 0.373, 0.509]}, {"category": "cow", "bbox": [0.361, 0.436, 0.381, 0.501]}]}
{"id": "000000305873", "image": "000000305873.jpg", "captions": ["A little girl holding a red black dotted umbrella.", "A little girl with rain boots and a rain jacket on and an open umbrella to match her jacket.", "a little girl holding onto a lady bug pattern umbrella", "The child wears a labybug rain coat with a matching umbrella.", "A little girl wearing a ladybug raincoat and green rubber boots holding a ladybug umbrella"], "instances": [{"category": "umbrella", "bbox": [0.246, 0.002, 0.992, 0.415]}, {"category": "person", "bbox": [0.35, 0.132, 0.699, 0.791]}, {"category": "car", "bbox": [0.614, 0.0, 1.0, 0.465]}]}
{"id": "000000034096", "image": "000000034096.jpg", "captions": ["A house being built with lots of wood.", "A big pile of building material is placed on the floor in the wooden structure.", "A partially-built house with wooden studs and staircase in view.", "A house full of wood getting built at the moment.", "The beginning stages of a home still being made."], "instances": [{"category": "bed", "bbox": [0.505, 0.42, 0.721, 0.59]}, {"category": "tv", "bbox": [0.192, 0.441, 0.335, 0.606]}]}
{"id": "000000165257", "image": "000000165257.jpg", "captions": ["A large black counter top sitting next to a sink.", "a clean kitchen counter with a clean sink", "A kitchen with a sink, dishwasher and some boxes on the counter.", "A kitchen with a sink, dishwasher and boxes on the counter.", "a black counter on a wood cabinet in a kitchen", "a new kitchen cabinet with a sink being installed"], "instances": [{"category": "sink", "bbox": [0.513, 0.243, 0.718, 0.314]}]}
{"id": "000000431026", "image": "000000431026.jpg", "captions": ["a street sign on a city street near some tall bushes", "street signs on a metal pole lining a sidewalk lined with shrubbery.", "a large hedge of bushes on a corner near a street sign.", "Two street signs on sidewalk next to bushes and trees.", "Street signs along a well manicured street with large houses."], "instances": []}
{"id": "000000524575", "image": "000000524575.jpg", "captions": ["Three giraffe and a wildebeest in a field.", "A moose and several giraffes are grazing in the field.", "Zebras in the wild with a wildebeest behind them", "Two giraffe and a ox standing in a field eating grass.", "Giraffes and other safari animals graze in a sunlit field."], "instances": [{"category": "cow", "bbox": [0.46, 0.716, 0.643, 0.999]}, {"category": "giraffe", "bbox": [0.285, 0.5, 0.401, 0.826]}, {"category": "giraffe", "bbox": [0.083, 0.554, 0.179, 0.821]}, {"category": "giraffe", "bbox": [0.887, 0.481, 0.968, 0.715]}]}
{"id": "000000326550", "image": "000000326550.jpg", "captions": ["Black and white photograph of a person holding a surfboard by water.", "A person with a surfboard standing next to the water.", "A surfer stands on the rocks watching a wave crash.", "A man standing on a beach holding a surfboard.", "a person looking at the waves ready to surf"], "instances": [{"category": "person", "bbox": [0.327, 0.461, 0.492, 0.897]}, {"category": "surfboard", "bbox": [0.282, 0.56, 0.606, 0.741]}, {"category": "person", "bbox": [0.924, 0.352, 0.933, 0.362]}, {"category": "person", "bbox": [0.912, 0.348, 0.919, 0.36]}]}
|
47 |
+
{"id": "000000018476", "image": "000000018476.jpg", "captions": ["A tie that is sitting on top of a shirt.", "This photograph appears to be looking truly wonderful.", "a uniform complete with shoes laying on a bed", "Suit laid out with a red tie, white shirt and black shoes.", "a white shirt a red tie and some black shoes"], "instances": [{"category": "tie", "bbox": [0.457, 0.09, 0.853, 0.984]}, {"category": "bed", "bbox": [0.005, 0.005, 1.0, 0.379]}]}
|
48 |
+
{"id": "000000480652", "image": "000000480652.jpg", "captions": ["These suitcases are sitting next to a chair.", "An assortment of luggage bags stacked by a kitchen chair.", "A stack of luggage by a chair and table.", "a table and chair with several pieces of luggage nearby", "A pile of luggage sitting on the floor."], "instances": [{"category": "chair", "bbox": [0.483, 0.192, 1.0, 0.769]}, {"category": "backpack", "bbox": [0.433, 0.429, 0.742, 0.856]}, {"category": "suitcase", "bbox": [0.059, 0.414, 0.453, 0.841]}, {"category": "handbag", "bbox": [0.19, 0.184, 0.779, 0.475]}, {"category": "suitcase", "bbox": [0.175, 0.204, 0.583, 0.462]}]}
|
49 |
+
{"id": "000000012748", "image": "000000012748.jpg", "captions": ["A man and child next to a horse.", "a little boy touching the nose of a brown horse", "A man holding a baby whose petting a horse.", "a man letting his baby pet a horse", "man holding a baby and petting a horse"], "instances": [{"category": "horse", "bbox": [0.003, 0.079, 0.504, 0.868]}, {"category": "person", "bbox": [0.452, 0.294, 1.0, 0.989]}, {"category": "person", "bbox": [0.46, 0.217, 1.0, 0.988]}]}
|
50 |
+
{"id": "000000247840", "image": "000000247840.jpg", "captions": ["Large group of people standing outside a restaurant together.", "A dairy queen has people standing outside waiting", "an image of people standing outside and ice cream store", "Several people are lined up outside of a store.", "The front of a Dairy Queen restaurant with people entering the side."], "instances": [{"category": "fire hydrant", "bbox": [0.774, 0.674, 0.83, 0.807]}, {"category": "person", "bbox": [0.741, 0.465, 0.824, 0.755]}, {"category": "person", "bbox": [0.806, 0.471, 0.839, 0.722]}, {"category": "person", "bbox": [0.831, 0.499, 0.866, 0.726]}, {"category": "bench", "bbox": [0.061, 0.69, 0.219, 0.768]}, {"category": "handbag", "bbox": [0.859, 0.558, 0.877, 0.603]}, {"category": "person", "bbox": [0.719, 0.504, 0.75, 0.626]}, {"category": "potted plant", "bbox": [0.7, 0.648, 0.764, 0.743]}, {"category": "handbag", "bbox": [0.827, 0.548, 0.837, 0.577]}, {"category": "sandwich", "bbox": [0.359, 0.618, 0.417, 0.694]}]}
|
51 |
+
{"id": "000000399452", "image": "000000399452.jpg", "captions": ["a sandwhich sitting on a plate next to a glass of tea, bowl of soup", "a sandwich on a white plate a drink on a brown table", "A sandwich and chips sit on a white plate.", "a large plate of food with a glass of soda by it", "A sandwich sitting on top of a white plate next to a cup of coffee."], "instances": [{"category": "sandwich", "bbox": [0.175, 0.326, 0.605, 0.71]}, {"category": "cup", "bbox": [0.504, 0.024, 0.687, 0.419]}, {"category": "knife", "bbox": [0.742, 0.283, 0.857, 0.376]}, {"category": "spoon", "bbox": [0.618, 0.46, 0.797, 0.809]}, {"category": "fork", "bbox": [0.684, 0.254, 0.805, 0.395]}, {"category": "bowl", "bbox": [0.782, 0.366, 1.0, 0.62]}, {"category": "chair", "bbox": [0.202, 0.0, 0.671, 0.148]}, {"category": "dining table", "bbox": [0.002, 0.126, 0.996, 0.987]}]}
|
52 |
+
{"id": "000000515716", "image": "000000515716.jpg", "captions": ["A couple of women standing on either side of a man wearing glasses.", "Two women and a man are holding glasses up at a wine tasting.", "Three young adults holding wine glasses while standing at a bar.", "A group of people sit holding glasses and smiling at a table with several bottles.", "A group of people at a celebration having a taste of wine."], "instances": [{"category": "bottle", "bbox": [0.529, 0.604, 0.637, 0.908]}, {"category": "bottle", "bbox": [0.379, 0.398, 0.481, 0.892]}, {"category": "bottle", "bbox": [0.942, 0.464, 0.988, 0.653]}, {"category": "person", "bbox": [0.0, 0.126, 0.136, 0.811]}, {"category": "person", "bbox": [0.05, 0.093, 0.211, 0.471]}, {"category": "person", "bbox": [0.401, 0.031, 0.678, 0.683]}, {"category": "person", "bbox": [0.617, 0.191, 0.94, 0.858]}, {"category": "person", "bbox": [0.723, 0.098, 0.947, 0.564]}, {"category": "wine glass", "bbox": [0.634, 0.434, 0.697, 0.628]}, {"category": "wine glass", "bbox": [0.285, 0.346, 0.372, 0.558]}, {"category": "wine glass", "bbox": [0.522, 0.422, 0.583, 0.544]}, {"category": "handbag", "bbox": [0.704, 0.601, 1.0, 0.916]}, {"category": "person", "bbox": [0.944, 0.319, 0.999, 0.604]}, {"category": "bottle", "bbox": [0.921, 0.46, 0.953, 0.636]}, {"category": "person", "bbox": [0.116, 0.171, 0.41, 0.829]}]}
|
53 |
+
{"id": "000000116173", "image": "000000116173.jpg", "captions": ["The boy is on his surfboard in the water riding it.", "a young boy riding a boogie board in the water", "A boy riding surf board in the ocean.", "A young boy is riding a surfboard on a small wave.", "A young boy is surfing in the ocean."], "instances": [{"category": "person", "bbox": [0.485, 0.238, 0.702, 0.821]}, {"category": "person", "bbox": [0.866, 0.223, 0.921, 0.29]}, {"category": "person", "bbox": [0.752, 0.146, 0.775, 0.188]}, {"category": "surfboard", "bbox": [0.239, 0.758, 0.782, 0.846]}, {"category": "surfboard", "bbox": [0.853, 0.277, 0.981, 0.29]}, {"category": "surfboard", "bbox": [0.727, 0.169, 0.801, 0.198]}, {"category": "person", "bbox": [0.637, 0.194, 0.677, 0.261]}]}
|
54 |
+
{"id": "000000186013", "image": "000000186013.jpg", "captions": ["A beach scene includes many different kites flying in a cloudy sky.", "Kites being flown at the beach at twilight.", "A beach with flags in the ground and kites overhead in the sky.", "A beach with rows of flags in the sand and kites flying overhead.", "A beach filled with kites and wind sails next to the ocean."], "instances": [{"category": "kite", "bbox": [0.174, 0.4, 0.351, 0.483]}, {"category": "kite", "bbox": [0.144, 0.13, 0.273, 0.17]}, {"category": "kite", "bbox": [0.236, 0.269, 0.268, 0.294]}, {"category": "kite", "bbox": [0.464, 0.204, 0.598, 0.271]}, {"category": "kite", "bbox": [0.61, 0.304, 0.659, 0.342]}, {"category": "kite", "bbox": [0.545, 0.435, 0.565, 0.452]}, {"category": "kite", "bbox": [0.027, 0.558, 0.151, 0.59]}, {"category": "kite", "bbox": [0.93, 0.429, 0.973, 0.536]}, {"category": "kite", "bbox": [0.684, 0.36, 0.697, 0.374]}, {"category": "surfboard", "bbox": [0.393, 0.627, 0.446, 0.934]}, {"category": "person", "bbox": [0.959, 0.685, 0.984, 0.713]}, {"category": "person", "bbox": [0.919, 0.681, 0.94, 0.725]}, {"category": "person", "bbox": [0.8, 0.597, 0.805, 0.61]}, {"category": "person", "bbox": [0.079, 0.928, 0.116, 0.975]}, {"category": "kite", "bbox": [0.743, 0.307, 0.755, 0.319]}, {"category": "kite", "bbox": [0.78, 0.322, 0.795, 0.335]}, {"category": "kite", "bbox": [0.536, 0.526, 0.597, 0.617]}, {"category": "person", "bbox": [0.941, 0.694, 0.961, 0.726]}, {"category": "kite", "bbox": [0.575, 0.446, 0.594, 0.471]}]}
|
55 |
+
{"id": "000000015029", "image": "000000015029.jpg", "captions": ["A man holding a white frisbee standing on top of a field.", "A man is playing frisbee next to a tent.", "Guy at the park holding a frisbee with people in the back under a tent", "A man is holding a Frisbee standing in the grass.", "Young adult male holding a frisbee at an event."], "instances": [{"category": "frisbee", "bbox": [0.138, 0.359, 0.215, 0.587]}, {"category": "person", "bbox": [0.16, 0.002, 0.726, 0.995]}, {"category": "person", "bbox": [0.81, 0.73, 0.852, 0.825]}, {"category": "person", "bbox": [0.786, 0.749, 0.833, 0.814]}, {"category": "person", "bbox": [0.847, 0.743, 0.89, 0.804]}, {"category": "person", "bbox": [0.614, 0.749, 0.706, 0.936]}]}
|
56 |
+
{"id": "000000500565", "image": "000000500565.jpg", "captions": ["A woman holding a child wrapped in a towel brushing her teeth.", "A woman is holding a baby who is wrapped in a towel and holding a toothbrush", "A woman holding a little boy who is brushing his teeth.", "A baby with a toothbrush in his mouth while being held by a woman", "a close up of an adult holding a child brushing their teeth"], "instances": [{"category": "toothbrush", "bbox": [0.586, 0.66, 0.754, 0.821]}, {"category": "person", "bbox": [0.002, 0.007, 0.637, 0.991]}, {"category": "person", "bbox": [0.357, 0.196, 0.998, 0.984]}]}
|
57 |
+
{"id": "000000297323", "image": "000000297323.jpg", "captions": ["Two buses are parked against a curb in front of a building.", "Two automobiles parked on the side of a building.", "two tourist buses parked on street in front of old industrial building", "Two unique city buses stopped at a stop sign.", "Buses parked outside by a building and stop sign."], "instances": [{"category": "bus", "bbox": [0.7, 0.711, 0.92, 0.881]}, {"category": "person", "bbox": [0.936, 0.771, 0.972, 0.833]}, {"category": "stop sign", "bbox": [0.237, 0.666, 0.285, 0.728]}, {"category": "bus", "bbox": [0.334, 0.71, 0.678, 0.935]}, {"category": "truck", "bbox": [0.335, 0.72, 0.683, 0.934]}, {"category": "person", "bbox": [0.34, 0.791, 0.367, 0.834]}]}
|
58 |
+
{"id": "000000441147", "image": "000000441147.jpg", "captions": ["Two antique suitcases sit stacked one on top of the other.", "Two suitcases are stacked on each other and one is black while the other is brown and yellow.", "a close up of two luggage suit cases stacked on each other", "A stack of antique luggage is displayed with price tags.", "two suitcases made of leather and stacked on top of each other"], "instances": [{"category": "suitcase", "bbox": [0.167, 0.025, 0.989, 0.445]}, {"category": "suitcase", "bbox": [0.002, 0.31, 0.994, 0.996]}]}
|
59 |
+
{"id": "000000353536", "image": "000000353536.jpg", "captions": ["A table topped with plates and glasses with eating utensils..", "a fork is laying on a small white plate", "dirty dishes on a table, and a bottle of something.", "a table top with some dishes on top of it", "A table full of dirty dishes is pictured in this image."], "instances": [{"category": "dining table", "bbox": [0.0, 0.007, 0.998, 0.988]}, {"category": "bottle", "bbox": [0.554, 0.002, 0.768, 0.411]}, {"category": "cup", "bbox": [0.372, 0.011, 0.544, 0.427]}, {"category": "fork", "bbox": [0.442, 0.464, 0.818, 0.572]}, {"category": "fork", "bbox": [0.089, 0.233, 0.272, 0.456]}, {"category": "spoon", "bbox": [0.144, 0.218, 0.326, 0.413]}, {"category": "cup", "bbox": [0.688, 0.056, 0.812, 0.361]}]}
|
60 |
+
{"id": "000000416256", "image": "000000416256.jpg", "captions": ["A cat laying on the floor next to a keyboard.", "an orange and white cat is laying next to a keyboard and some wires", "A cat is laying next to a computer keyboard.", "a cat laying on a floor next to a keyboard", "A CAT LAYING ON THE FLOOR AMIDST A COMPUTER,SPEAKERS,CORDS"], "instances": [{"category": "cat", "bbox": [0.235, 0.23, 0.737, 0.639]}, {"category": "keyboard", "bbox": [0.243, 0.562, 0.631, 0.836]}, {"category": "keyboard", "bbox": [0.058, 0.33, 0.277, 0.608]}]}
|
61 |
+
{"id": "000000214367", "image": "000000214367.jpg", "captions": ["Wood shading on the side of a window with brick siding.", "A tree filled with lots of red fruit near a building.", "By the window outside is a apple tree, where the apples are ready to be picked.", "Some very nice looking red fruity by a window,", "A shuttered window has a fruit tree outside it."], "instances": [{"category": "apple", "bbox": [0.214, 0.112, 0.408, 0.266]}, {"category": "apple", "bbox": [0.472, 0.166, 0.618, 0.293]}, {"category": "apple", "bbox": [0.055, 0.592, 0.172, 0.686]}, {"category": "apple", "bbox": [0.126, 0.661, 0.236, 0.739]}, {"category": "apple", "bbox": [0.52, 0.09, 0.609, 0.143]}, {"category": "apple", "bbox": [0.226, 0.354, 0.285, 0.409]}, {"category": "apple", "bbox": [0.0, 0.698, 0.096, 0.771]}, {"category": "apple", "bbox": [0.001, 0.646, 0.042, 0.713]}, {"category": "apple", "bbox": [0.258, 0.719, 0.329, 0.778]}]}
|
62 |
+
{"id": "000000210299", "image": "000000210299.jpg", "captions": ["A little boy riding his bike and wearing a helmet", "A little boy raveling down a road on a bike, with a yellow helmet on.", "The boy wears a helmet while riding his bicycle.", "a small child wearing a helmet and riding a bike", "A little boy wearing a helmet and riding a bike."], "instances": [{"category": "person", "bbox": [0.198, 0.259, 0.399, 0.679]}, {"category": "bicycle", "bbox": [0.213, 0.383, 0.408, 0.835]}]}
|
63 |
+
{"id": "000000088218", "image": "000000088218.jpg", "captions": ["Signs proclaim the famous Haight Ashbury intersection and district.", "a pole with street lights, signs and wires attached to it", "A traffic light at the intersection of Haight and Ashbury", "A traffic sign is shown with traffic signs above it.", "The street signs and traffic signal are below wires attached to the pole."], "instances": [{"category": "traffic light", "bbox": [0.443, 0.435, 0.658, 0.721]}]}
|
64 |
+
{"id": "000000020650", "image": "000000020650.jpg", "captions": ["Burger with broccoli, pickle, and fork on orange plate", "On a plate is kept a burger and a bowl of broccoli and a fork.", "There is half a sandwich on an orange plate with a pickle and a bowl of broccoli", "A A bowl and a sandwich on an orange plate on a table.", "A plate has a sandwich, broccoli, and a pickle."], "instances": [{"category": "sandwich", "bbox": [0.436, 0.155, 0.805, 0.859]}, {"category": "sandwich", "bbox": [0.311, 0.006, 0.748, 0.293]}, {"category": "fork", "bbox": [0.0, 0.665, 0.578, 0.876]}, {"category": "bowl", "bbox": [0.002, 0.263, 0.487, 0.744]}, {"category": "bowl", "bbox": [0.708, 0.003, 0.828, 0.03]}, {"category": "broccoli", "bbox": [0.185, 0.288, 0.366, 0.546]}, {"category": "broccoli", "bbox": [0.017, 0.344, 0.384, 0.654]}, {"category": "broccoli", "bbox": [0.31, 0.191, 0.466, 0.463]}, {"category": "broccoli", "bbox": [0.104, 0.107, 0.285, 0.342]}, {"category": "broccoli", "bbox": [0.092, 0.276, 0.242, 0.442]}, {"category": "dining table", "bbox": [0.002, 0.0, 0.999, 0.987]}]}
|
65 |
+
{"id": "000000514915", "image": "000000514915.jpg", "captions": ["A large black dog laying on a kitchen floor.", "A dog is laying down on the floor in the home.", "Black dog laying down on the kitchen floor next to it's bowls and toy", "A black dog with a red collar laying on a tiled floor.", "A black dog that is laying on the floor."], "instances": [{"category": "dog", "bbox": [0.087, 0.276, 0.812, 0.792]}, {"category": "bowl", "bbox": [0.437, 0.09, 0.533, 0.213]}, {"category": "bowl", "bbox": [0.537, 0.035, 0.665, 0.141]}]}
|
66 |
+
{"id": "000000205183", "image": "000000205183.jpg", "captions": ["A duck walking along a paved road next to a patch of grass.", "A close up of a duck walking on a path.", "a duck walks along a cement patch while looking down", "A white duck out of water, walking on the ground.", "A goose standing in the road, looking at the ground."], "instances": [{"category": "bird", "bbox": [0.291, 0.235, 0.859, 0.889]}]}
|
67 |
+
{"id": "000000534270", "image": "000000534270.jpg", "captions": ["Man and woman with umbrella hats sitting on top of a bridge.", "A couple equipped with umbrella hats taking a break from walking their dog on a bridge on a rainy day.", "Two people in ridiculous looking umbrella hats.", "two people with umbrella hats near one another", "A couple of people wearing umbrella hats next to the ocean."], "instances": [{"category": "dog", "bbox": [0.456, 0.832, 0.6, 0.983]}, {"category": "person", "bbox": [0.433, 0.464, 0.636, 0.975]}, {"category": "person", "bbox": [0.263, 0.321, 0.459, 0.978]}, {"category": "boat", "bbox": [0.912, 0.4, 0.978, 0.433]}, {"category": "boat", "bbox": [0.211, 0.236, 0.478, 0.304]}, {"category": "boat", "bbox": [0.144, 0.328, 0.189, 0.361]}, {"category": "umbrella", "bbox": [0.443, 0.402, 0.607, 0.473]}, {"category": "umbrella", "bbox": [0.325, 0.311, 0.483, 0.432]}, {"category": "umbrella", "bbox": [0.207, 0.738, 0.284, 0.778]}, {"category": "umbrella", "bbox": [0.489, 0.713, 0.649, 0.83]}]}
|
68 |
+
{"id": "000000408439", "image": "000000408439.jpg", "captions": ["Cliffs rise on the edge of a placid lake.", "A scenic view of a river with a train on the edge of it in the distance.", "A large lake surrounded by beautiful tree covered mountains.", "a landscape scene with water, mountains and trees", "A train on a waterfront track surrounded by mountains."], "instances": [{"category": "train", "bbox": [0.008, 0.591, 0.562, 0.644]}]}
|
69 |
+
{"id": "000000474253", "image": "000000474253.jpg", "captions": ["A man riding on the back of a horse through a river.", "A person is riding a horse through water.", "Horse and rider crossing waterway during competitive event.", "A woman riding a horse splashes through a large puddle.", "A young man riding a horse through some water."], "instances": [{"category": "horse", "bbox": [0.385, 0.235, 0.651, 0.814]}, {"category": "person", "bbox": [0.396, 0.06, 0.576, 0.675]}, {"category": "person", "bbox": [0.29, 0.148, 0.355, 0.333]}, {"category": "person", "bbox": [0.129, 0.163, 0.212, 0.349]}, {"category": "person", "bbox": [0.005, 0.014, 0.038, 0.165]}, {"category": "person", "bbox": [0.144, 0.011, 0.193, 0.155]}, {"category": "person", "bbox": [0.089, 0.007, 0.133, 0.162]}]}
|
70 |
+
{"id": "000000098029", "image": "000000098029.jpg", "captions": ["a table with many plates on it with a bread basket", "A table set for four has many foods and fruits on it.", "Several objects displayed on a kitchen table including bread, oranges and plating.", "Several dishes and food items sit on a table.", "An assortment of foods sitting on a round brown table."], "instances": [{"category": "refrigerator", "bbox": [0.013, 0.004, 0.37, 0.317]}, {"category": "bottle", "bbox": [0.467, 0.517, 0.555, 0.638]}, {"category": "bottle", "bbox": [0.602, 0.536, 0.658, 0.609]}, {"category": "chair", "bbox": [0.747, 0.367, 1.0, 0.592]}, {"category": "chair", "bbox": [0.044, 0.368, 0.358, 0.544]}, {"category": "cup", "bbox": [0.296, 0.465, 0.359, 0.54]}, {"category": "cup", "bbox": [0.709, 0.67, 0.782, 0.736]}, {"category": "cup", "bbox": [0.213, 0.684, 0.294, 0.753]}, {"category": "knife", "bbox": [0.787, 0.699, 0.922, 0.797]}, {"category": "knife", "bbox": [0.161, 0.539, 0.265, 0.584]}, {"category": "spoon", "bbox": [0.813, 0.674, 0.922, 0.759]}, {"category": "spoon", "bbox": [0.156, 0.555, 0.233, 0.587]}, {"category": "spoon", "bbox": [0.596, 0.467, 0.613, 0.509]}, {"category": "bowl", "bbox": [0.241, 0.753, 0.505, 0.935]}, {"category": "banana", "bbox": [0.632, 0.138, 0.718, 0.161]}, {"category": "apple", "bbox": [0.701, 0.152, 0.758, 0.191]}, {"category": "orange", "bbox": [0.607, 0.66, 0.692, 0.716]}, {"category": "orange", "bbox": [0.565, 0.636, 0.611, 0.667]}, {"category": "orange", "bbox": [0.526, 0.624, 0.572, 0.652]}, {"category": "orange", "bbox": [0.61, 0.628, 0.656, 0.657]}, {"category": "orange", "bbox": [0.599, 0.649, 0.643, 0.677]}, {"category": "dining table", "bbox": [0.013, 0.439, 0.964, 0.986]}, {"category": "cup", "bbox": [0.612, 0.489, 0.669, 0.548]}, {"category": "knife", "bbox": [0.605, 0.457, 0.638, 0.53]}, {"category": "apple", "bbox": [0.502, 0.137, 0.537, 0.159]}, {"category": "orange", "bbox": [0.54, 0.135, 0.563, 0.151]}, {"category": "orange", "bbox": [0.527, 0.129, 0.554, 0.142]}, {"category": "orange", "bbox": [0.611, 0.155, 0.641, 0.171]}, {"category": "chair", "bbox": [0.0, 0.843, 0.29, 0.989]}, {"category": "cup", "bbox": [0.353, 0.469, 0.411, 0.511]}, {"category": "cup", "bbox": [0.609, 0.716, 0.682, 0.786]}, {"category": "orange", "bbox": [0.638, 0.158, 0.679, 0.177]}, {"category": "cake", "bbox": [0.38, 0.821, 0.481, 0.895]}, {"category": "chair", "bbox": [0.79, 0.747, 1.0, 1.0]}, {"category": "bottle", "bbox": [0.719, 0.55, 0.769, 0.616]}, {"category": "bottle", "bbox": [0.795, 0.546, 0.873, 0.613]}, {"category": "knife", "bbox": [0.17, 0.799, 0.264, 0.88]}, {"category": "cup", "bbox": [0.317, 0.695, 0.391, 0.752]}]}
|
71 |
+
{"id": "000000294073", "image": "000000294073.jpg", "captions": ["A woman and a man standing between two brown horses.", "A COUPLE WEARING YELLOW DRESS STANDING NEAR TWO HORSES.", "An older couple stands between two horses.", "A man and a woman standing with two horses", "A man and a woman stand in between two horses."], "instances": [{"category": "horse", "bbox": [0.0, 0.052, 0.49, 0.989]}, {"category": "horse", "bbox": [0.632, 0.23, 1.0, 0.989]}, {"category": "person", "bbox": [0.425, 0.326, 0.696, 0.987]}, {"category": "person", "bbox": [0.627, 0.203, 0.828, 0.986]}, {"category": "book", "bbox": [0.525, 0.597, 0.644, 0.833]}]}
|
72 |
+
{"id": "000000203629", "image": "000000203629.jpg", "captions": ["A man on a cell phone in a public area holding his thumb up.", "A group of people gathered inside of a room.", "A man on his cellphone posing for a picture.", "A man giving a thumbs up while on a cell phone.", "The man is giving a thumbs up while on his phone."], "instances": [{"category": "cell phone", "bbox": [0.43, 0.459, 0.449, 0.503]}, {"category": "cup", "bbox": [0.756, 0.838, 0.865, 0.98]}, {"category": "person", "bbox": [0.232, 0.317, 0.603, 0.98]}, {"category": "person", "bbox": [0.602, 0.405, 1.0, 0.999]}, {"category": "person", "bbox": [0.003, 0.339, 0.313, 0.987]}, {"category": "person", "bbox": [0.164, 0.379, 0.258, 0.733]}, {"category": "person", "bbox": [0.564, 0.36, 0.673, 0.645]}, {"category": "person", "bbox": [0.241, 0.379, 0.336, 0.512]}, {"category": "person", "bbox": [0.682, 0.372, 0.736, 0.502]}, {"category": "person", "bbox": [0.654, 0.428, 0.734, 0.536]}, {"category": "person", "bbox": [0.718, 0.368, 0.787, 0.508]}, {"category": "person", "bbox": [0.148, 0.362, 0.205, 0.529]}, {"category": "person", "bbox": [0.001, 0.431, 0.044, 0.564]}, {"category": "cup", "bbox": [0.901, 0.808, 0.995, 0.982]}]}
|
73 |
+
{"id": "000000119876", "image": "000000119876.jpg", "captions": ["A man dressed loudly is using his cell phone.", "A man talking on the phone while he walks down the street.", "A man with pink hair talking on a cell phone.", "A man in a purple shirt and tie and purple hair.", "a man colored his hair in purple walking on the road"], "instances": [{"category": "bicycle", "bbox": [0.525, 0.222, 0.924, 0.608]}, {"category": "bicycle", "bbox": [0.895, 0.249, 1.0, 0.642]}, {"category": "person", "bbox": [0.0, 0.0, 0.738, 1.0]}, {"category": "tie", "bbox": [0.319, 0.255, 0.423, 0.638]}, {"category": "cell phone", "bbox": [0.411, 0.13, 0.426, 0.161]}, {"category": "handbag", "bbox": [0.369, 0.205, 0.575, 0.839]}]}
|
74 |
+
{"id": "000000164255", "image": "000000164255.jpg", "captions": ["An umbrella that is standing in the sand.", "An umbrella is stuck in the sand on the beach.", "a colorful striped umbrella on the beach near the ocean", "A colorful umbrella is set up at the beach.", "The colorful umbrella is sitting by the beach,"], "instances": [{"category": "umbrella", "bbox": [0.0, 0.101, 0.567, 0.575]}]}
|
75 |
+
{"id": "000000192817", "image": "000000192817.jpg", "captions": ["A view from a window high up in the sky.", "A bunch of mountains seen from a plane window.", "The window from a plane overlooking the ground.", "The view of a mountain area from an airplane window.", "An aerial view of mountains and lakes from an airplane window."], "instances": []}
|
76 |
+
{"id": "000000258285", "image": "000000258285.jpg", "captions": ["Two large passenger jets flying over a beach filled with birds.", "A plane is flying over a bird filed lake", "Two airplanes are in the sky over blue water.", "An airplane landing over an airplane on the ground.", "A photo of two plans with water and birds surrounding it , one plane in the air one one the ground."], "instances": [{"category": "bird", "bbox": [0.507, 0.941, 0.536, 0.973]}, {"category": "bird", "bbox": [0.304, 0.933, 0.315, 0.95]}, {"category": "bird", "bbox": [0.129, 0.885, 0.143, 0.912]}, {"category": "bird", "bbox": [0.158, 0.851, 0.165, 0.87]}, {"category": "bird", "bbox": [0.404, 0.839, 0.429, 0.864]}, {"category": "bird", "bbox": [0.498, 0.833, 0.513, 0.861]}, {"category": "airplane", "bbox": [0.276, 0.085, 0.825, 0.316]}, {"category": "airplane", "bbox": [0.478, 0.252, 0.983, 0.495]}, {"category": "bird", "bbox": [0.552, 0.828, 0.564, 0.844]}, {"category": "bird", "bbox": [0.789, 0.812, 0.798, 0.836]}, {"category": "bird", "bbox": [0.927, 0.82, 0.936, 0.838]}, {"category": "bird", "bbox": [0.65, 0.828, 0.664, 0.849]}, {"category": "bird", "bbox": [0.752, 0.81, 0.763, 0.83]}, {"category": "bird", "bbox": [0.841, 0.817, 0.852, 0.828]}, {"category": "bird", "bbox": [0.292, 0.849, 0.311, 0.868]}, {"category": "bird", "bbox": [0.005, 0.727, 0.981, 0.998]}]}
|
77 |
+
{"id": "000000506483", "image": "000000506483.jpg", "captions": ["An art installation is placed by a street.", "People sit near a display of large artworks including an oversize bench and painted feline heads.", "Looking down on a giant rocking bench and large animal heads.", "An over sized wooden bench next to two massive animal art sculptures.", "artistic sculptures and images on a city street"], "instances": [{"category": "car", "bbox": [0.656, 0.939, 0.933, 1.0]}, {"category": "person", "bbox": [0.08, 0.664, 0.147, 0.805]}, {"category": "person", "bbox": [0.154, 0.646, 0.217, 0.821]}, {"category": "bench", "bbox": [0.316, 0.124, 0.951, 0.635]}, {"category": "backpack", "bbox": [0.062, 0.701, 0.097, 0.769]}, {"category": "person", "bbox": [0.0, 0.132, 0.031, 0.197]}]}
|
78 |
+
{"id": "000000502168", "image": "000000502168.jpg", "captions": ["a fleet of naval ships in the ocean", "A group of men on aircraft carrier with other boats in the distance.", "A large ship floating in the ocean next to other ships.", "Several men on a boat looking over the side.", "The men wear hardhats as they work on the aircraft carrier."], "instances": [{"category": "boat", "bbox": [0.634, 0.292, 1.0, 0.982]}, {"category": "person", "bbox": [0.675, 0.507, 0.736, 0.731]}, {"category": "person", "bbox": [0.684, 0.737, 0.817, 1.0]}, {"category": "person", "bbox": [0.803, 0.691, 0.883, 0.932]}, {"category": "person", "bbox": [0.741, 0.56, 0.798, 0.767]}, {"category": "person", "bbox": [0.924, 0.269, 0.951, 0.367]}, {"category": "boat", "bbox": [0.079, 0.171, 0.172, 0.231]}, {"category": "boat", "bbox": [0.863, 0.131, 0.961, 0.239]}, {"category": "boat", "bbox": [0.435, 0.288, 0.46, 0.313]}, {"category": "boat", "bbox": [0.591, 0.186, 0.605, 0.222]}, {"category": "person", "bbox": [0.451, 0.289, 0.455, 0.296]}, {"category": "person", "bbox": [0.446, 0.29, 0.451, 0.296]}, {"category": "person", "bbox": [0.872, 0.627, 0.957, 0.966]}, {"category": "person", "bbox": [0.44, 0.288, 0.446, 0.3]}]}
|
79 |
+
{"id": "000000319432", "image": "000000319432.jpg", "captions": ["Man holding two shirts with luggage and window", "A man holding clothes on a hanger with a suitcase in front of him.", "A man show a red and a white clothing hangers.", "A man holding his garment bags in both hands", "A man holding up some clothes in some hanger bags."], "instances": [{"category": "person", "bbox": [0.0, 0.092, 0.776, 0.852]}, {"category": "suitcase", "bbox": [0.153, 0.798, 0.587, 1.0]}]}
|
80 |
+
{"id": "000000131019", "image": "000000131019.jpg", "captions": ["Two zebras and two monkeys walking on the grass.", "Two giraffes and another animal are on green grass.", "A baboon and two zebras grazing on the savannah.", "A baboon and its baby eat by two zebras in the grass", "Monkey standing behind two zebras as they graze."], "instances": [{"category": "zebra", "bbox": [0.367, 0.258, 0.834, 0.646]}, {"category": "zebra", "bbox": [0.161, 0.13, 0.396, 0.375]}, {"category": "bird", "bbox": [0.309, 0.138, 0.34, 0.163]}]}
|
llava/eval/table/model.jsonl
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{"model_id": "vicuna-13b:20230322-clean-lang", "model_name": "vicuna-13b", "model_version": "20230322-clean-lang", "model_metadata": "vicuna-13b-20230322-clean-lang"}
|
2 |
+
{"model_id": "alpaca-13b:v1", "model_name": "alpaca-13b", "model_version": "v1", "model_metadata": "alpaca-13b"}
|
3 |
+
{"model_id": "llama-13b:v1", "model_name": "llama-13b", "model_version": "v1", "model_metadata": "hf-llama-13b"}
|
4 |
+
{"model_id": "bard:20230327", "model_name": "bard", "model_version": "20230327", "model_metadata": "Google Bard 20230327"}
|
5 |
+
{"model_id": "gpt-3.5-turbo:20230327", "model_name": "gpt-3.5-turbo", "model_version": "20230327", "model_metadata": "OpenAI ChatGPT gpt-3.5-turbo Chat Completion"}
|
llava/eval/table/prompt.jsonl
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{"prompt_id": 1, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, "description": "Prompt for general questions"}
|
2 |
+
{"prompt_id": 2, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. Then give extra comments starting from the next line."}, "description": "Prompt for coding questions"}
|
3 |
+
{"prompt_id": 3, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question.\nFirstly, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."}, "description": "Prompt for math questions"}
|
4 |
+
{"prompt_id": 4, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Visual Context]\n{context}\n[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, "description": "Prompt for visual questions"}
|
llava/eval/table/question.jsonl
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{"question_id": 1, "text": "How can I improve my time management skills?", "category": "generic"}
|
2 |
+
{"question_id": 2, "text": "What are the most effective ways to deal with stress?", "category": "generic"}
|
3 |
+
{"question_id": 3, "text": "What are the main differences between Python and JavaScript programming languages?", "category": "generic"}
|
4 |
+
{"question_id": 4, "text": "How can I increase my productivity while working from home?", "category": "generic"}
|
5 |
+
{"question_id": 5, "text": "Can you explain the basics of quantum computing?", "category": "generic"}
|
6 |
+
{"question_id": 6, "text": "What are the differences between plant-based and animal-based protein sources?", "category": "generic"}
|
7 |
+
{"question_id": 7, "text": "How can I develop my critical thinking skills?", "category": "generic"}
|
8 |
+
{"question_id": 8, "text": "What are the major challenges faced by the education sector today?", "category": "generic"}
|
9 |
+
{"question_id": 9, "text": "What are the primary factors that influence consumer behavior?", "category": "generic"}
|
10 |
+
{"question_id": 10, "text": "What are the most effective strategies for conflict resolution in the workplace?", "category": "generic"}
|
11 |
+
{"question_id": 11, "text": "What are some potential implications of using a single-use plastic bottle versus a reusable bottle on both the environment and human health?", "category": "knowledge"}
|
12 |
+
{"question_id": 12, "text": "What factors would you consider when designing an inclusive and accessible public transportation system?", "category": "knowledge"}
|
13 |
+
{"question_id": 13, "text": "How can governments utilize fiscal and monetary policies to combat economic recessions?", "category": "knowledge"}
|
14 |
+
{"question_id": 14, "text": "How do language and cultural barriers affect the way people communicate and form relationships in multicultural societies?", "category": "knowledge"}
|
15 |
+
{"question_id": 15, "text": "Describe a scenario where artificial intelligence could be used to improve the quality and efficiency of healthcare delivery.", "category": "knowledge"}
|
16 |
+
{"question_id": 16, "text": "Explain the process of gene editing using CRISPR-Cas9 technology, and discuss its potential applications and ethical implications.", "category": "knowledge"}
|
17 |
+
{"question_id": 17, "text": "How do vaccinations work to protect individuals and communities from infectious diseases, and what is herd immunity?", "category": "knowledge"}
|
18 |
+
{"question_id": 18, "text": "How do social media platforms influence the way people consume and share news, and what are the potential implications for the spread of misinformation?", "category": "knowledge"}
|
19 |
+
{"question_id": 19, "text": "How do cultural, social, and economic factors influence people's food choices, and how can this knowledge be used to promote healthier diets?", "category": "knowledge"}
|
20 |
+
{"question_id": 20, "text": "Explain the process of natural selection and how it contributes to the evolution and adaptation of species.", "category": "knowledge"}
|
21 |
+
{"question_id": 21, "text": "How would you introduce yourself as a medieval knight at a royal banquet?", "category": "roleplay"}
|
22 |
+
{"question_id": 22, "text": "As a pirate captain, what would you say to your crew to motivate them to search for hidden treasure?", "category": "roleplay"}
|
23 |
+
{"question_id": 23, "text": "If you were a Shakespearean character, how would you declare your love for someone in a soliloquy?", "category": "roleplay"}
|
24 |
+
{"question_id": 24, "text": "As a superhero, how would you explain your origin story to a curious child?", "category": "roleplay"}
|
25 |
+
{"question_id": 25, "text": "Imagine you are a time traveler from the year 3000. What technological advancements would you tell people about?", "category": "roleplay"}
|
26 |
+
{"question_id": 26, "text": "As a sports commentator, describe the winning play in the final seconds of a championship game.", "category": "roleplay"}
|
27 |
+
{"question_id": 27, "text": "Pretend to be a world-famous chef. How would you describe your signature dish to a panel of judges?", "category": "roleplay"}
|
28 |
+
{"question_id": 28, "text": "You are a mountain climber reaching the summit of Mount Everest. Describe your emotions and the view from the top.", "category": "roleplay"}
|
29 |
+
{"question_id": 29, "text": "As a space colonist on Mars, describe your daily life and the challenges you face living on another planet.", "category": "roleplay"}
|
30 |
+
{"question_id": 30, "text": "Pretend to be a character in a post-apocalyptic world. Describe how you survive and the allies you encounter.", "category": "roleplay"}
|
31 |
+
{"question_id": 31, "text": "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?", "category": "common-sense"}
|
32 |
+
{"question_id": 32, "text": "What are some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed?", "category": "common-sense"}
|
33 |
+
{"question_id": 33, "text": "Why might someone choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app?", "category": "common-sense"}
|
34 |
+
{"question_id": 34, "text": "How can you determine if a person is genuinely interested in a conversation or simply being polite?", "category": "common-sense"}
|
35 |
+
{"question_id": 35, "text": "Why might someone prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher?", "category": "common-sense"}
|
36 |
+
{"question_id": 36, "text": "How can you assess the credibility of a source of information, such as a news article or blog post, without relying solely on the reputation of the author or publisher?", "category": "common-sense"}
|
37 |
+
{"question_id": 37, "text": "Why do some people enjoy the sensation of being scared, such as by watching horror movies or going on roller coasters, while others avoid these experiences?", "category": "common-sense"}
|
38 |
+
{"question_id": 38, "text": "How can observing the behavior of other people in a social situation provide clues about cultural norms and expectations?", "category": "common-sense"}
|
39 |
+
{"question_id": 39, "text": "Do we have a moral obligation to explore space, or should we focus on solving Earth's problems first?", "category": "common-sense"}
|
40 |
+
{"question_id": 40, "text": "In a world where automation is becoming increasingly prevalent, is it more important to prioritize job creation or technological progress?", "category": "common-sense"}
|
41 |
+
{"question_id": 41, "text": "How many times does the average human blink in a lifetime? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
42 |
+
{"question_id": 42, "text": "How many atoms are in a grain of salt? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
43 |
+
{"question_id": 43, "text": "How many lightning strikes occur on Earth each day? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
44 |
+
{"question_id": 44, "text": "How many balloons would it take to lift a house like in the movie \"Up\"? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
45 |
+
{"question_id": 45, "text": "How many text messages are sent globally in a minute? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
46 |
+
{"question_id": 46, "text": "How many words are spoken daily on Earth? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
47 |
+
{"question_id": 47, "text": "How many snowflakes fall during a typical winter? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
48 |
+
{"question_id": 48, "text": "How many pages are in all the books ever written? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
49 |
+
{"question_id": 49, "text": "How many times has the Earth orbited the Sun since the beginning of life? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
50 |
+
{"question_id": 50, "text": "How many songs have been recorded throughout history? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
|
51 |
+
{"question_id": 51, "text": "What if the Internet had been invented during the Renaissance period?", "category": "counterfactual"}
|
52 |
+
{"question_id": 52, "text": "What if the Aztecs had successfully repelled the Spanish conquistadors?", "category": "counterfactual"}
|
53 |
+
{"question_id": 53, "text": "What if the Black Death had not occurred in the 14th century?", "category": "counterfactual"}
|
54 |
+
{"question_id": 54, "text": "What if Isaac Newton had focused on biology instead of physics?", "category": "counterfactual"}
|
55 |
+
{"question_id": 55, "text": "What if the Beatles had never formed as a band?", "category": "counterfactual"}
|
56 |
+
{"question_id": 56, "text": "What if Alan Turing had not cracked the Enigma code during World War II?", "category": "counterfactual"}
|
57 |
+
{"question_id": 57, "text": "What if the Suez Canal had never been constructed?", "category": "counterfactual"}
|
58 |
+
{"question_id": 58, "text": "What if the Maya civilization had never mysteriously collapsed?", "category": "counterfactual"}
|
59 |
+
{"question_id": 59, "text": "What if Christopher Columbus had not discovered the Americas?", "category": "counterfactual"}
|
60 |
+
{"question_id": 60, "text": "What if Vincent van Gogh had been a successful artist during his lifetime?", "category": "counterfactual"}
|
61 |
+
{"question_id": 61, "text": "Develop a C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file.", "category": "coding"}
|
62 |
+
{"question_id": 62, "text": "Implement a Python function to find the longest common subsequence of two input strings using dynamic programming.", "category": "coding"}
|
63 |
+
{"question_id": 63, "text": "Implement a regular expression in Python to validate an email address.", "category": "coding"}
|
64 |
+
{"question_id": 64, "text": "Write a program to find the nth Fibonacci number using dynamic programming.", "category": "coding"}
|
65 |
+
{"question_id": 65, "text": "Implement a binary search algorithm to find a specific element in a sorted array.", "category": "coding"}
|
66 |
+
{"question_id": 66, "text": "Implement a queue data structure using two stacks in Python.", "category": "coding"}
|
67 |
+
{"question_id": 67, "text": "Implement a program to find the common elements in two arrays without using any extra data structures.", "category": "coding"}
|
68 |
+
{"question_id": 68, "text": "Given that f(x) = 5x^3 - 2x + 3, find the value of f(2).", "category": "math"}
|
69 |
+
{"question_id": 69, "text": "Solve for x in the equation 3x + 10 = 5(x - 2).", "category": "math"}
|
70 |
+
{"question_id": 70, "text": "If the endpoints of a line segment are (2, -2) and (10, 4), what is the length of the segment?", "category": "math"}
|
71 |
+
{"question_id": 71, "text": "Can you help me write a formal email to a potential business partner proposing a joint venture?", "category": "writing"}
|
72 |
+
{"question_id": 72, "text": "Can you help me write a resignation letter to my current employer, while leaving on good terms and expressing gratitude for the opportunities provided?", "category": "writing"}
|
73 |
+
{"question_id": 73, "text": "Use an appropriate format to structure a formal letter of recommendation for a student applying to a prestigious graduate program in computer science.", "category": "writing"}
|
74 |
+
{"question_id": 74, "text": "Write a compelling product launch announcement email to inform our customers of our new software solution.", "category": "writing"}
|
75 |
+
{"question_id": 75, "text": "Draft an apology email to a customer who experienced a delay in their order, and provide reassurance that the issue has been resolved.", "category": "writing"}
|
76 |
+
{"question_id": 76, "text": "Write a script for a YouTube video exploring the history and cultural significance of jazz.", "category": "writing"}
|
77 |
+
{"question_id": 77, "text": "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "category": "writing"}
|
78 |
+
{"question_id": 78, "text": "Write a captivating movie review for a recently released science fiction film, discussing its plot, characters, and special effects.", "category": "writing"}
|
79 |
+
{"question_id": 79, "text": "Structure a podcast script for an episode discussing the influence of streaming platforms on the music industry.", "category": "writing"}
|
80 |
+
{"question_id": 80, "text": "Write a symphony concert review, discussing the orchestra's performance and overall audience experience.", "category": "writing"}
|
llava/eval/table/results/test_sqa_llava_13b_v0.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|