Upload CLIPSegForImageSegmentation
- config.json +0 -153
- pytorch_model.bin +2 -2
config.json CHANGED
@@ -94,82 +94,6 @@
     "use_bfloat16": false,
     "vocab_size": 49408
   },
-  "text_config_dict": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": 0,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 512,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 2048,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 77,
-    "min_length": 0,
-    "model_type": "clipseg_text_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 8,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": 1,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.25.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "vocab_size": 49408
-  },
   "torch_dtype": "float32",
   "transformers_version": null,
   "use_complex_transposed_convolution": true,
@@ -249,82 +173,5 @@
     "transformers_version": "4.25.0.dev0",
     "typical_p": 1.0,
     "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 768,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 224,
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 3072,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
-    "model_type": "clipseg_vision_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 16,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.25.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false
   }
 }
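The change drops the duplicated `text_config_dict` and `vision_config_dict` blocks; the nested text and vision sub-configs that remain (visible in the diff context, e.g. `"vocab_size": 49408` and `"use_complex_transposed_convolution": true`) are what `transformers` reads when it builds a `CLIPSegConfig`. Below is a minimal sketch of loading the cleaned-up config and the uploaded weights; the checkpoint id `CIDAS/clipseg-rd64-refined` is an assumption for illustration, so substitute the repo this commit actually belongs to.

```python
# Sketch only: "CIDAS/clipseg-rd64-refined" is an assumed repo id, not
# confirmed by this commit page.
from transformers import CLIPSegConfig, CLIPSegForImageSegmentation

config = CLIPSegConfig.from_pretrained("CIDAS/clipseg-rd64-refined")

# The nested sub-configs carry the values seen in the diff above.
print(config.text_config.hidden_size)    # 512
print(config.vision_config.hidden_size)  # 768
print(config.vision_config.patch_size)   # 16

# The uploaded pytorch_model.bin is loaded through this same config.
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
```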
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dd9308225b8314bb7236f207e6ea72b22db5d90dba03fe3dc7d654f54dcfd08a
+size 603143713
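Because `pytorch_model.bin` is tracked with Git LFS, what the repo stores is only this pointer file recording the blob's SHA-256 and byte size; the weights themselves live in LFS storage. A small sketch, assuming the file has already been downloaded to the working directory, that verifies a local copy against the new pointer:

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above.
# The local path is an assumption; adjust it to where the file was saved.
import hashlib
import os

EXPECTED_OID = "dd9308225b8314bb7236f207e6ea72b22db5d90dba03fe3dc7d654f54dcfd08a"
EXPECTED_SIZE = 603143713  # bytes, from the pointer's "size" line

path = "pytorch_model.bin"

# Cheap check first: the byte size must match the pointer.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Then hash the file in 1 MiB chunks to avoid loading ~600 MB at once.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("weights match the LFS pointer")
```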