cristianglezm committed
Commit 8cc1bed
1 Parent(s): 0f7f53b

upload model

README.md CHANGED
@@ -1,3 +1,93 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ tags:
+ - image-to-text
+ - image-captioning
+ license: apache-2.0
+ base_model: nlpconnect/vit-gpt2-image-captioning
+ widget:
+ - src: >-
+     https://huggingface.co/datasets/cristianglezm/FlowerEvolver-Dataset/resolve/main/flowers/001.png
+   example_title: Flower 1
+ - src: >-
+     https://huggingface.co/datasets/cristianglezm/FlowerEvolver-Dataset/resolve/main/flowers/002.png
+   example_title: Flower 2
+ - src: >-
+     https://huggingface.co/datasets/cristianglezm/FlowerEvolver-Dataset/resolve/main/flowers/003.png
+   example_title: Flower 3
+ datasets:
+ - cristianglezm/FlowerEvolver-Dataset
+ metrics:
+ - rouge
+ pipeline_tag: image-to-text
+ library_name: transformers.js
+ ---
+
+ # ViT-GPT2-FlowerCaptioner-ONNX
+
+ This model is a fine-tuned version of [nlpconnect/vit-gpt2-image-captioning](https://huggingface.co/nlpconnect/vit-gpt2-image-captioning), trained on the [FlowerEvolver-Dataset](https://huggingface.co/datasets/cristianglezm/FlowerEvolver-Dataset).
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3075
+ - Rouge1: 66.3702
+ - Rouge2: 45.5642
+ - Rougel: 61.401
+ - Rougelsum: 64.0587
+ - Gen Len: 49.97
+
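The ROUGE values above are reported on a 0-100 scale. For reference, a minimal sketch of computing comparable scores with the `evaluate` library (the caption strings below are placeholders, not items from the evaluation set):

```python
import evaluate

# Placeholder reference/prediction pair, for illustration only.
references = ["A flower with 12 petals in a smooth gradient of green and blue."]
predictions = ["A flower with 12 petals colored green and blue."]

rouge = evaluate.load("rouge")
scores = rouge.compute(predictions=predictions, references=references)
# `evaluate` returns fractions in [0, 1]; multiply by 100 to match the card.
print({k: round(v * 100, 4) for k, v in scores.items()})
```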
+ ## Sample running code
+
+ With Python:
+
+ ```python
+ import torch
+ from transformers import pipeline
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ FlowerCaptioner = pipeline("image-to-text", model="cristianglezm/ViT-GPT2-FlowerCaptioner", device=device)
+ FlowerCaptioner(["flower1.png"])
+ # A flower with 12 petals in a smooth gradient of green and blue.
+ # The center is green with black accents. The stem is long and green.
+ ```
+
+ With JavaScript:
+
+ ```javascript
+ import { pipeline } from '@xenova/transformers';
+
+ // Allocate an image-to-text pipeline backed by the ONNX weights in this repo.
+ let pipe = await pipeline('image-to-text', 'cristianglezm/ViT-GPT2-FlowerCaptioner-ONNX');
+
+ let out = await pipe('flower image url');
+ // A flower with 12 petals in a smooth gradient of green and blue.
+ // The center is green with black accents. The stem is long and green.
+ ```
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 3
+
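The hyperparameters above map fairly directly onto a `Seq2SeqTrainingArguments` configuration; a hedged reconstruction is sketched below. The output directory and per-epoch evaluation strategy are assumptions, not taken from the original training script; the Adam betas and epsilon match the library defaults.

```python
from transformers import Seq2SeqTrainingArguments

# Rough reconstruction of the listed hyperparameters; output_dir and
# evaluation_strategy are assumptions.
training_args = Seq2SeqTrainingArguments(
    output_dir="./FlowerCaptioner-out",
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=3,
    predict_with_generate=True,   # needed to compute ROUGE from generated captions
    evaluation_strategy="epoch",
)
```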
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | 0.6755 | 1.0 | 100 | 0.5339 | 60.9402 | 39.3331 | 54.6889 | 59.45 | 36.75 |
+ | 0.3666 | 2.0 | 200 | 0.3331 | 65.5149 | 43.0245 | 59.3121 | 62.7329 | 52.82 |
+ | 0.2983 | 3.0 | 300 | 0.3075 | 66.3702 | 45.5642 | 61.401 | 64.0587 | 49.97 |
+
+
+ ### Framework versions
+
+ - Transformers 4.33.2
+ - Pytorch 2.4.1+cu124
+ - Datasets 2.20.0
+ - Tokenizers 0.13.3
config.json ADDED
@@ -0,0 +1,185 @@
+ {
+   "_name_or_path": "./models/FlowerCaptioner",
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "bos_token_id": 50256,
+   "decoder": {
+     "_name_or_path": "",
+     "activation_function": "gelu_new",
+     "add_cross_attention": true,
+     "architectures": [
+       "GPT2LMHeadModel"
+     ],
+     "attn_pdrop": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 50256,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": 50256,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "embd_pdrop": 0.1,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 50256,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_epsilon": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "gpt2",
+     "n_ctx": 1024,
+     "n_embd": 768,
+     "n_head": 12,
+     "n_inner": null,
+     "n_layer": 12,
+     "n_positions": 1024,
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 50256,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "reorder_and_upcast_attn": false,
+     "repetition_penalty": 1.0,
+     "resid_pdrop": 0.1,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_attn_by_inverse_layer_idx": false,
+     "scale_attn_weights": true,
+     "sep_token_id": null,
+     "summary_activation": null,
+     "summary_first_dropout": 0.1,
+     "summary_proj_to_labels": true,
+     "summary_type": "cls_index",
+     "summary_use_proj": true,
+     "suppress_tokens": null,
+     "task_specific_params": {
+       "text-generation": {
+         "do_sample": true,
+         "max_length": 50
+       }
+     },
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50257
+   },
+   "decoder_start_token_id": 50256,
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "ViTModel"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "encoder_stride": 16,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "vit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "eos_token_id": 50256,
+   "is_encoder_decoder": true,
+   "model_type": "vision-encoder-decoder",
+   "pad_token_id": 50256,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.33.2"
+ }
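The `config.json` above pairs a ViT-Base encoder (12 layers, hidden size 768, 224x224 input) with a GPT-2 decoder (12 layers, vocabulary 50257) inside a `VisionEncoderDecoderModel`. A small sketch of inspecting that structure from the config alone:

```python
from transformers import AutoConfig

# config.json alone is enough to inspect the architecture, even in this ONNX repo.
config = AutoConfig.from_pretrained("cristianglezm/ViT-GPT2-FlowerCaptioner-ONNX")
print(config.model_type)          # vision-encoder-decoder
print(config.encoder.model_type)  # vit
print(config.decoder.model_type)  # gpt2
```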
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "decoder_start_token_id": 50256,
+   "eos_token_id": 50256,
+   "max_new_tokens": 128,
+   "pad_token_id": 50256,
+   "transformers_version": "4.33.2"
+ }
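These are the decoding defaults that `generate()` picks up: greedy decoding with up to 128 new tokens. A minimal manual-inference sketch, assuming the PyTorch checkpoint from the README example (`cristianglezm/ViT-GPT2-FlowerCaptioner`) is used, since this repo ships ONNX weights, and that it provides the matching image processor and tokenizer:

```python
from PIL import Image
from transformers import AutoTokenizer, ViTImageProcessor, VisionEncoderDecoderModel

# Assumption: the PyTorch sibling repo from the README example.
repo = "cristianglezm/ViT-GPT2-FlowerCaptioner"
model = VisionEncoderDecoderModel.from_pretrained(repo)
processor = ViTImageProcessor.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

image = Image.open("flower1.png").convert("RGB")  # placeholder path
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# Greedy decoding; max_new_tokens=128 comes from generation_config.json.
output_ids = model.generate(pixel_values)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```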
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2595f2af7fb6d879eb3e80f4e0ee2958c5c2dce039ec9bfaddd677a7001b43
+ size 613153019
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdc6df54706d46d199dbe8bee353757c905d9dd2f4355b9f7771a33ca7a24f8d
+ size 615070521
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2585e1276bdb3da15ed1041c53593268721a174b0b39c0a28863544db6245ad2
+ size 158063351
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d67a2e2c9bd64f893cd4afe1126550ec9175309bfb522663205e6ed8bec23ba1
+ size 155710792
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21dd43d250fc5300859594e7a111e393adc5822798186aa1c18a125351258197
+ size 613149344
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c23e86396e4ce9b690ae55f5922f68cd58cffcd8ad1068deaefbcff4e3cea1c
+ size 155701341
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091df2b5b67fde5a63f2323d5346063e0e1b312caa3e8aeeb62c382c4344d77c
+ size 343410667
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fabd01cd11696da81627401da9ef9bc6dea70d4995be902034b0ddfa2acdacbd
+ size 87000252
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTFeatureExtractor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
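In short: resize to 224x224, rescale by 1/255, then normalize each channel with mean and std 0.5, so pixel values end up roughly in [-1, 1]. A small sketch of the equivalent preprocessing step (the image path is a placeholder):

```python
from PIL import Image
from transformers import ViTImageProcessor

# Loads the settings shown above from this repo's preprocessor_config.json.
processor = ViTImageProcessor.from_pretrained("cristianglezm/ViT-GPT2-FlowerCaptioner-ONNX")

image = Image.open("flower1.png").convert("RGB")  # placeholder path
pixel_values = processor(images=image, return_tensors="np").pixel_values
print(pixel_values.shape)                       # (1, 3, 224, 224)
print(pixel_values.min(), pixel_values.max())   # roughly -1.0 and 1.0
```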
quantize_config.json ADDED
@@ -0,0 +1,119 @@
+ {
+   "per_channel": false,
+   "reduce_range": false,
+   "per_model_config": {
+     "decoder_model": {
+       "op_types": [
+         "Squeeze",
+         "MatMul",
+         "Pow",
+         "Tanh",
+         "Gather",
+         "Sub",
+         "ConstantOfShape",
+         "Mul",
+         "Where",
+         "Concat",
+         "Transpose",
+         "Div",
+         "Add",
+         "Slice",
+         "Unsqueeze",
+         "Softmax",
+         "Gemm",
+         "Constant",
+         "Range",
+         "LayerNormalization",
+         "Reshape",
+         "Shape",
+         "Cast",
+         "Split"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model_merged": {
+       "op_types": [
+         "Squeeze",
+         "MatMul",
+         "Pow",
+         "Tanh",
+         "Gather",
+         "If",
+         "Sub",
+         "ConstantOfShape",
+         "Mul",
+         "Where",
+         "Concat",
+         "Transpose",
+         "Div",
+         "Add",
+         "Slice",
+         "Unsqueeze",
+         "Softmax",
+         "Gemm",
+         "Constant",
+         "Range",
+         "LayerNormalization",
+         "Reshape",
+         "Shape",
+         "Cast",
+         "Split"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_with_past_model": {
+       "op_types": [
+         "Squeeze",
+         "MatMul",
+         "Pow",
+         "Tanh",
+         "Gather",
+         "Sub",
+         "ConstantOfShape",
+         "Mul",
+         "Where",
+         "Concat",
+         "Transpose",
+         "Div",
+         "Add",
+         "Slice",
+         "Unsqueeze",
+         "Softmax",
+         "Gemm",
+         "Constant",
+         "Range",
+         "LayerNormalization",
+         "Reshape",
+         "Shape",
+         "Cast",
+         "Split"
+       ],
+       "weight_type": "QInt8"
+     },
+     "encoder_model": {
+       "op_types": [
+         "MatMul",
+         "Gather",
+         "ConstantOfShape",
+         "Where",
+         "Mul",
+         "Expand",
+         "Concat",
+         "Transpose",
+         "Conv",
+         "Div",
+         "Add",
+         "Equal",
+         "Slice",
+         "Unsqueeze",
+         "Softmax",
+         "Constant",
+         "LayerNormalization",
+         "Reshape",
+         "Shape",
+         "Erf"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
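The quantized ONNX files can also be run directly with ONNX Runtime, outside transformers.js. A minimal sketch for the quantized encoder, assuming the file has been downloaded locally and that the input name follows the usual Optimum export convention (`pixel_values`):

```python
import numpy as np
import onnxruntime as ort

# Assumption: onnx/encoder_model_quantized.onnx was downloaded from this repo.
session = ort.InferenceSession("onnx/encoder_model_quantized.onnx")

# The ViT encoder expects normalized 224x224 RGB pixel values (see preprocessor_config.json);
# check session.get_inputs() if the input name differs.
pixel_values = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = session.run(None, {"pixel_values": pixel_values})
print(outputs[0].shape)  # encoder hidden states, roughly (1, 197, 768)
```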
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "max_length": 32,
+   "model_max_length": 1024,
+   "pad_to_multiple_of": null,
+   "pad_token": "<|endoftext|>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff