Qingyun committed on
Commit 64873d7
1 Parent(s): 42a8982

upload checkpoint files

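To work against exactly this snapshot rather than whatever main later points to, the download can be pinned to the commit hash above. A minimal sketch with huggingface_hub; the repo id is a placeholder, since the commit page does not name the repository:

from huggingface_hub import snapshot_download

# Pin to commit 64873d7 from the header above instead of the moving main branch.
# NOTE: the repo_id below is hypothetical; substitute the repository this
# commit actually belongs to.
local_dir = snapshot_download(
    repo_id="OpenGVLab/internvl-checkpoint",  # placeholder, not shown on this page
    revision="64873d7",
)
print(local_dir)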
added_tokens.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "</box>": 92552,
+   "</img>": 92545,
+   "</quad>": 92548,
+   "</ref>": 92550,
+   "<IMG_CONTEXT>": 92546,
+   "<box>": 92551,
+   "<img>": 92544,
+   "<quad>": 92547,
+   "<ref>": 92549
+ }
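added_tokens.json registers the multimodal special tokens (image, reference, box, and quad markers) at fixed ids at the top of the 92,553-entry vocabulary. A minimal check that the tokenizer resolves them as listed above; the repo id is again a placeholder:

from transformers import AutoTokenizer

# Placeholder repo id; point this at the repository holding these files.
tok = AutoTokenizer.from_pretrained("OpenGVLab/internvl-checkpoint",
                                    trust_remote_code=True)

for t in ["<img>", "</img>", "<IMG_CONTEXT>", "<box>", "</box>",
          "<quad>", "</quad>", "<ref>", "</ref>"]:
    print(t, tok.convert_tokens_to_ids(t))
# e.g. "<img>" -> 92544 and "<IMG_CONTEXT>" -> 92546, matching added_tokens.json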
config.json ADDED
@@ -0,0 +1,195 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "InternVLChatModel"
+   ],
+   "downsample_ratio": 0.5,
+   "dynamic_image_size": false,
+   "force_image_size": 448,
+   "llm_config": {
+     "_name_or_path": "./pretrained/internlm2-chat-7b",
+     "add_cross_attention": false,
+     "architectures": [
+       "InternLM2ForCausalLM"
+     ],
+     "attn_implementation": "flash_attention_2",
+     "auto_map": {
+       "AutoConfig": "configuration_internlm2.InternLM2Config",
+       "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+       "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+     },
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bias": false,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 14336,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 32768,
+     "min_length": 0,
+     "model_type": "internlm2",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 32,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 32,
+     "num_key_value_heads": 8,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 2,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 1000000,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.37.2",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 92553
+   },
+   "max_dynamic_patch": 1,
+   "min_dynamic_patch": 1,
+   "model_type": "internvl_chat",
+   "pad2square": false,
+   "ps_version": "v2",
+   "select_layer": -1,
+   "template": "internlm2-chat",
+   "torch_dtype": "bfloat16",
+   "transformers_version": null,
+   "use_backbone_lora": 0,
+   "use_llm_lora": 0,
+   "use_thumbnail": false,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "InternVisionModel"
+     ],
+     "attention_dropout": 0.0,
+     "auto_map": {
+       "AutoConfig": "configuration_intern_vit.InternVisionConfig",
+       "AutoModel": "modeling_intern_vit.InternVisionModel"
+     },
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "drop_path_rate": 0.0,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 448,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "intern_vit_6b",  # note: the weights here are InternViT-300M; "intern_vit_6b" is only the model_type identifier
+     "no_repeat_ngram_size": 0,
+     "norm_type": "layer_norm",
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qk_normalization": false,
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.37.2",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_flash_attn": true
+   }
+ }
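Together, force_image_size 448, patch_size 14, and downsample_ratio 0.5 fix the visual token budget: the ViT produces a 32x32 patch grid, and the v2 pixel shuffle quarters it to 256 <IMG_CONTEXT> tokens per 448x448 tile (max_dynamic_patch is 1 here, so one tile per image). A quick check using only values from the config above:

# Visual tokens per image tile, derived from config.json.
image_size = 448        # force_image_size / vision_config.image_size
patch_size = 14         # vision_config.patch_size
downsample_ratio = 0.5  # pixel shuffle ("ps_version": "v2")

patches_per_side = image_size // patch_size                   # 32
num_patches = patches_per_side ** 2                           # 1024 ViT patches
num_image_tokens = int(num_patches * downsample_ratio ** 2)   # 256 LLM tokens
print(num_patches, num_image_tokens)  # 1024 256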
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.37.2"
+ }
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d4032e354b998117c167bb102a7d159c6aab6e49c7d09ba8aa058e5871cf814
+ size 4939944336
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5919e9c6666e88e4ad9b2ffcaa90780b00eb3aea5a391e549bd5ebb1de28c69
+ size 4915914584
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a95c8e81b8c5b39cc0f9128df5f26a343d101693622c125802f3bdae3a62d2f
+ size 4915914592
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f8fb8f75fbba82842ba2ffa77360e53abb9f244a944bd3934c73c9839d118d4
+ size 1379026920
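The four entries above are Git LFS pointer files, not weights: each records the LFS spec version, the SHA-256 of the real shard, and its size in bytes. After fetching the actual shards, integrity can be verified against the pointer, for example:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so multi-GB shards never sit in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

# Expected digest copied from the first pointer above.
expected = "4d4032e354b998117c167bb102a7d159c6aab6e49c7d09ba8aa058e5871cf814"
assert sha256_of("model-00001-of-00004.safetensors") == expected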
model.safetensors.index.json ADDED
@@ -0,0 +1,580 @@
+ {
+   "metadata": {
+     "total_size": 16150730752
+   },
+   "weight_map": {
+     "language_model.model.layers.0.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.0.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.1.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.10.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.10.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.11.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.12.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.13.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.14.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.15.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.16.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.17.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.18.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.19.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.19.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.19.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.19.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.19.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.19.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.19.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.2.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.2.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.20.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.21.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.22.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.23.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.24.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.25.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.26.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.27.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.28.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.attention_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.29.ffn_norm.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.3.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.3.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.30.attention.wo.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.30.attention.wqkv.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.30.attention_norm.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.30.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.30.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.30.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
+     "language_model.model.layers.30.ffn_norm.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.attention.wo.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.attention.wqkv.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.attention_norm.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.feed_forward.w3.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.31.ffn_norm.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.layers.4.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.4.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.5.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.6.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.attention_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.7.ffn_norm.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.8.attention.wo.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.8.attention.wqkv.weight": "model-00001-of-00004.safetensors",
+     "language_model.model.layers.8.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.8.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.8.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.8.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.8.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.attention.wo.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.attention.wqkv.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.attention_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.layers.9.ffn_norm.weight": "model-00002-of-00004.safetensors",
+     "language_model.model.norm.weight": "model-00004-of-00004.safetensors",
+     "language_model.model.tok_embeddings.weight": "model-00001-of-00004.safetensors",
+     "language_model.output.weight": "model-00004-of-00004.safetensors",
+     "mlp1.0.bias": "model-00004-of-00004.safetensors",
+     "mlp1.0.weight": "model-00004-of-00004.safetensors",
+     "mlp1.1.bias": "model-00004-of-00004.safetensors",
+     "mlp1.1.weight": "model-00004-of-00004.safetensors",
+     "mlp1.3.bias": "model-00004-of-00004.safetensors",
+     "mlp1.3.weight": "model-00004-of-00004.safetensors",
+     "vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
+     "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
+     "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
+     "vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
+     "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
+   }
+ }
575
+ "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
576
+ "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
577
+ "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
578
+ "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
579
+ }
580
+ }
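The weight map above is the standard `model.safetensors.index.json` layout: every tensor name points at the shard that stores it. A minimal sketch of resolving one tensor by hand (assumes the `safetensors` package is installed, the index and the four shards have been downloaded into `checkpoint_dir`, and the tensor name is simply one entry picked from the map above):

import json
import os

from safetensors.torch import load_file

checkpoint_dir = "."  # hypothetical local copy of this checkpoint
with open(os.path.join(checkpoint_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "vision_model.encoder.layers.9.norm2.weight"
shard = index["weight_map"][name]  # "model-00001-of-00004.safetensors"
tensor = load_file(os.path.join(checkpoint_dir, shard))[name]
print(name, tuple(tensor.shape), tensor.dtype)

`from_pretrained` performs this shard lookup automatically; the manual version is only useful for spot-checking a single weight without loading the whole model.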
special_tokens_map.json ADDED
@@ -0,0 +1,101 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|action_start|>",
+     "<|action_end|>",
+     "<|interpreter|>",
+     "<|plugin|>",
+     {
+       "content": "<img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<IMG_CONTEXT>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
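A quick way to confirm these special tokens survive a round trip is to load the checkpoint's tokenizer and inspect them. A sketch, assuming a local copy of this repo (the path is a placeholder) and `trust_remote_code=True`, which is required because the tokenizer class ships with the checkpoint in `tokenization_internlm2.py`, added next:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./path/to/this/checkpoint", trust_remote_code=True)
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)  # <s> </s> </s>
# the image tokens should map to the ids registered in tokenizer_config.json (92544-92546)
print(tokenizer.convert_tokens_to_ids(["<img>", "</img>", "<IMG_CONTEXT>"]))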
tokenization_internlm2.py ADDED
@@ -0,0 +1,236 @@
+ # coding=utf-8
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tokenization classes for InternLM."""
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
+ class InternLM2Tokenizer(PreTrainedTokenizer):
+     """
+     Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ["input_ids", "attention_mask"]
+     _auto_class = "AutoTokenizer"
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token="</s>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         decode_with_prefix_space=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.decode_with_prefix_space = decode_with_prefix_space
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         self._no_prefix_space_tokens = None
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def no_prefix_space_tokens(self):
+         if self._no_prefix_space_tokens is None:
+             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
+         return self._no_prefix_space_tokens
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     @property
+     def bos_token_id(self) -> Optional[int]:
+         return self.sp_model.bos_id()
+
+     @property
+     def eos_token_id(self) -> Optional[int]:
+         return self.sp_model.eos_id()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) into an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) into a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def _maybe_add_prefix_space(self, tokens, decoded):
+         if tokens and tokens[0] not in self.no_prefix_space_tokens:
+             return " " + decoded
+         else:
+             return decoded
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) into a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for token in tokens:
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         out_string = self.clean_up_tokenization(out_string)
+         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+         return out_string[1:]
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         if self.add_bos_token:
+             bos_token_ids = [self.bos_token_id]
+         else:
+             bos_token_ids = []
+
+         output = bos_token_ids + token_ids_0
+
+         if token_ids_1 is not None:
+             output = output + token_ids_1
+
+         if self.add_eos_token:
+             output = output + [self.eos_token_id]
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does
+         not make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         eos = [self.eos_token_id]
+
+         if token_ids_1 is None:
+             return len(token_ids_0 + eos) * [0]
+         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
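The class can also be instantiated directly from the files in this commit, bypassing `AutoTokenizer`. A sketch, assuming the working directory contains `tokenization_internlm2.py` and `tokenizer.model` (added next) and that `sentencepiece` is installed; the sample sentence is arbitrary:

from tokenization_internlm2 import InternLM2Tokenizer

tok = InternLM2Tokenizer(vocab_file="./tokenizer.model")
ids = tok("a photo of two cats")["input_ids"]
print(ids[0] == tok.bos_token_id)  # True: add_bos_token defaults to True, add_eos_token to False
print(tok.decode(ids, skip_special_tokens=True))

Note that this direct path skips the special-token registration done by `special_tokens_map.json` and `tokenizer_config.json`, so the image tokens are only available via the full `AutoTokenizer` route.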
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,179 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92538": {
+       "content": "<|plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92539": {
+       "content": "<|interpreter|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92540": {
+       "content": "<|action_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92541": {
+       "content": "<|action_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92542": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92543": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92544": {
+       "content": "<img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92545": {
+       "content": "</img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92546": {
+       "content": "<IMG_CONTEXT>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92547": {
+       "content": "<quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92548": {
+       "content": "</quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92549": {
+       "content": "<ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92550": {
+       "content": "</ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92551": {
+       "content": "<box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92552": {
+       "content": "</box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|action_start|>",
+     "<|action_end|>",
+     "<|interpreter|>",
+     "<|plugin|>",
+     "<img>",
+     "</img>",
+     "<IMG_CONTEXT>",
+     "<quad>",
+     "</quad>",
+     "<ref>",
+     "</ref>",
+     "<box>",
+     "</box>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_internlm2.InternLM2Tokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "tokenizer_class": "InternLM2Tokenizer",
+   "unk_token": "<unk>"
+ }
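The `chat_template` above is a ChatML-style Jinja template: BOS first, then each message wrapped in `<|im_start|>role ... <|im_end|>`, plus an optional generation prompt. A sketch of rendering it, assuming `tokenizer` was loaded as in the earlier sketch and a transformers version recent enough to support `apply_chat_template` (4.34 or later):

messages = [{"role": "user", "content": "Describe the image."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# Describe the image.<|im_end|>
# <|im_start|>assistant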
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:025df7bf693e81e1b24077cefc5d5196d333b2e24a84ac192157045d87c41b93
+ size 6200
zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+ from dataclasses import dataclass
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+ @dataclass
+ class zero_model_state:
+     buffers: dict
+     param_shapes: dict
+     shared_params: list
+     ds_version: int
+     frozen_param_shapes: dict
+     frozen_param_fragments: dict
+
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage <= 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+     if len(ckpt_files) == 0:
+         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+     return ckpt_files
+
+
+ def get_optim_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+ def get_model_state_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+ def parse_model_states(files):
+     zero_model_states = []
+     for file in files:
+         state_dict = torch.load(file, map_location=device)
+
+         if BUFFER_NAMES not in state_dict:
+             raise ValueError(f"{file} is not a model state checkpoint")
+         buffer_names = state_dict[BUFFER_NAMES]
+         if debug:
+             print("Found buffers:", buffer_names)
+
+         # recover just the buffers while restoring them to fp32 if they were saved in fp16
+         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+         param_shapes = state_dict[PARAM_SHAPES]
+
+         # collect parameters that are included in param_shapes
+         param_names = []
+         for s in param_shapes:
+             for name in s.keys():
+                 param_names.append(name)
+
+         # update with frozen parameters
+         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+         if frozen_param_shapes is not None:
+             if debug:
+                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+             param_names += list(frozen_param_shapes.keys())
+
+         # handle shared params
+         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+         ds_version = state_dict.get(DS_VERSION, None)
+
+         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+         z_model_state = zero_model_state(buffers=buffers,
+                                          param_shapes=param_shapes,
+                                          shared_params=shared_params,
+                                          ds_version=ds_version,
+                                          frozen_param_shapes=frozen_param_shapes,
+                                          frozen_param_fragments=frozen_param_fragments)
+         zero_model_states.append(z_model_state)
+
+     return zero_model_states
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dicts.append(torch.load(f, map_location=device))
+
+     if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage <= 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage <= 2:
+         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_files = get_model_state_files(ds_checkpoint_dir)
+
+     zero_model_states = parse_model_states(model_files)
+     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+     if zero_stage <= 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+             0).narrow(0, 0, unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
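As the header comment notes, this script is copied into the checkpoint directory so the ZeRO shards can be consolidated at any time. Both entry points do the same thing; a sketch, assuming DeepSpeed is installed and a tag folder (e.g. a `global_step*` directory named in the `latest` file) sits next to the script:

# CLI, as in the script's own header comment:
#   python zero_to_fp32.py . pytorch_model_fp32.bin
# or programmatically, using the function defined above:
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(".", "pytorch_model_fp32.bin")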