Bo1015 committed on
Commit
35de208
1 Parent(s): b809a8e

Upload 19 files

config.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "_name_or_path": "biomap-research/xtrimopglm-10b-mlm",
+ "add_bias_linear": true,
+ "add_qkv_bias": true,
+ "apply_query_key_layer_scaling": true,
+ "apply_residual_connection_post_layernorm": true,
+ "architectures": [
+ "xTrimoPGLMModel"
+ ],
+ "attention_dropout": 0.0,
+ "attention_softmax_in_fp32": true,
+ "auto_map": {
+ "AutoConfig": "configuration_xtrimopglm.xTrimoPGLMConfig",
+ "AutoModel": "modeling_xtrimopglm.xTrimoPGLMForMaskedLM",
+ "AutoModelForCausalLM": "modeling_xtrimopglm.xTrimoPGLMForCasualLM",
+ "AutoModelForMaskedLM": "modeling_xtrimopglm.xTrimoPGLMForMaskedLM",
+ "AutoModelForSequenceClassification": "modeling_xtrimopglm.xTrimoPGLMForSequenceClassification",
+ "AutoModelForTokenClassification": "modeling_xtrimopglm.xTrimoPGLMForTokenClassification"
+ },
+ "bias_dropout_fusion": true,
+ "deepnorm": true,
+ "experts_per_token": 0,
+ "ffn_hidden_size": 11606,
+ "fp32_residual_connection": false,
+ "glu_activation": "geglu",
+ "head_num": 1,
+ "hidden_dropout": 0.0,
+ "hidden_size": 4352,
+ "initializer_range": 0.02,
+ "is_causal": false,
+ "kv_channels": 136,
+ "layernorm_epsilon": 1e-05,
+ "model_type": "xTrimoPGLM",
+ "moe": false,
+ "multi_query_attention": false,
+ "multi_query_group_num": 1,
+ "num_attention_heads": 32,
+ "num_experts": 0,
+ "num_layers": 47,
+ "padded_vocab_size": 128,
+ "post_layer_norm": true,
+ "quantization_bit": 0,
+ "rmsnorm": false,
+ "rotary_embedding_2d": false,
+ "seq_length": 1024,
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.2",
+ "untie_head": false,
+ "use_cache": true,
+ "use_pytorch_sdpa": true,
+ "vocab_size": 128
+ }
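
For reference, a minimal loading sketch based on the auto_map entries above; this is an assumption-laden illustration, not an official snippet from this repository. The repository id comes from "_name_or_path", trust_remote_code=True is required because the config/model classes live in the files added by this commit, and the bfloat16 downcast is an assumption made only to reduce memory for the float32 checkpoint.

import torch
from transformers import AutoConfig, AutoModelForMaskedLM

repo = "biomap-research/xtrimopglm-10b-mlm"

# auto_map points AutoConfig/AutoModelForMaskedLM at configuration_xtrimopglm.py
# and modeling_xtrimopglm.py, so remote code must be trusted to execute them.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForMaskedLM.from_pretrained(
    repo,
    config=config,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # checkpoint is stored in float32; bf16 is an assumption to save memory
)
model.eval()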
configuration_xtrimopglm.py ADDED
@@ -0,0 +1,86 @@
+ from transformers import PretrainedConfig
+
+
+ class xTrimoPGLMConfig(PretrainedConfig):
+     model_type = "xTrimoPGLM"
+     def __init__(
+         self,
+         num_layers=47,
+         padded_vocab_size=128,
+         hidden_size=4352,
+         ffn_hidden_size=11606,
+         kv_channels=136,
+         num_attention_heads=32,
+         seq_length=1024,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         layernorm_epsilon=1e-5,
+         initializer_range=0.02,
+         glu_activation='geglu',
+         rmsnorm=False,
+         deepnorm=True,
+         apply_residual_connection_post_layernorm=True,
+         post_layer_norm=True,
+         add_bias_linear=True,
+         add_qkv_bias=True,
+         bias_dropout_fusion=True,
+         multi_query_attention=False,
+         multi_query_group_num=1,
+         apply_query_key_layer_scaling=True,
+         attention_softmax_in_fp32=True,
+         fp32_residual_connection=False,
+         quantization_bit=0,
+         rotary_embedding_2d=False,
+         use_pytorch_sdpa=True,
+         is_causal=False,
+         use_cache=True,
+         moe=False,
+         num_experts=0,
+         experts_per_token=0,
+         untie_head=False,
+         head_num=1,
+         **kwargs
+     ):
+
+         if not deepnorm and apply_residual_connection_post_layernorm:
+             print("Warning: deepnorm is False and apply_residual_connection_post_layernorm is True")
+
+         if deepnorm:
+             apply_residual_connection_post_layernorm = True
+
+         self.num_layers = num_layers
+         self.vocab_size = padded_vocab_size
+         self.padded_vocab_size = padded_vocab_size
+         self.hidden_size = hidden_size
+         self.ffn_hidden_size = ffn_hidden_size
+         self.kv_channels = kv_channels
+         self.num_attention_heads = num_attention_heads
+         self.seq_length = seq_length
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+         self.layernorm_epsilon = layernorm_epsilon
+         self.glu_activation = glu_activation
+         self.initializer_range = initializer_range
+         self.rmsnorm = rmsnorm
+         self.deepnorm = deepnorm
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.post_layer_norm = post_layer_norm
+         self.add_bias_linear = add_bias_linear
+         self.add_qkv_bias = add_qkv_bias
+         self.bias_dropout_fusion = bias_dropout_fusion
+         self.multi_query_attention = multi_query_attention
+         self.multi_query_group_num = multi_query_group_num
+         self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = attention_softmax_in_fp32
+         self.fp32_residual_connection = fp32_residual_connection
+         self.quantization_bit = quantization_bit
+         self.rotary_embedding_2d = rotary_embedding_2d
+         self.is_causal = is_causal
+         self.use_cache = use_cache
+         self.use_pytorch_sdpa = use_pytorch_sdpa
+         self.moe = moe
+         self.num_experts = num_experts
+         self.experts_per_token = experts_per_token
+         self.untie_head = untie_head
+         self.head_num = head_num
+         super().__init__(**kwargs)
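
A small usage sketch for the class above, assuming configuration_xtrimopglm.py from this commit is importable locally. It also shows the behavior coded in __init__: deepnorm=True forces apply_residual_connection_post_layernorm to True even when the caller passes False.

# Assumes configuration_xtrimopglm.py from this commit is on the Python path.
from configuration_xtrimopglm import xTrimoPGLMConfig

cfg = xTrimoPGLMConfig()                                      # defaults mirror config.json above
print(cfg.hidden_size, cfg.ffn_hidden_size, cfg.num_layers)   # 4352 11606 47
print(cfg.vocab_size == cfg.padded_vocab_size)                # True: both are set from padded_vocab_size

# deepnorm=True overrides the residual-connection flag inside __init__.
cfg2 = xTrimoPGLMConfig(deepnorm=True, apply_residual_connection_post_layernorm=False)
print(cfg2.apply_residual_connection_post_layernorm)          # True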
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "_from_model_config": true,
+ "transformers_version": "4.41.2"
+ }
model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0deb4c65175681668e79b3369f7728a21f3e6be2ee617ab574a395ed1b6a8c2
+ size 4852416024
model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:850926a99f21ce4daee7d26ca4b59913f734c761d2b00d0a9555b2337cb235d3
+ size 4951176104
model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae5f762486078526a71bc7548e5119d2301d6fa00d6f713fb541dec9e026070d
+ size 4976429904
model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3aeb4ae86dab96c57261064fd93785642fdbe0918f7416ea938deb17ed913d3
+ size 4622821344
model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10453c5037351f01d1a99eeff56c006650ee323921cf8c631a60e5434538057d
+ size 4951176184
model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:885db3d0cf0037e4a98569848ee7049506dcb74fdfe4d0045bde84a71d058c5d
+ size 4976429904
model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:577442882871b9408c32a7d270664664adb5dcb4b5a3979c288fcca1c45c9c74
+ size 4622821344
model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1931a02012097828c211cb65f1a2c85b67a566ada892cb0714748addf62c6eb4
+ size 4951176184
model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c547b39209ea0c633ff0ea5234b8821af57e0c88ea7a6feb4fea473c95892e1
+ size 3841925128
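
Each of the nine shard entries above is a Git LFS pointer (spec v1): the oid is the SHA-256 digest of the actual shard and size is its byte count. Below is a minimal verification sketch for shards that have already been downloaded locally; the file name and expected digest are taken from the pointers above, while the script itself is only an illustration, not part of the repository.

import hashlib
from pathlib import Path

# Expected SHA-256 digests copied from the LFS pointers above (first shard shown;
# add the remaining eight entries the same way).
EXPECTED = {
    "model-00001-of-00009.safetensors": "f0deb4c65175681668e79b3369f7728a21f3e6be2ee617ab574a395ed1b6a8c2",
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so multi-GB shards never need to fit in RAM."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

for name, expected in EXPECTED.items():
    actual = sha256_of(Path(name))
    print(name, "OK" if actual == expected else f"MISMATCH: {actual}")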
model.safetensors.index.json ADDED
@@ -0,0 +1,622 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 42746291264
4
+ },
5
+ "weight_map": {
6
+ "transformer.embedding.word_embeddings.weight": "model-00001-of-00009.safetensors",
7
+ "transformer.encoder.final_layernorm.bias": "model-00009-of-00009.safetensors",
8
+ "transformer.encoder.final_layernorm.weight": "model-00009-of-00009.safetensors",
9
+ "transformer.encoder.layers.0.input_layernorm.bias": "model-00001-of-00009.safetensors",
10
+ "transformer.encoder.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
11
+ "transformer.encoder.layers.0.mlp.dense_4h_to_h.bias": "model-00001-of-00009.safetensors",
12
+ "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "model-00001-of-00009.safetensors",
13
+ "transformer.encoder.layers.0.mlp.dense_h_to_4h.bias": "model-00001-of-00009.safetensors",
14
+ "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "model-00001-of-00009.safetensors",
15
+ "transformer.encoder.layers.0.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
16
+ "transformer.encoder.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
17
+ "transformer.encoder.layers.0.self_attention.dense.bias": "model-00001-of-00009.safetensors",
18
+ "transformer.encoder.layers.0.self_attention.dense.weight": "model-00001-of-00009.safetensors",
19
+ "transformer.encoder.layers.0.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
20
+ "transformer.encoder.layers.0.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
21
+ "transformer.encoder.layers.0.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
22
+ "transformer.encoder.layers.1.input_layernorm.bias": "model-00001-of-00009.safetensors",
23
+ "transformer.encoder.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
24
+ "transformer.encoder.layers.1.mlp.dense_4h_to_h.bias": "model-00001-of-00009.safetensors",
25
+ "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "model-00001-of-00009.safetensors",
26
+ "transformer.encoder.layers.1.mlp.dense_h_to_4h.bias": "model-00001-of-00009.safetensors",
27
+ "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "model-00001-of-00009.safetensors",
28
+ "transformer.encoder.layers.1.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
29
+ "transformer.encoder.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
30
+ "transformer.encoder.layers.1.self_attention.dense.bias": "model-00001-of-00009.safetensors",
31
+ "transformer.encoder.layers.1.self_attention.dense.weight": "model-00001-of-00009.safetensors",
32
+ "transformer.encoder.layers.1.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
33
+ "transformer.encoder.layers.1.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
34
+ "transformer.encoder.layers.1.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
35
+ "transformer.encoder.layers.10.input_layernorm.bias": "model-00002-of-00009.safetensors",
36
+ "transformer.encoder.layers.10.input_layernorm.weight": "model-00002-of-00009.safetensors",
37
+ "transformer.encoder.layers.10.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
38
+ "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
39
+ "transformer.encoder.layers.10.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
40
+ "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
41
+ "transformer.encoder.layers.10.post_attention_layernorm.bias": "model-00002-of-00009.safetensors",
42
+ "transformer.encoder.layers.10.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
43
+ "transformer.encoder.layers.10.self_attention.dense.bias": "model-00002-of-00009.safetensors",
44
+ "transformer.encoder.layers.10.self_attention.dense.weight": "model-00002-of-00009.safetensors",
45
+ "transformer.encoder.layers.10.self_attention.query_key_value.bias": "model-00002-of-00009.safetensors",
46
+ "transformer.encoder.layers.10.self_attention.query_key_value.weight": "model-00002-of-00009.safetensors",
47
+ "transformer.encoder.layers.10.self_attention.rotary_emb.inv_freq": "model-00002-of-00009.safetensors",
48
+ "transformer.encoder.layers.11.input_layernorm.bias": "model-00003-of-00009.safetensors",
49
+ "transformer.encoder.layers.11.input_layernorm.weight": "model-00003-of-00009.safetensors",
50
+ "transformer.encoder.layers.11.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
51
+ "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
52
+ "transformer.encoder.layers.11.mlp.dense_h_to_4h.bias": "model-00003-of-00009.safetensors",
53
+ "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "model-00003-of-00009.safetensors",
54
+ "transformer.encoder.layers.11.post_attention_layernorm.bias": "model-00003-of-00009.safetensors",
55
+ "transformer.encoder.layers.11.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
56
+ "transformer.encoder.layers.11.self_attention.dense.bias": "model-00003-of-00009.safetensors",
57
+ "transformer.encoder.layers.11.self_attention.dense.weight": "model-00003-of-00009.safetensors",
58
+ "transformer.encoder.layers.11.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
59
+ "transformer.encoder.layers.11.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
60
+ "transformer.encoder.layers.11.self_attention.rotary_emb.inv_freq": "model-00003-of-00009.safetensors",
61
+ "transformer.encoder.layers.12.input_layernorm.bias": "model-00003-of-00009.safetensors",
62
+ "transformer.encoder.layers.12.input_layernorm.weight": "model-00003-of-00009.safetensors",
63
+ "transformer.encoder.layers.12.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
64
+ "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
65
+ "transformer.encoder.layers.12.mlp.dense_h_to_4h.bias": "model-00003-of-00009.safetensors",
66
+ "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "model-00003-of-00009.safetensors",
67
+ "transformer.encoder.layers.12.post_attention_layernorm.bias": "model-00003-of-00009.safetensors",
68
+ "transformer.encoder.layers.12.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
69
+ "transformer.encoder.layers.12.self_attention.dense.bias": "model-00003-of-00009.safetensors",
70
+ "transformer.encoder.layers.12.self_attention.dense.weight": "model-00003-of-00009.safetensors",
71
+ "transformer.encoder.layers.12.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
72
+ "transformer.encoder.layers.12.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
73
+ "transformer.encoder.layers.12.self_attention.rotary_emb.inv_freq": "model-00003-of-00009.safetensors",
74
+ "transformer.encoder.layers.13.input_layernorm.bias": "model-00003-of-00009.safetensors",
75
+ "transformer.encoder.layers.13.input_layernorm.weight": "model-00003-of-00009.safetensors",
76
+ "transformer.encoder.layers.13.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
77
+ "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
78
+ "transformer.encoder.layers.13.mlp.dense_h_to_4h.bias": "model-00003-of-00009.safetensors",
79
+ "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "model-00003-of-00009.safetensors",
80
+ "transformer.encoder.layers.13.post_attention_layernorm.bias": "model-00003-of-00009.safetensors",
81
+ "transformer.encoder.layers.13.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
82
+ "transformer.encoder.layers.13.self_attention.dense.bias": "model-00003-of-00009.safetensors",
83
+ "transformer.encoder.layers.13.self_attention.dense.weight": "model-00003-of-00009.safetensors",
84
+ "transformer.encoder.layers.13.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
85
+ "transformer.encoder.layers.13.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
86
+ "transformer.encoder.layers.13.self_attention.rotary_emb.inv_freq": "model-00003-of-00009.safetensors",
87
+ "transformer.encoder.layers.14.input_layernorm.bias": "model-00003-of-00009.safetensors",
88
+ "transformer.encoder.layers.14.input_layernorm.weight": "model-00003-of-00009.safetensors",
89
+ "transformer.encoder.layers.14.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
90
+ "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
91
+ "transformer.encoder.layers.14.mlp.dense_h_to_4h.bias": "model-00003-of-00009.safetensors",
92
+ "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "model-00003-of-00009.safetensors",
93
+ "transformer.encoder.layers.14.post_attention_layernorm.bias": "model-00003-of-00009.safetensors",
94
+ "transformer.encoder.layers.14.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
95
+ "transformer.encoder.layers.14.self_attention.dense.bias": "model-00003-of-00009.safetensors",
96
+ "transformer.encoder.layers.14.self_attention.dense.weight": "model-00003-of-00009.safetensors",
97
+ "transformer.encoder.layers.14.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
98
+ "transformer.encoder.layers.14.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
99
+ "transformer.encoder.layers.14.self_attention.rotary_emb.inv_freq": "model-00003-of-00009.safetensors",
100
+ "transformer.encoder.layers.15.input_layernorm.bias": "model-00003-of-00009.safetensors",
101
+ "transformer.encoder.layers.15.input_layernorm.weight": "model-00003-of-00009.safetensors",
102
+ "transformer.encoder.layers.15.mlp.dense_4h_to_h.bias": "model-00003-of-00009.safetensors",
103
+ "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "model-00003-of-00009.safetensors",
104
+ "transformer.encoder.layers.15.mlp.dense_h_to_4h.bias": "model-00003-of-00009.safetensors",
105
+ "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "model-00003-of-00009.safetensors",
106
+ "transformer.encoder.layers.15.post_attention_layernorm.bias": "model-00003-of-00009.safetensors",
107
+ "transformer.encoder.layers.15.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
108
+ "transformer.encoder.layers.15.self_attention.dense.bias": "model-00003-of-00009.safetensors",
109
+ "transformer.encoder.layers.15.self_attention.dense.weight": "model-00003-of-00009.safetensors",
110
+ "transformer.encoder.layers.15.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
111
+ "transformer.encoder.layers.15.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
112
+ "transformer.encoder.layers.15.self_attention.rotary_emb.inv_freq": "model-00003-of-00009.safetensors",
113
+ "transformer.encoder.layers.16.input_layernorm.bias": "model-00003-of-00009.safetensors",
114
+ "transformer.encoder.layers.16.input_layernorm.weight": "model-00003-of-00009.safetensors",
115
+ "transformer.encoder.layers.16.mlp.dense_4h_to_h.bias": "model-00004-of-00009.safetensors",
116
+ "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "model-00004-of-00009.safetensors",
117
+ "transformer.encoder.layers.16.mlp.dense_h_to_4h.bias": "model-00004-of-00009.safetensors",
118
+ "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "model-00004-of-00009.safetensors",
119
+ "transformer.encoder.layers.16.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
120
+ "transformer.encoder.layers.16.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
121
+ "transformer.encoder.layers.16.self_attention.dense.bias": "model-00004-of-00009.safetensors",
122
+ "transformer.encoder.layers.16.self_attention.dense.weight": "model-00004-of-00009.safetensors",
123
+ "transformer.encoder.layers.16.self_attention.query_key_value.bias": "model-00003-of-00009.safetensors",
124
+ "transformer.encoder.layers.16.self_attention.query_key_value.weight": "model-00003-of-00009.safetensors",
125
+ "transformer.encoder.layers.16.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
126
+ "transformer.encoder.layers.17.input_layernorm.bias": "model-00004-of-00009.safetensors",
127
+ "transformer.encoder.layers.17.input_layernorm.weight": "model-00004-of-00009.safetensors",
128
+ "transformer.encoder.layers.17.mlp.dense_4h_to_h.bias": "model-00004-of-00009.safetensors",
129
+ "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "model-00004-of-00009.safetensors",
130
+ "transformer.encoder.layers.17.mlp.dense_h_to_4h.bias": "model-00004-of-00009.safetensors",
131
+ "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "model-00004-of-00009.safetensors",
132
+ "transformer.encoder.layers.17.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
133
+ "transformer.encoder.layers.17.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
134
+ "transformer.encoder.layers.17.self_attention.dense.bias": "model-00004-of-00009.safetensors",
135
+ "transformer.encoder.layers.17.self_attention.dense.weight": "model-00004-of-00009.safetensors",
136
+ "transformer.encoder.layers.17.self_attention.query_key_value.bias": "model-00004-of-00009.safetensors",
137
+ "transformer.encoder.layers.17.self_attention.query_key_value.weight": "model-00004-of-00009.safetensors",
138
+ "transformer.encoder.layers.17.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
139
+ "transformer.encoder.layers.18.input_layernorm.bias": "model-00004-of-00009.safetensors",
140
+ "transformer.encoder.layers.18.input_layernorm.weight": "model-00004-of-00009.safetensors",
141
+ "transformer.encoder.layers.18.mlp.dense_4h_to_h.bias": "model-00004-of-00009.safetensors",
142
+ "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "model-00004-of-00009.safetensors",
143
+ "transformer.encoder.layers.18.mlp.dense_h_to_4h.bias": "model-00004-of-00009.safetensors",
144
+ "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "model-00004-of-00009.safetensors",
145
+ "transformer.encoder.layers.18.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
146
+ "transformer.encoder.layers.18.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
147
+ "transformer.encoder.layers.18.self_attention.dense.bias": "model-00004-of-00009.safetensors",
148
+ "transformer.encoder.layers.18.self_attention.dense.weight": "model-00004-of-00009.safetensors",
149
+ "transformer.encoder.layers.18.self_attention.query_key_value.bias": "model-00004-of-00009.safetensors",
150
+ "transformer.encoder.layers.18.self_attention.query_key_value.weight": "model-00004-of-00009.safetensors",
151
+ "transformer.encoder.layers.18.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
152
+ "transformer.encoder.layers.19.input_layernorm.bias": "model-00004-of-00009.safetensors",
153
+ "transformer.encoder.layers.19.input_layernorm.weight": "model-00004-of-00009.safetensors",
154
+ "transformer.encoder.layers.19.mlp.dense_4h_to_h.bias": "model-00004-of-00009.safetensors",
155
+ "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "model-00004-of-00009.safetensors",
156
+ "transformer.encoder.layers.19.mlp.dense_h_to_4h.bias": "model-00004-of-00009.safetensors",
157
+ "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "model-00004-of-00009.safetensors",
158
+ "transformer.encoder.layers.19.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
159
+ "transformer.encoder.layers.19.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
160
+ "transformer.encoder.layers.19.self_attention.dense.bias": "model-00004-of-00009.safetensors",
161
+ "transformer.encoder.layers.19.self_attention.dense.weight": "model-00004-of-00009.safetensors",
162
+ "transformer.encoder.layers.19.self_attention.query_key_value.bias": "model-00004-of-00009.safetensors",
163
+ "transformer.encoder.layers.19.self_attention.query_key_value.weight": "model-00004-of-00009.safetensors",
164
+ "transformer.encoder.layers.19.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
165
+ "transformer.encoder.layers.2.input_layernorm.bias": "model-00001-of-00009.safetensors",
166
+ "transformer.encoder.layers.2.input_layernorm.weight": "model-00001-of-00009.safetensors",
167
+ "transformer.encoder.layers.2.mlp.dense_4h_to_h.bias": "model-00001-of-00009.safetensors",
168
+ "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "model-00001-of-00009.safetensors",
169
+ "transformer.encoder.layers.2.mlp.dense_h_to_4h.bias": "model-00001-of-00009.safetensors",
170
+ "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "model-00001-of-00009.safetensors",
171
+ "transformer.encoder.layers.2.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
172
+ "transformer.encoder.layers.2.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
173
+ "transformer.encoder.layers.2.self_attention.dense.bias": "model-00001-of-00009.safetensors",
174
+ "transformer.encoder.layers.2.self_attention.dense.weight": "model-00001-of-00009.safetensors",
175
+ "transformer.encoder.layers.2.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
176
+ "transformer.encoder.layers.2.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
177
+ "transformer.encoder.layers.2.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
178
+ "transformer.encoder.layers.20.input_layernorm.bias": "model-00004-of-00009.safetensors",
179
+ "transformer.encoder.layers.20.input_layernorm.weight": "model-00004-of-00009.safetensors",
180
+ "transformer.encoder.layers.20.mlp.dense_4h_to_h.bias": "model-00004-of-00009.safetensors",
181
+ "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "model-00004-of-00009.safetensors",
182
+ "transformer.encoder.layers.20.mlp.dense_h_to_4h.bias": "model-00004-of-00009.safetensors",
183
+ "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "model-00004-of-00009.safetensors",
184
+ "transformer.encoder.layers.20.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
185
+ "transformer.encoder.layers.20.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
186
+ "transformer.encoder.layers.20.self_attention.dense.bias": "model-00004-of-00009.safetensors",
187
+ "transformer.encoder.layers.20.self_attention.dense.weight": "model-00004-of-00009.safetensors",
188
+ "transformer.encoder.layers.20.self_attention.query_key_value.bias": "model-00004-of-00009.safetensors",
189
+ "transformer.encoder.layers.20.self_attention.query_key_value.weight": "model-00004-of-00009.safetensors",
190
+ "transformer.encoder.layers.20.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
191
+ "transformer.encoder.layers.21.input_layernorm.bias": "model-00004-of-00009.safetensors",
192
+ "transformer.encoder.layers.21.input_layernorm.weight": "model-00004-of-00009.safetensors",
193
+ "transformer.encoder.layers.21.mlp.dense_4h_to_h.bias": "model-00005-of-00009.safetensors",
194
+ "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "model-00005-of-00009.safetensors",
195
+ "transformer.encoder.layers.21.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
196
+ "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
197
+ "transformer.encoder.layers.21.post_attention_layernorm.bias": "model-00004-of-00009.safetensors",
198
+ "transformer.encoder.layers.21.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
199
+ "transformer.encoder.layers.21.self_attention.dense.bias": "model-00004-of-00009.safetensors",
200
+ "transformer.encoder.layers.21.self_attention.dense.weight": "model-00004-of-00009.safetensors",
201
+ "transformer.encoder.layers.21.self_attention.query_key_value.bias": "model-00004-of-00009.safetensors",
202
+ "transformer.encoder.layers.21.self_attention.query_key_value.weight": "model-00004-of-00009.safetensors",
203
+ "transformer.encoder.layers.21.self_attention.rotary_emb.inv_freq": "model-00004-of-00009.safetensors",
204
+ "transformer.encoder.layers.22.input_layernorm.bias": "model-00005-of-00009.safetensors",
205
+ "transformer.encoder.layers.22.input_layernorm.weight": "model-00005-of-00009.safetensors",
206
+ "transformer.encoder.layers.22.mlp.dense_4h_to_h.bias": "model-00005-of-00009.safetensors",
207
+ "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "model-00005-of-00009.safetensors",
208
+ "transformer.encoder.layers.22.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
209
+ "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
210
+ "transformer.encoder.layers.22.post_attention_layernorm.bias": "model-00005-of-00009.safetensors",
211
+ "transformer.encoder.layers.22.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
212
+ "transformer.encoder.layers.22.self_attention.dense.bias": "model-00005-of-00009.safetensors",
213
+ "transformer.encoder.layers.22.self_attention.dense.weight": "model-00005-of-00009.safetensors",
214
+ "transformer.encoder.layers.22.self_attention.query_key_value.bias": "model-00005-of-00009.safetensors",
215
+ "transformer.encoder.layers.22.self_attention.query_key_value.weight": "model-00005-of-00009.safetensors",
216
+ "transformer.encoder.layers.22.self_attention.rotary_emb.inv_freq": "model-00005-of-00009.safetensors",
217
+ "transformer.encoder.layers.23.input_layernorm.bias": "model-00005-of-00009.safetensors",
218
+ "transformer.encoder.layers.23.input_layernorm.weight": "model-00005-of-00009.safetensors",
219
+ "transformer.encoder.layers.23.mlp.dense_4h_to_h.bias": "model-00005-of-00009.safetensors",
220
+ "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "model-00005-of-00009.safetensors",
221
+ "transformer.encoder.layers.23.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
222
+ "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
223
+ "transformer.encoder.layers.23.post_attention_layernorm.bias": "model-00005-of-00009.safetensors",
224
+ "transformer.encoder.layers.23.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
225
+ "transformer.encoder.layers.23.self_attention.dense.bias": "model-00005-of-00009.safetensors",
226
+ "transformer.encoder.layers.23.self_attention.dense.weight": "model-00005-of-00009.safetensors",
227
+ "transformer.encoder.layers.23.self_attention.query_key_value.bias": "model-00005-of-00009.safetensors",
228
+ "transformer.encoder.layers.23.self_attention.query_key_value.weight": "model-00005-of-00009.safetensors",
229
+ "transformer.encoder.layers.23.self_attention.rotary_emb.inv_freq": "model-00005-of-00009.safetensors",
230
+ "transformer.encoder.layers.24.input_layernorm.bias": "model-00005-of-00009.safetensors",
231
+ "transformer.encoder.layers.24.input_layernorm.weight": "model-00005-of-00009.safetensors",
232
+ "transformer.encoder.layers.24.mlp.dense_4h_to_h.bias": "model-00005-of-00009.safetensors",
233
+ "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "model-00005-of-00009.safetensors",
234
+ "transformer.encoder.layers.24.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
235
+ "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
236
+ "transformer.encoder.layers.24.post_attention_layernorm.bias": "model-00005-of-00009.safetensors",
237
+ "transformer.encoder.layers.24.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
238
+ "transformer.encoder.layers.24.self_attention.dense.bias": "model-00005-of-00009.safetensors",
239
+ "transformer.encoder.layers.24.self_attention.dense.weight": "model-00005-of-00009.safetensors",
240
+ "transformer.encoder.layers.24.self_attention.query_key_value.bias": "model-00005-of-00009.safetensors",
241
+ "transformer.encoder.layers.24.self_attention.query_key_value.weight": "model-00005-of-00009.safetensors",
242
+ "transformer.encoder.layers.24.self_attention.rotary_emb.inv_freq": "model-00005-of-00009.safetensors",
243
+ "transformer.encoder.layers.25.input_layernorm.bias": "model-00005-of-00009.safetensors",
244
+ "transformer.encoder.layers.25.input_layernorm.weight": "model-00005-of-00009.safetensors",
245
+ "transformer.encoder.layers.25.mlp.dense_4h_to_h.bias": "model-00005-of-00009.safetensors",
246
+ "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "model-00005-of-00009.safetensors",
247
+ "transformer.encoder.layers.25.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
248
+ "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
249
+ "transformer.encoder.layers.25.post_attention_layernorm.bias": "model-00005-of-00009.safetensors",
250
+ "transformer.encoder.layers.25.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
251
+ "transformer.encoder.layers.25.self_attention.dense.bias": "model-00005-of-00009.safetensors",
252
+ "transformer.encoder.layers.25.self_attention.dense.weight": "model-00005-of-00009.safetensors",
253
+ "transformer.encoder.layers.25.self_attention.query_key_value.bias": "model-00005-of-00009.safetensors",
254
+ "transformer.encoder.layers.25.self_attention.query_key_value.weight": "model-00005-of-00009.safetensors",
255
+ "transformer.encoder.layers.25.self_attention.rotary_emb.inv_freq": "model-00005-of-00009.safetensors",
256
+ "transformer.encoder.layers.26.input_layernorm.bias": "model-00005-of-00009.safetensors",
257
+ "transformer.encoder.layers.26.input_layernorm.weight": "model-00005-of-00009.safetensors",
258
+ "transformer.encoder.layers.26.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
259
+ "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
260
+ "transformer.encoder.layers.26.mlp.dense_h_to_4h.bias": "model-00005-of-00009.safetensors",
261
+ "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "model-00005-of-00009.safetensors",
262
+ "transformer.encoder.layers.26.post_attention_layernorm.bias": "model-00005-of-00009.safetensors",
263
+ "transformer.encoder.layers.26.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
264
+ "transformer.encoder.layers.26.self_attention.dense.bias": "model-00005-of-00009.safetensors",
265
+ "transformer.encoder.layers.26.self_attention.dense.weight": "model-00005-of-00009.safetensors",
266
+ "transformer.encoder.layers.26.self_attention.query_key_value.bias": "model-00005-of-00009.safetensors",
267
+ "transformer.encoder.layers.26.self_attention.query_key_value.weight": "model-00005-of-00009.safetensors",
268
+ "transformer.encoder.layers.26.self_attention.rotary_emb.inv_freq": "model-00005-of-00009.safetensors",
269
+ "transformer.encoder.layers.27.input_layernorm.bias": "model-00006-of-00009.safetensors",
270
+ "transformer.encoder.layers.27.input_layernorm.weight": "model-00006-of-00009.safetensors",
271
+ "transformer.encoder.layers.27.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
272
+ "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
273
+ "transformer.encoder.layers.27.mlp.dense_h_to_4h.bias": "model-00006-of-00009.safetensors",
274
+ "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "model-00006-of-00009.safetensors",
275
+ "transformer.encoder.layers.27.post_attention_layernorm.bias": "model-00006-of-00009.safetensors",
276
+ "transformer.encoder.layers.27.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
277
+ "transformer.encoder.layers.27.self_attention.dense.bias": "model-00006-of-00009.safetensors",
278
+ "transformer.encoder.layers.27.self_attention.dense.weight": "model-00006-of-00009.safetensors",
279
+ "transformer.encoder.layers.27.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
280
+ "transformer.encoder.layers.27.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
281
+ "transformer.encoder.layers.27.self_attention.rotary_emb.inv_freq": "model-00006-of-00009.safetensors",
282
+ "transformer.encoder.layers.28.input_layernorm.bias": "model-00006-of-00009.safetensors",
283
+ "transformer.encoder.layers.28.input_layernorm.weight": "model-00006-of-00009.safetensors",
284
+ "transformer.encoder.layers.28.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
285
+ "transformer.encoder.layers.28.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
286
+ "transformer.encoder.layers.28.mlp.dense_h_to_4h.bias": "model-00006-of-00009.safetensors",
287
+ "transformer.encoder.layers.28.mlp.dense_h_to_4h.weight": "model-00006-of-00009.safetensors",
288
+ "transformer.encoder.layers.28.post_attention_layernorm.bias": "model-00006-of-00009.safetensors",
289
+ "transformer.encoder.layers.28.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
290
+ "transformer.encoder.layers.28.self_attention.dense.bias": "model-00006-of-00009.safetensors",
291
+ "transformer.encoder.layers.28.self_attention.dense.weight": "model-00006-of-00009.safetensors",
292
+ "transformer.encoder.layers.28.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
293
+ "transformer.encoder.layers.28.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
294
+ "transformer.encoder.layers.28.self_attention.rotary_emb.inv_freq": "model-00006-of-00009.safetensors",
295
+ "transformer.encoder.layers.29.input_layernorm.bias": "model-00006-of-00009.safetensors",
296
+ "transformer.encoder.layers.29.input_layernorm.weight": "model-00006-of-00009.safetensors",
297
+ "transformer.encoder.layers.29.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
298
+ "transformer.encoder.layers.29.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
299
+ "transformer.encoder.layers.29.mlp.dense_h_to_4h.bias": "model-00006-of-00009.safetensors",
300
+ "transformer.encoder.layers.29.mlp.dense_h_to_4h.weight": "model-00006-of-00009.safetensors",
301
+ "transformer.encoder.layers.29.post_attention_layernorm.bias": "model-00006-of-00009.safetensors",
302
+ "transformer.encoder.layers.29.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
303
+ "transformer.encoder.layers.29.self_attention.dense.bias": "model-00006-of-00009.safetensors",
304
+ "transformer.encoder.layers.29.self_attention.dense.weight": "model-00006-of-00009.safetensors",
305
+ "transformer.encoder.layers.29.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
306
+ "transformer.encoder.layers.29.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
307
+ "transformer.encoder.layers.29.self_attention.rotary_emb.inv_freq": "model-00006-of-00009.safetensors",
308
+ "transformer.encoder.layers.3.input_layernorm.bias": "model-00001-of-00009.safetensors",
309
+ "transformer.encoder.layers.3.input_layernorm.weight": "model-00001-of-00009.safetensors",
310
+ "transformer.encoder.layers.3.mlp.dense_4h_to_h.bias": "model-00001-of-00009.safetensors",
311
+ "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "model-00001-of-00009.safetensors",
312
+ "transformer.encoder.layers.3.mlp.dense_h_to_4h.bias": "model-00001-of-00009.safetensors",
313
+ "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "model-00001-of-00009.safetensors",
314
+ "transformer.encoder.layers.3.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
315
+ "transformer.encoder.layers.3.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
316
+ "transformer.encoder.layers.3.self_attention.dense.bias": "model-00001-of-00009.safetensors",
317
+ "transformer.encoder.layers.3.self_attention.dense.weight": "model-00001-of-00009.safetensors",
318
+ "transformer.encoder.layers.3.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
319
+ "transformer.encoder.layers.3.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
320
+ "transformer.encoder.layers.3.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
321
+ "transformer.encoder.layers.30.input_layernorm.bias": "model-00006-of-00009.safetensors",
322
+ "transformer.encoder.layers.30.input_layernorm.weight": "model-00006-of-00009.safetensors",
323
+ "transformer.encoder.layers.30.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
324
+ "transformer.encoder.layers.30.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
325
+ "transformer.encoder.layers.30.mlp.dense_h_to_4h.bias": "model-00006-of-00009.safetensors",
326
+ "transformer.encoder.layers.30.mlp.dense_h_to_4h.weight": "model-00006-of-00009.safetensors",
327
+ "transformer.encoder.layers.30.post_attention_layernorm.bias": "model-00006-of-00009.safetensors",
328
+ "transformer.encoder.layers.30.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
329
+ "transformer.encoder.layers.30.self_attention.dense.bias": "model-00006-of-00009.safetensors",
330
+ "transformer.encoder.layers.30.self_attention.dense.weight": "model-00006-of-00009.safetensors",
331
+ "transformer.encoder.layers.30.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
332
+ "transformer.encoder.layers.30.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
333
+ "transformer.encoder.layers.30.self_attention.rotary_emb.inv_freq": "model-00006-of-00009.safetensors",
334
+ "transformer.encoder.layers.31.input_layernorm.bias": "model-00006-of-00009.safetensors",
335
+ "transformer.encoder.layers.31.input_layernorm.weight": "model-00006-of-00009.safetensors",
336
+ "transformer.encoder.layers.31.mlp.dense_4h_to_h.bias": "model-00006-of-00009.safetensors",
337
+ "transformer.encoder.layers.31.mlp.dense_4h_to_h.weight": "model-00006-of-00009.safetensors",
338
+ "transformer.encoder.layers.31.mlp.dense_h_to_4h.bias": "model-00006-of-00009.safetensors",
339
+ "transformer.encoder.layers.31.mlp.dense_h_to_4h.weight": "model-00006-of-00009.safetensors",
340
+ "transformer.encoder.layers.31.post_attention_layernorm.bias": "model-00006-of-00009.safetensors",
341
+ "transformer.encoder.layers.31.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
342
+ "transformer.encoder.layers.31.self_attention.dense.bias": "model-00006-of-00009.safetensors",
343
+ "transformer.encoder.layers.31.self_attention.dense.weight": "model-00006-of-00009.safetensors",
344
+ "transformer.encoder.layers.31.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
345
+ "transformer.encoder.layers.31.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
346
+ "transformer.encoder.layers.31.self_attention.rotary_emb.inv_freq": "model-00006-of-00009.safetensors",
347
+ "transformer.encoder.layers.32.input_layernorm.bias": "model-00006-of-00009.safetensors",
348
+ "transformer.encoder.layers.32.input_layernorm.weight": "model-00006-of-00009.safetensors",
349
+ "transformer.encoder.layers.32.mlp.dense_4h_to_h.bias": "model-00007-of-00009.safetensors",
350
+ "transformer.encoder.layers.32.mlp.dense_4h_to_h.weight": "model-00007-of-00009.safetensors",
351
+ "transformer.encoder.layers.32.mlp.dense_h_to_4h.bias": "model-00007-of-00009.safetensors",
352
+ "transformer.encoder.layers.32.mlp.dense_h_to_4h.weight": "model-00007-of-00009.safetensors",
353
+ "transformer.encoder.layers.32.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
354
+ "transformer.encoder.layers.32.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
355
+ "transformer.encoder.layers.32.self_attention.dense.bias": "model-00007-of-00009.safetensors",
356
+ "transformer.encoder.layers.32.self_attention.dense.weight": "model-00007-of-00009.safetensors",
357
+ "transformer.encoder.layers.32.self_attention.query_key_value.bias": "model-00006-of-00009.safetensors",
358
+ "transformer.encoder.layers.32.self_attention.query_key_value.weight": "model-00006-of-00009.safetensors",
359
+ "transformer.encoder.layers.32.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
360
+ "transformer.encoder.layers.33.input_layernorm.bias": "model-00007-of-00009.safetensors",
361
+ "transformer.encoder.layers.33.input_layernorm.weight": "model-00007-of-00009.safetensors",
362
+ "transformer.encoder.layers.33.mlp.dense_4h_to_h.bias": "model-00007-of-00009.safetensors",
363
+ "transformer.encoder.layers.33.mlp.dense_4h_to_h.weight": "model-00007-of-00009.safetensors",
364
+ "transformer.encoder.layers.33.mlp.dense_h_to_4h.bias": "model-00007-of-00009.safetensors",
365
+ "transformer.encoder.layers.33.mlp.dense_h_to_4h.weight": "model-00007-of-00009.safetensors",
366
+ "transformer.encoder.layers.33.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
367
+ "transformer.encoder.layers.33.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
368
+ "transformer.encoder.layers.33.self_attention.dense.bias": "model-00007-of-00009.safetensors",
369
+ "transformer.encoder.layers.33.self_attention.dense.weight": "model-00007-of-00009.safetensors",
370
+ "transformer.encoder.layers.33.self_attention.query_key_value.bias": "model-00007-of-00009.safetensors",
371
+ "transformer.encoder.layers.33.self_attention.query_key_value.weight": "model-00007-of-00009.safetensors",
372
+ "transformer.encoder.layers.33.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
373
+ "transformer.encoder.layers.34.input_layernorm.bias": "model-00007-of-00009.safetensors",
374
+ "transformer.encoder.layers.34.input_layernorm.weight": "model-00007-of-00009.safetensors",
375
+ "transformer.encoder.layers.34.mlp.dense_4h_to_h.bias": "model-00007-of-00009.safetensors",
376
+ "transformer.encoder.layers.34.mlp.dense_4h_to_h.weight": "model-00007-of-00009.safetensors",
377
+ "transformer.encoder.layers.34.mlp.dense_h_to_4h.bias": "model-00007-of-00009.safetensors",
378
+ "transformer.encoder.layers.34.mlp.dense_h_to_4h.weight": "model-00007-of-00009.safetensors",
379
+ "transformer.encoder.layers.34.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
380
+ "transformer.encoder.layers.34.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
381
+ "transformer.encoder.layers.34.self_attention.dense.bias": "model-00007-of-00009.safetensors",
382
+ "transformer.encoder.layers.34.self_attention.dense.weight": "model-00007-of-00009.safetensors",
383
+ "transformer.encoder.layers.34.self_attention.query_key_value.bias": "model-00007-of-00009.safetensors",
384
+ "transformer.encoder.layers.34.self_attention.query_key_value.weight": "model-00007-of-00009.safetensors",
385
+ "transformer.encoder.layers.34.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
386
+ "transformer.encoder.layers.35.input_layernorm.bias": "model-00007-of-00009.safetensors",
387
+ "transformer.encoder.layers.35.input_layernorm.weight": "model-00007-of-00009.safetensors",
388
+ "transformer.encoder.layers.35.mlp.dense_4h_to_h.bias": "model-00007-of-00009.safetensors",
389
+ "transformer.encoder.layers.35.mlp.dense_4h_to_h.weight": "model-00007-of-00009.safetensors",
390
+ "transformer.encoder.layers.35.mlp.dense_h_to_4h.bias": "model-00007-of-00009.safetensors",
391
+ "transformer.encoder.layers.35.mlp.dense_h_to_4h.weight": "model-00007-of-00009.safetensors",
392
+ "transformer.encoder.layers.35.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
393
+ "transformer.encoder.layers.35.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
394
+ "transformer.encoder.layers.35.self_attention.dense.bias": "model-00007-of-00009.safetensors",
395
+ "transformer.encoder.layers.35.self_attention.dense.weight": "model-00007-of-00009.safetensors",
396
+ "transformer.encoder.layers.35.self_attention.query_key_value.bias": "model-00007-of-00009.safetensors",
397
+ "transformer.encoder.layers.35.self_attention.query_key_value.weight": "model-00007-of-00009.safetensors",
398
+ "transformer.encoder.layers.35.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
399
+ "transformer.encoder.layers.36.input_layernorm.bias": "model-00007-of-00009.safetensors",
400
+ "transformer.encoder.layers.36.input_layernorm.weight": "model-00007-of-00009.safetensors",
401
+ "transformer.encoder.layers.36.mlp.dense_4h_to_h.bias": "model-00007-of-00009.safetensors",
402
+ "transformer.encoder.layers.36.mlp.dense_4h_to_h.weight": "model-00007-of-00009.safetensors",
403
+ "transformer.encoder.layers.36.mlp.dense_h_to_4h.bias": "model-00007-of-00009.safetensors",
404
+ "transformer.encoder.layers.36.mlp.dense_h_to_4h.weight": "model-00007-of-00009.safetensors",
405
+ "transformer.encoder.layers.36.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
406
+ "transformer.encoder.layers.36.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
407
+ "transformer.encoder.layers.36.self_attention.dense.bias": "model-00007-of-00009.safetensors",
408
+ "transformer.encoder.layers.36.self_attention.dense.weight": "model-00007-of-00009.safetensors",
409
+ "transformer.encoder.layers.36.self_attention.query_key_value.bias": "model-00007-of-00009.safetensors",
410
+ "transformer.encoder.layers.36.self_attention.query_key_value.weight": "model-00007-of-00009.safetensors",
411
+ "transformer.encoder.layers.36.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
412
+ "transformer.encoder.layers.37.input_layernorm.bias": "model-00007-of-00009.safetensors",
413
+ "transformer.encoder.layers.37.input_layernorm.weight": "model-00007-of-00009.safetensors",
414
+ "transformer.encoder.layers.37.mlp.dense_4h_to_h.bias": "model-00008-of-00009.safetensors",
415
+ "transformer.encoder.layers.37.mlp.dense_4h_to_h.weight": "model-00008-of-00009.safetensors",
416
+ "transformer.encoder.layers.37.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
417
+ "transformer.encoder.layers.37.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
418
+ "transformer.encoder.layers.37.post_attention_layernorm.bias": "model-00007-of-00009.safetensors",
419
+ "transformer.encoder.layers.37.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
420
+ "transformer.encoder.layers.37.self_attention.dense.bias": "model-00007-of-00009.safetensors",
421
+ "transformer.encoder.layers.37.self_attention.dense.weight": "model-00007-of-00009.safetensors",
422
+ "transformer.encoder.layers.37.self_attention.query_key_value.bias": "model-00007-of-00009.safetensors",
423
+ "transformer.encoder.layers.37.self_attention.query_key_value.weight": "model-00007-of-00009.safetensors",
424
+ "transformer.encoder.layers.37.self_attention.rotary_emb.inv_freq": "model-00007-of-00009.safetensors",
425
+ "transformer.encoder.layers.38.input_layernorm.bias": "model-00008-of-00009.safetensors",
426
+ "transformer.encoder.layers.38.input_layernorm.weight": "model-00008-of-00009.safetensors",
427
+ "transformer.encoder.layers.38.mlp.dense_4h_to_h.bias": "model-00008-of-00009.safetensors",
428
+ "transformer.encoder.layers.38.mlp.dense_4h_to_h.weight": "model-00008-of-00009.safetensors",
429
+ "transformer.encoder.layers.38.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
430
+ "transformer.encoder.layers.38.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
431
+ "transformer.encoder.layers.38.post_attention_layernorm.bias": "model-00008-of-00009.safetensors",
432
+ "transformer.encoder.layers.38.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
433
+ "transformer.encoder.layers.38.self_attention.dense.bias": "model-00008-of-00009.safetensors",
434
+ "transformer.encoder.layers.38.self_attention.dense.weight": "model-00008-of-00009.safetensors",
435
+ "transformer.encoder.layers.38.self_attention.query_key_value.bias": "model-00008-of-00009.safetensors",
436
+ "transformer.encoder.layers.38.self_attention.query_key_value.weight": "model-00008-of-00009.safetensors",
437
+ "transformer.encoder.layers.38.self_attention.rotary_emb.inv_freq": "model-00008-of-00009.safetensors",
438
+ "transformer.encoder.layers.39.input_layernorm.bias": "model-00008-of-00009.safetensors",
439
+ "transformer.encoder.layers.39.input_layernorm.weight": "model-00008-of-00009.safetensors",
440
+ "transformer.encoder.layers.39.mlp.dense_4h_to_h.bias": "model-00008-of-00009.safetensors",
441
+ "transformer.encoder.layers.39.mlp.dense_4h_to_h.weight": "model-00008-of-00009.safetensors",
442
+ "transformer.encoder.layers.39.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
443
+ "transformer.encoder.layers.39.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
444
+ "transformer.encoder.layers.39.post_attention_layernorm.bias": "model-00008-of-00009.safetensors",
445
+ "transformer.encoder.layers.39.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
446
+ "transformer.encoder.layers.39.self_attention.dense.bias": "model-00008-of-00009.safetensors",
447
+ "transformer.encoder.layers.39.self_attention.dense.weight": "model-00008-of-00009.safetensors",
448
+ "transformer.encoder.layers.39.self_attention.query_key_value.bias": "model-00008-of-00009.safetensors",
449
+ "transformer.encoder.layers.39.self_attention.query_key_value.weight": "model-00008-of-00009.safetensors",
450
+ "transformer.encoder.layers.39.self_attention.rotary_emb.inv_freq": "model-00008-of-00009.safetensors",
451
+ "transformer.encoder.layers.4.input_layernorm.bias": "model-00001-of-00009.safetensors",
452
+ "transformer.encoder.layers.4.input_layernorm.weight": "model-00001-of-00009.safetensors",
453
+ "transformer.encoder.layers.4.mlp.dense_4h_to_h.bias": "model-00001-of-00009.safetensors",
454
+ "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "model-00001-of-00009.safetensors",
455
+ "transformer.encoder.layers.4.mlp.dense_h_to_4h.bias": "model-00001-of-00009.safetensors",
456
+ "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "model-00001-of-00009.safetensors",
457
+ "transformer.encoder.layers.4.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
458
+ "transformer.encoder.layers.4.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
459
+ "transformer.encoder.layers.4.self_attention.dense.bias": "model-00001-of-00009.safetensors",
460
+ "transformer.encoder.layers.4.self_attention.dense.weight": "model-00001-of-00009.safetensors",
461
+ "transformer.encoder.layers.4.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
462
+ "transformer.encoder.layers.4.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
463
+ "transformer.encoder.layers.4.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
464
+ "transformer.encoder.layers.40.input_layernorm.bias": "model-00008-of-00009.safetensors",
465
+ "transformer.encoder.layers.40.input_layernorm.weight": "model-00008-of-00009.safetensors",
466
+ "transformer.encoder.layers.40.mlp.dense_4h_to_h.bias": "model-00008-of-00009.safetensors",
467
+ "transformer.encoder.layers.40.mlp.dense_4h_to_h.weight": "model-00008-of-00009.safetensors",
468
+ "transformer.encoder.layers.40.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
469
+ "transformer.encoder.layers.40.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
470
+ "transformer.encoder.layers.40.post_attention_layernorm.bias": "model-00008-of-00009.safetensors",
471
+ "transformer.encoder.layers.40.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
472
+ "transformer.encoder.layers.40.self_attention.dense.bias": "model-00008-of-00009.safetensors",
473
+ "transformer.encoder.layers.40.self_attention.dense.weight": "model-00008-of-00009.safetensors",
474
+ "transformer.encoder.layers.40.self_attention.query_key_value.bias": "model-00008-of-00009.safetensors",
475
+ "transformer.encoder.layers.40.self_attention.query_key_value.weight": "model-00008-of-00009.safetensors",
476
+ "transformer.encoder.layers.40.self_attention.rotary_emb.inv_freq": "model-00008-of-00009.safetensors",
477
+ "transformer.encoder.layers.41.input_layernorm.bias": "model-00008-of-00009.safetensors",
478
+ "transformer.encoder.layers.41.input_layernorm.weight": "model-00008-of-00009.safetensors",
479
+ "transformer.encoder.layers.41.mlp.dense_4h_to_h.bias": "model-00008-of-00009.safetensors",
480
+ "transformer.encoder.layers.41.mlp.dense_4h_to_h.weight": "model-00008-of-00009.safetensors",
481
+ "transformer.encoder.layers.41.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
482
+ "transformer.encoder.layers.41.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
483
+ "transformer.encoder.layers.41.post_attention_layernorm.bias": "model-00008-of-00009.safetensors",
484
+ "transformer.encoder.layers.41.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
485
+ "transformer.encoder.layers.41.self_attention.dense.bias": "model-00008-of-00009.safetensors",
486
+ "transformer.encoder.layers.41.self_attention.dense.weight": "model-00008-of-00009.safetensors",
487
+ "transformer.encoder.layers.41.self_attention.query_key_value.bias": "model-00008-of-00009.safetensors",
488
+ "transformer.encoder.layers.41.self_attention.query_key_value.weight": "model-00008-of-00009.safetensors",
489
+ "transformer.encoder.layers.41.self_attention.rotary_emb.inv_freq": "model-00008-of-00009.safetensors",
490
+ "transformer.encoder.layers.42.input_layernorm.bias": "model-00008-of-00009.safetensors",
491
+ "transformer.encoder.layers.42.input_layernorm.weight": "model-00008-of-00009.safetensors",
492
+ "transformer.encoder.layers.42.mlp.dense_4h_to_h.bias": "model-00009-of-00009.safetensors",
493
+ "transformer.encoder.layers.42.mlp.dense_4h_to_h.weight": "model-00009-of-00009.safetensors",
494
+ "transformer.encoder.layers.42.mlp.dense_h_to_4h.bias": "model-00008-of-00009.safetensors",
495
+ "transformer.encoder.layers.42.mlp.dense_h_to_4h.weight": "model-00008-of-00009.safetensors",
496
+ "transformer.encoder.layers.42.post_attention_layernorm.bias": "model-00008-of-00009.safetensors",
497
+ "transformer.encoder.layers.42.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
498
+ "transformer.encoder.layers.42.self_attention.dense.bias": "model-00008-of-00009.safetensors",
499
+ "transformer.encoder.layers.42.self_attention.dense.weight": "model-00008-of-00009.safetensors",
500
+ "transformer.encoder.layers.42.self_attention.query_key_value.bias": "model-00008-of-00009.safetensors",
501
+ "transformer.encoder.layers.42.self_attention.query_key_value.weight": "model-00008-of-00009.safetensors",
502
+ "transformer.encoder.layers.42.self_attention.rotary_emb.inv_freq": "model-00008-of-00009.safetensors",
503
+ "transformer.encoder.layers.43.input_layernorm.bias": "model-00009-of-00009.safetensors",
504
+ "transformer.encoder.layers.43.input_layernorm.weight": "model-00009-of-00009.safetensors",
505
+ "transformer.encoder.layers.43.mlp.dense_4h_to_h.bias": "model-00009-of-00009.safetensors",
506
+ "transformer.encoder.layers.43.mlp.dense_4h_to_h.weight": "model-00009-of-00009.safetensors",
507
+ "transformer.encoder.layers.43.mlp.dense_h_to_4h.bias": "model-00009-of-00009.safetensors",
508
+ "transformer.encoder.layers.43.mlp.dense_h_to_4h.weight": "model-00009-of-00009.safetensors",
509
+ "transformer.encoder.layers.43.post_attention_layernorm.bias": "model-00009-of-00009.safetensors",
510
+ "transformer.encoder.layers.43.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
511
+ "transformer.encoder.layers.43.self_attention.dense.bias": "model-00009-of-00009.safetensors",
512
+ "transformer.encoder.layers.43.self_attention.dense.weight": "model-00009-of-00009.safetensors",
513
+ "transformer.encoder.layers.43.self_attention.query_key_value.bias": "model-00009-of-00009.safetensors",
514
+ "transformer.encoder.layers.43.self_attention.query_key_value.weight": "model-00009-of-00009.safetensors",
515
+ "transformer.encoder.layers.43.self_attention.rotary_emb.inv_freq": "model-00009-of-00009.safetensors",
516
+ "transformer.encoder.layers.44.input_layernorm.bias": "model-00009-of-00009.safetensors",
517
+ "transformer.encoder.layers.44.input_layernorm.weight": "model-00009-of-00009.safetensors",
518
+ "transformer.encoder.layers.44.mlp.dense_4h_to_h.bias": "model-00009-of-00009.safetensors",
519
+ "transformer.encoder.layers.44.mlp.dense_4h_to_h.weight": "model-00009-of-00009.safetensors",
520
+ "transformer.encoder.layers.44.mlp.dense_h_to_4h.bias": "model-00009-of-00009.safetensors",
521
+ "transformer.encoder.layers.44.mlp.dense_h_to_4h.weight": "model-00009-of-00009.safetensors",
522
+ "transformer.encoder.layers.44.post_attention_layernorm.bias": "model-00009-of-00009.safetensors",
523
+ "transformer.encoder.layers.44.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
524
+ "transformer.encoder.layers.44.self_attention.dense.bias": "model-00009-of-00009.safetensors",
525
+ "transformer.encoder.layers.44.self_attention.dense.weight": "model-00009-of-00009.safetensors",
526
+ "transformer.encoder.layers.44.self_attention.query_key_value.bias": "model-00009-of-00009.safetensors",
527
+ "transformer.encoder.layers.44.self_attention.query_key_value.weight": "model-00009-of-00009.safetensors",
528
+ "transformer.encoder.layers.44.self_attention.rotary_emb.inv_freq": "model-00009-of-00009.safetensors",
529
+ "transformer.encoder.layers.45.input_layernorm.bias": "model-00009-of-00009.safetensors",
530
+ "transformer.encoder.layers.45.input_layernorm.weight": "model-00009-of-00009.safetensors",
531
+ "transformer.encoder.layers.45.mlp.dense_4h_to_h.bias": "model-00009-of-00009.safetensors",
532
+ "transformer.encoder.layers.45.mlp.dense_4h_to_h.weight": "model-00009-of-00009.safetensors",
533
+ "transformer.encoder.layers.45.mlp.dense_h_to_4h.bias": "model-00009-of-00009.safetensors",
534
+ "transformer.encoder.layers.45.mlp.dense_h_to_4h.weight": "model-00009-of-00009.safetensors",
535
+ "transformer.encoder.layers.45.post_attention_layernorm.bias": "model-00009-of-00009.safetensors",
536
+ "transformer.encoder.layers.45.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
537
+ "transformer.encoder.layers.45.self_attention.dense.bias": "model-00009-of-00009.safetensors",
538
+ "transformer.encoder.layers.45.self_attention.dense.weight": "model-00009-of-00009.safetensors",
539
+ "transformer.encoder.layers.45.self_attention.query_key_value.bias": "model-00009-of-00009.safetensors",
540
+ "transformer.encoder.layers.45.self_attention.query_key_value.weight": "model-00009-of-00009.safetensors",
541
+ "transformer.encoder.layers.45.self_attention.rotary_emb.inv_freq": "model-00009-of-00009.safetensors",
542
+ "transformer.encoder.layers.46.input_layernorm.bias": "model-00009-of-00009.safetensors",
543
+ "transformer.encoder.layers.46.input_layernorm.weight": "model-00009-of-00009.safetensors",
544
+ "transformer.encoder.layers.46.mlp.dense_4h_to_h.bias": "model-00009-of-00009.safetensors",
545
+ "transformer.encoder.layers.46.mlp.dense_4h_to_h.weight": "model-00009-of-00009.safetensors",
546
+ "transformer.encoder.layers.46.mlp.dense_h_to_4h.bias": "model-00009-of-00009.safetensors",
547
+ "transformer.encoder.layers.46.mlp.dense_h_to_4h.weight": "model-00009-of-00009.safetensors",
548
+ "transformer.encoder.layers.46.post_attention_layernorm.bias": "model-00009-of-00009.safetensors",
549
+ "transformer.encoder.layers.46.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
550
+ "transformer.encoder.layers.46.self_attention.dense.bias": "model-00009-of-00009.safetensors",
551
+ "transformer.encoder.layers.46.self_attention.dense.weight": "model-00009-of-00009.safetensors",
552
+ "transformer.encoder.layers.46.self_attention.query_key_value.bias": "model-00009-of-00009.safetensors",
553
+ "transformer.encoder.layers.46.self_attention.query_key_value.weight": "model-00009-of-00009.safetensors",
554
+ "transformer.encoder.layers.46.self_attention.rotary_emb.inv_freq": "model-00009-of-00009.safetensors",
555
+ "transformer.encoder.layers.5.input_layernorm.bias": "model-00001-of-00009.safetensors",
556
+ "transformer.encoder.layers.5.input_layernorm.weight": "model-00001-of-00009.safetensors",
557
+ "transformer.encoder.layers.5.mlp.dense_4h_to_h.bias": "model-00002-of-00009.safetensors",
558
+ "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "model-00002-of-00009.safetensors",
559
+ "transformer.encoder.layers.5.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
560
+ "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
561
+ "transformer.encoder.layers.5.post_attention_layernorm.bias": "model-00001-of-00009.safetensors",
562
+ "transformer.encoder.layers.5.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
563
+ "transformer.encoder.layers.5.self_attention.dense.bias": "model-00001-of-00009.safetensors",
564
+ "transformer.encoder.layers.5.self_attention.dense.weight": "model-00001-of-00009.safetensors",
565
+ "transformer.encoder.layers.5.self_attention.query_key_value.bias": "model-00001-of-00009.safetensors",
566
+ "transformer.encoder.layers.5.self_attention.query_key_value.weight": "model-00001-of-00009.safetensors",
567
+ "transformer.encoder.layers.5.self_attention.rotary_emb.inv_freq": "model-00001-of-00009.safetensors",
568
+ "transformer.encoder.layers.6.input_layernorm.bias": "model-00002-of-00009.safetensors",
569
+ "transformer.encoder.layers.6.input_layernorm.weight": "model-00002-of-00009.safetensors",
570
+ "transformer.encoder.layers.6.mlp.dense_4h_to_h.bias": "model-00002-of-00009.safetensors",
571
+ "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "model-00002-of-00009.safetensors",
572
+ "transformer.encoder.layers.6.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
573
+ "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
574
+ "transformer.encoder.layers.6.post_attention_layernorm.bias": "model-00002-of-00009.safetensors",
575
+ "transformer.encoder.layers.6.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
576
+ "transformer.encoder.layers.6.self_attention.dense.bias": "model-00002-of-00009.safetensors",
577
+ "transformer.encoder.layers.6.self_attention.dense.weight": "model-00002-of-00009.safetensors",
578
+ "transformer.encoder.layers.6.self_attention.query_key_value.bias": "model-00002-of-00009.safetensors",
579
+ "transformer.encoder.layers.6.self_attention.query_key_value.weight": "model-00002-of-00009.safetensors",
580
+ "transformer.encoder.layers.6.self_attention.rotary_emb.inv_freq": "model-00002-of-00009.safetensors",
581
+ "transformer.encoder.layers.7.input_layernorm.bias": "model-00002-of-00009.safetensors",
582
+ "transformer.encoder.layers.7.input_layernorm.weight": "model-00002-of-00009.safetensors",
583
+ "transformer.encoder.layers.7.mlp.dense_4h_to_h.bias": "model-00002-of-00009.safetensors",
584
+ "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "model-00002-of-00009.safetensors",
585
+ "transformer.encoder.layers.7.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
586
+ "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
587
+ "transformer.encoder.layers.7.post_attention_layernorm.bias": "model-00002-of-00009.safetensors",
588
+ "transformer.encoder.layers.7.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
589
+ "transformer.encoder.layers.7.self_attention.dense.bias": "model-00002-of-00009.safetensors",
590
+ "transformer.encoder.layers.7.self_attention.dense.weight": "model-00002-of-00009.safetensors",
591
+ "transformer.encoder.layers.7.self_attention.query_key_value.bias": "model-00002-of-00009.safetensors",
592
+ "transformer.encoder.layers.7.self_attention.query_key_value.weight": "model-00002-of-00009.safetensors",
593
+ "transformer.encoder.layers.7.self_attention.rotary_emb.inv_freq": "model-00002-of-00009.safetensors",
594
+ "transformer.encoder.layers.8.input_layernorm.bias": "model-00002-of-00009.safetensors",
595
+ "transformer.encoder.layers.8.input_layernorm.weight": "model-00002-of-00009.safetensors",
596
+ "transformer.encoder.layers.8.mlp.dense_4h_to_h.bias": "model-00002-of-00009.safetensors",
597
+ "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "model-00002-of-00009.safetensors",
598
+ "transformer.encoder.layers.8.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
599
+ "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
600
+ "transformer.encoder.layers.8.post_attention_layernorm.bias": "model-00002-of-00009.safetensors",
601
+ "transformer.encoder.layers.8.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
602
+ "transformer.encoder.layers.8.self_attention.dense.bias": "model-00002-of-00009.safetensors",
603
+ "transformer.encoder.layers.8.self_attention.dense.weight": "model-00002-of-00009.safetensors",
604
+ "transformer.encoder.layers.8.self_attention.query_key_value.bias": "model-00002-of-00009.safetensors",
605
+ "transformer.encoder.layers.8.self_attention.query_key_value.weight": "model-00002-of-00009.safetensors",
606
+ "transformer.encoder.layers.8.self_attention.rotary_emb.inv_freq": "model-00002-of-00009.safetensors",
607
+ "transformer.encoder.layers.9.input_layernorm.bias": "model-00002-of-00009.safetensors",
608
+ "transformer.encoder.layers.9.input_layernorm.weight": "model-00002-of-00009.safetensors",
609
+ "transformer.encoder.layers.9.mlp.dense_4h_to_h.bias": "model-00002-of-00009.safetensors",
610
+ "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "model-00002-of-00009.safetensors",
611
+ "transformer.encoder.layers.9.mlp.dense_h_to_4h.bias": "model-00002-of-00009.safetensors",
612
+ "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "model-00002-of-00009.safetensors",
613
+ "transformer.encoder.layers.9.post_attention_layernorm.bias": "model-00002-of-00009.safetensors",
614
+ "transformer.encoder.layers.9.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
615
+ "transformer.encoder.layers.9.self_attention.dense.bias": "model-00002-of-00009.safetensors",
616
+ "transformer.encoder.layers.9.self_attention.dense.weight": "model-00002-of-00009.safetensors",
617
+ "transformer.encoder.layers.9.self_attention.query_key_value.bias": "model-00002-of-00009.safetensors",
618
+ "transformer.encoder.layers.9.self_attention.query_key_value.weight": "model-00002-of-00009.safetensors",
619
+ "transformer.encoder.layers.9.self_attention.rotary_emb.inv_freq": "model-00002-of-00009.safetensors",
620
+ "transformer.output_layer.weight": "model-00009-of-00009.safetensors"
621
+ }
622
+ }
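The weight_map above completes the sharded checkpoint index: every parameter, through transformer.output_layer.weight, is assigned to one of the nine safetensors shards, and transformers resolves the correct shard from this index automatically at load time. For manual inspection, a minimal sketch of reading the index by hand (assuming a local copy of the repo with the shard files next to model.safetensors.index.json and the safetensors package installed):

import json
from safetensors import safe_open

# Map each parameter name to the shard file that stores it.
with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

# Look up one parameter and read only that tensor from its shard.
shard_file = weight_map["transformer.output_layer.weight"]  # "model-00009-of-00009.safetensors"
with safe_open(shard_file, framework="pt", device="cpu") as shard:
    output_layer_weight = shard.get_tensor("transformer.output_layer.weight")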
modeling_xtrimopglm.py ADDED
@@ -0,0 +1,1573 @@
1
+ """ PyTorch xTrimoPGLM model. """
2
+
3
+ import math
4
+ import copy
5
+ import warnings
6
+ import re
7
+ import sys
8
+ import os
9
+ import pathlib
10
+ import time
11
+ import random
12
+ import numpy as np
13
+ from tqdm.auto import tqdm
14
+
15
+ import torch, deepspeed
16
+ import torch.utils.checkpoint
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+ from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
20
+ from torch.nn.utils import skip_init
21
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
22
+ from copy import deepcopy
23
+ from collections import namedtuple
24
+
25
+ from transformers.modeling_outputs import (
26
+ BaseModelOutputWithPast,
27
+ MaskedLMOutput,
28
+ CausalLMOutputWithPast,
29
+ SequenceClassifierOutput,
30
+ TokenClassifierOutput
31
+ )
32
+ from transformers import PreTrainedModel
33
+ from transformers.utils import logging
34
+ from transformers.generation.logits_process import LogitsProcessor
35
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
36
+
37
+ from .configuration_xtrimopglm import xTrimoPGLMConfig
38
+ from .quantization import quantize
39
+
40
+ def get_checkpoint_fn():
41
+ if deepspeed.checkpointing.is_configured():
42
+ checkpoint = deepspeed.checkpointing.checkpoint
43
+ else:
44
+ checkpoint = torch.utils.checkpoint.checkpoint
45
+ return checkpoint
46
+
47
+ # flags required to enable jit fusion kernels
48
+
49
+ if sys.platform != 'darwin':
50
+ torch._C._jit_set_profiling_mode(False)
51
+ torch._C._jit_set_profiling_executor(False)
52
+ torch._C._jit_override_can_fuse_on_cpu(True)
53
+ torch._C._jit_override_can_fuse_on_gpu(True)
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CHECKPOINT_FOR_DOC = "BioMap/xtrimopglm-100b-int4"
58
+ _CONFIG_FOR_DOC = "xTrimoPGLMConfig"
59
+ DeepNormCoefficients = namedtuple("DeepNormCoefficients", ["alpha", "beta"])
60
+
61
+ def default_init(cls, *args, **kwargs):
62
+ return cls(*args, **kwargs)
63
+
64
+
65
+ def get_deepnorm_coefficients(config: xTrimoPGLMConfig):
66
+ """
67
+ DeepNorm coefficients from: https://kexue.fm/archives/8978
68
+ """
69
+ num_layers = config.num_layers
70
+ return DeepNormCoefficients(alpha=(2 * num_layers) ** 0.5, beta=(2 * num_layers) ** -0.5)
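+ # In other words: alpha = sqrt(2 * num_layers) rescales the residual branch in xTrimoPGLMBlock below,
+ # while beta = 1 / sqrt(2 * num_layers) is the DeepNorm initialization gain (not consumed elsewhere in this file).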
71
+
72
+
73
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
74
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
75
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
76
+ scores.zero_()
77
+ scores[..., 5] = 5e4
78
+ return scores
79
+
80
+
81
+ def split_tensor_along_last_dim(
82
+ tensor: torch.Tensor,
83
+ num_partitions: int,
84
+ contiguous_split_chunks: bool = False,
85
+ ) -> List[torch.Tensor]:
86
+ """Split a tensor along its last dimension.
87
+
88
+ Arguments:
89
+ tensor: input tensor.
90
+ num_partitions: number of partitions to split the tensor
91
+ contiguous_split_chunks: If True, make each chunk contiguous
92
+ in memory.
93
+
94
+ Returns:
95
+ A list of Tensors
96
+ """
97
+ # Get the size and dimension.
98
+ last_dim = tensor.dim() - 1
99
+ last_dim_size = tensor.size()[last_dim] // num_partitions
100
+ # Split.
101
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
102
+ # Note: torch.split does not create contiguous tensors by default.
103
+ if contiguous_split_chunks:
104
+ return tuple(chunk.contiguous() for chunk in tensor_list)
105
+
106
+ return tensor_list
107
+
108
+ class RotaryEmbedding(torch.nn.Module):
109
+
110
+ def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
111
+ super().__init__()
112
+ inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)).to(precision)
113
+ self.dim = dim
114
+ self.base = base
115
+ self.learnable = learnable
116
+ if learnable:
117
+ self.inv_freq = torch.nn.Parameter(inv_freq)
118
+ self.max_seq_len_cached = None
119
+ else:
120
+ self.register_buffer('inv_freq', inv_freq)
121
+ self.max_seq_len_cached = None
122
+ self.cos_cached = None
123
+ self.sin_cached = None
124
+ self.precision = precision
125
+
126
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
127
+ if f'{prefix}inv_freq' in state_dict:
128
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
129
+ else:
130
+ self.inv_freq.copy_(1. / (self.base ** (torch.arange(0, self.dim, 2).float() / self.dim)).to(self.precision))
131
+
132
+ def forward(self, x, seq_dim=1, seq_len=None):
133
+ if seq_len is None:
134
+ seq_len = x.shape[seq_dim]
135
+ if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
136
+ self.max_seq_len_cached = None if self.learnable else seq_len
137
+ t = torch.arange(seq_len, device=x.device, dtype=torch.float32)
138
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq.to(x.device))
139
+ # The permutation differs from the paper, but it produces the same result
140
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
141
+ if self.precision == torch.bfloat16 or self.precision == torch.half:
142
+ emb = emb.float()
143
+ # [sx, 1 (b * np), hn]
144
+ cos_cached = emb.cos()[:, None, :]
145
+ sin_cached = emb.sin()[:, None, :]
146
+ if self.precision == torch.bfloat16:
147
+ cos_cached = cos_cached.bfloat16()
148
+ sin_cached = sin_cached.bfloat16()
149
+ elif self.precision == torch.half:
150
+ cos_cached = cos_cached.half()
151
+ sin_cached = sin_cached.half()
152
+ if self.learnable:
153
+ return cos_cached, sin_cached
154
+ self.cos_cached, self.sin_cached = cos_cached, sin_cached
155
+ return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]
156
+
157
+ def rotate_half(x):
158
+ x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
159
+ return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions
160
+
161
+ def assert_dim_check(tensor, ndim=None, shape=None):
162
+ if ndim is not None:
163
+ assert tensor.ndim == ndim, f"Expect tensor.ndim={ndim}, but got tensor.shape={tensor.shape}"
164
+ if shape is not None:
165
+ assert list(tensor.shape) == list(shape), f"Expect tensor.shape={shape}, but got tensor.shape={tensor.shape}"
166
+
167
+ def apply_rotary_pos_emb_index_torch(q, k, cos, sin, position_id): # jitting fails with bf16
168
+ # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
169
+ cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
170
+ F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
171
+ q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
172
+ return q, k
173
+
174
+ class RMSNorm(torch.nn.Module):
175
+ def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
176
+ super().__init__()
177
+ self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
178
+ self.eps = eps
179
+
180
+ def forward(self, hidden_states: torch.Tensor):
181
+ input_dtype = hidden_states.dtype
182
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
183
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
184
+
185
+ return (self.weight * hidden_states).to(input_dtype)
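+ # i.e. RMSNorm(x) = weight * x / sqrt(mean(x^2) + eps), with the variance accumulated in fp32 and the
+ # result cast back to the input dtype; it replaces LayerNorm only when config.rmsnorm is True.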
186
+
187
+ class CoreAttention(torch.nn.Module):
188
+ def __init__(self, config: xTrimoPGLMConfig, layer_number):
189
+ super(CoreAttention, self).__init__()
190
+
191
+ self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
192
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
193
+ if self.apply_query_key_layer_scaling:
194
+ self.attention_softmax_in_fp32 = True
195
+ self.layer_number = max(1, layer_number)
196
+
197
+ projection_size = config.kv_channels * config.num_attention_heads
198
+
199
+ # Per attention head and per partition values.
200
+ self.hidden_size_per_partition = projection_size
201
+ self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
202
+ self.num_attention_heads_per_partition = config.num_attention_heads
203
+
204
+ coeff = None
205
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
206
+ if self.apply_query_key_layer_scaling:
207
+ coeff = self.layer_number
208
+ self.norm_factor *= coeff
209
+ self.coeff = coeff
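+ # Note (manual attention path only): raw scores are computed with scale 1 / (layer_number * sqrt(head_dim))
+ # and multiplied back by self.coeff after the fp32 cast in forward(), so softmax still sees the usual
+ # 1 / sqrt(head_dim) scaling while the half-precision matmul output stays small (Megatron-style stabilization).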
210
+
211
+ self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
212
+
213
+ self.is_causal = config.is_causal
214
+ self.use_pytorch_sdpa = config.use_pytorch_sdpa
215
+
216
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
217
+ # query_layer, key_layer, value_layer: [seq_len, batch_size, num_heads, head_dim]
218
+ # import pdb; pdb.set_trace();
219
+ pytorch_major_version = int(torch.__version__.split('.')[0])
220
+ # assert pytorch_major_version >= 2, f"Expect PyTorch version > 2.0"
221
+ if pytorch_major_version >= 2 and self.use_pytorch_sdpa:
222
+ dropout_p = self.attention_dropout.p if self.training else 0
223
+ # [seq_len, batch_size, num_heads, head_dim] -> [batch_size, num_heads, seq_len, head_dim]
224
+ query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]
225
+ # import pdb; pdb.set_trace();
226
+ if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
227
+ # context_layer: [batch_size, num_heads, seq_len, head_dim]
228
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, is_causal=self.is_causal, dropout_p=dropout_p)
229
+ else:
230
+ if (attention_mask is not None) and (attention_mask.dtype == torch.bool):
231
+ attention_mask = attention_mask.logical_not() ## DO NOT inplace operation!!!!
232
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attention_mask, dropout_p=dropout_p)
233
+ # [batch_size, num_heads, seq_len, head_dim] -> [seq_len, batch_size, num_heads, head_dim]
234
+ context_layer = context_layer.permute(2, 0, 1, 3)
235
+ # [seq_len, batch_size, 2560]
236
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
237
+ context_layer = context_layer.reshape(*new_context_layer_shape)
238
+ else:
239
+ # Raw attention scores
240
+
241
+ # [b, np, sq, sk]
242
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
243
+
244
+ # [sq, b, np, hn] -> [sq, b * np, hn]
245
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
246
+ # [sk, b, np, hn] -> [sk, b * np, hn]
247
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
248
+
249
+ # preallocting input tensor: [b * np, sq, sk]
250
+ matmul_input_buffer = torch.empty(
251
+ output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
252
+ device=query_layer.device
253
+ )
254
+
255
+ # Raw attention scores. [b * np, sq, sk]
256
+ matmul_result = torch.baddbmm(
257
+ matmul_input_buffer,
258
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
259
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
260
+ beta=0.0,
261
+ alpha=(1.0 / self.norm_factor),
262
+ )
263
+
264
+ # change view to [b, np, sq, sk]
265
+ attention_scores = matmul_result.view(*output_size)
266
+
267
+ # ===========================
268
+ # Attention probs and dropout
269
+ # ===========================
270
+
271
+ # attention scores and attention mask [b, np, sq, sk]
272
+ if self.attention_softmax_in_fp32:
273
+ attention_scores = attention_scores.float()
274
+ if self.coeff is not None:
275
+ attention_scores = attention_scores * self.coeff
276
+ if self.is_causal and attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
277
+ attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
278
+ device=attention_scores.device, dtype=torch.bool)
279
+ attention_mask.tril_()
280
+ attention_mask = ~attention_mask
281
+ if attention_mask is not None:
282
+ attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
283
+ attention_probs = F.softmax(attention_scores, dim=-1)
284
+ attention_probs = attention_probs.type_as(value_layer)
285
+
286
+ # This is actually dropping out entire tokens to attend to, which might
287
+ # seem a bit unusual, but is taken from the original Transformer paper.
288
+ attention_probs = self.attention_dropout(attention_probs)
289
+ # =========================
290
+ # Context layer. [sq, b, hp]
291
+ # =========================
292
+
293
+ # value_layer -> context layer.
294
+ # [sk, b, np, hn] --> [b, np, sq, hn]
295
+
296
+ # context layer shape: [b, np, sq, hn]
297
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
298
+ # change view [sk, b * np, hn]
299
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
300
+ # change view [b * np, sq, sk]
301
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
302
+ # matmul: [b * np, sq, hn]
303
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
304
+ # change view [b, np, sq, hn]
305
+ context_layer = context_layer.view(*output_size)
306
+ # [b, np, sq, hn] --> [sq, b, np, hn]
307
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
308
+ # [sq, b, np, hn] --> [sq, b, hp]
309
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
310
+ context_layer = context_layer.view(*new_context_layer_shape)
311
+
312
+ return context_layer
313
+
314
+
315
+ class SelfAttention(torch.nn.Module):
316
+ """Parallel self-attention layer abstract class.
317
+
318
+ Self-attention layer takes input with size [s, b, h]
319
+ and returns output of the same size.
320
+ """
321
+
322
+ def __init__(self, config: xTrimoPGLMConfig, layer_number, device=None):
323
+ super(SelfAttention, self).__init__()
324
+ self.layer_number = max(1, layer_number)
325
+
326
+ self.projection_size = config.kv_channels * config.num_attention_heads
327
+
328
+ # Per attention head and per partition values.
329
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
330
+ self.num_attention_heads_per_partition = config.num_attention_heads
331
+
332
+ self.multi_query_attention = config.multi_query_attention
333
+ self.qkv_hidden_size = 3 * self.projection_size
334
+ if self.multi_query_attention:
335
+ self.num_multi_query_groups_per_partition = config.multi_query_group_num
336
+ self.qkv_hidden_size = (
337
+ self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
338
+ )
339
+ self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
340
+ bias=config.add_bias_linear or config.add_qkv_bias,
341
+ device=device, **_config_to_kwargs(config)
342
+ )
343
+
344
+ self.core_attention = CoreAttention(config, self.layer_number)
345
+
346
+ # Output.
347
+ self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, device=device, **_config_to_kwargs(config))
348
+
349
+ self.rotary_embedding_2d = config.rotary_embedding_2d
350
+ # dim, base=10000, precision=torch.half, learnable=False
351
+ self.rotary_emb = RotaryEmbedding(self.hidden_size_per_attention_head // 2 if self.rotary_embedding_2d else self.hidden_size_per_attention_head,
352
+ base=10000, precision=config.torch_dtype, learnable=False)
353
+
354
+
355
+ def forward(
356
+ self, hidden_states, attention_mask, position_ids, kv_cache=None, use_cache=True
357
+ ):
358
+ # hidden_states: [sq, b, h]
359
+
360
+ # =================================================
361
+ # Pre-allocate memory for key-values for inference.
362
+ # =================================================
363
+ # =====================
364
+ # Query, Key, and Value
365
+ # =====================
366
+
367
+ # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
368
+ mixed_x_layer = self.query_key_value(hidden_states)
369
+
370
+ if self.multi_query_attention:
371
+ (query_layer, key_layer, value_layer) = mixed_x_layer.split(
372
+ [
373
+ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
374
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
375
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
376
+ ],
377
+ dim=-1,
378
+ )
379
+ query_layer = query_layer.view(
380
+ query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
381
+ )
382
+ key_layer = key_layer.view(
383
+ key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
384
+ )
385
+ value_layer = value_layer.view(
386
+ value_layer.size()[:-1]
387
+ + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
388
+ )
389
+ else:
390
+ new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head)
391
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
392
+ # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
393
+ (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
394
+
395
+ # apply relative positional encoding (rotary embedding)
396
+ if position_ids is not None: # [seq_len, 2, batch_size, 32, 2]
397
+
398
+ if self.rotary_embedding_2d:
399
+ q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1)) # 32
400
+ k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
401
+ # import pdb; pdb.set_trace();
402
+ cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1) # 32
403
+ position_ids, block_position_ids = \
404
+ position_ids[:, 0, :].transpose(0, 1).contiguous(), \
405
+ position_ids[:, 1, :].transpose(0, 1).contiguous()
406
+ q1, k1 = apply_rotary_pos_emb_index_torch(q1, k1, cos, sin, position_ids)
407
+ q2, k2 = apply_rotary_pos_emb_index_torch(q2, k2, cos, sin, block_position_ids)
408
+ query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
409
+ key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
410
+ else:
411
+ # [b, sq] -> [sq, b]
412
+ position_ids = position_ids.transpose(0, 1)
413
+ cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
414
+ query_layer, key_layer = apply_rotary_pos_emb_index_torch(query_layer, key_layer, cos, sin, position_ids)
415
+
416
+ # adjust key and value for inference
417
+ if kv_cache is not None:
418
+ cache_k, cache_v = kv_cache
419
+ key_layer = torch.cat((cache_k, key_layer), dim=0)
420
+ value_layer = torch.cat((cache_v, value_layer), dim=0)
421
+ if use_cache:
422
+ kv_cache = (key_layer, value_layer)
423
+ else:
424
+ kv_cache = None
425
+
426
+ if self.multi_query_attention:
427
+ key_layer = key_layer.unsqueeze(-2)
428
+ key_layer = key_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1)
429
+ key_layer = key_layer.contiguous().view(key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
430
+ value_layer = value_layer.unsqueeze(-2)
431
+ value_layer = value_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1)
432
+ value_layer = value_layer.contiguous().view(value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
433
+
434
+ # ==================================
435
+ # core attention computation
436
+ # ==================================
437
+
438
+ context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) # context_layer: [seq_len, batch_size, num_heads*head_dim]
439
+ output = self.dense(context_layer)
440
+ # =================
441
+ # Output. [sq, b, h]
442
+ # =================
443
+
444
+ # output = context_layer @ self.dense.weight.T + self.dense.bias
445
+ return output, kv_cache
446
+
447
+
448
+ def _config_to_kwargs(args):
449
+ common_kwargs = {
450
+ "dtype": args.torch_dtype,
451
+ }
452
+ return common_kwargs
453
+
454
+
455
+ class MLP(torch.nn.Module):
456
+ """MLP.
457
+
458
+ MLP will take the input with h hidden state, project it to 4*h
459
+ hidden dimension, perform nonlinear transformation, and project the
460
+ state back into h hidden dimension.
461
+ """
462
+
463
+ def __init__(self, config: xTrimoPGLMConfig, device=None):
464
+ super(MLP, self).__init__()
465
+
466
+ self.add_bias = config.add_bias_linear
467
+ self.moe = config.moe
468
+ self.num_experts = config.num_experts
469
+ self.experts_per_token = config.experts_per_token # 2
470
+
471
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
472
+ self.dense_h_to_4h = nn.Linear(
473
+ config.hidden_size,
474
+ config.ffn_hidden_size * 2,
475
+ bias=self.add_bias,
476
+ device=device,
477
+ **_config_to_kwargs(config)
478
+ )
479
+
480
+ def swiglu(x):
481
+ x = torch.chunk(x, 2, dim=-1)
482
+ return x[0] * F.silu(x[1])
483
+
484
+ def geglu(x):
485
+ x = torch.chunk(x, 2, dim=-1)
486
+ return x[0] * F.gelu(x[1])
487
+
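+ # Both gated activations split the doubled dense_h_to_4h output into halves (a, b) and return
+ # a * gelu(b) (geglu) or a * silu(b) (swiglu); this is why dense_h_to_4h projects to ffn_hidden_size * 2
+ # while dense_4h_to_h consumes only ffn_hidden_size.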
488
+ if config.glu_activation == 'geglu':
489
+ self.activation_func = geglu
490
+ elif config.glu_activation == 'swiglu':
491
+ self.activation_func = swiglu
492
+ else:
493
+ raise RuntimeError(f"Unsupported glu_activation: {config.glu_activation}")
494
+
495
+ # Project back to h.
496
+ self.dense_4h_to_h = nn.Linear(
497
+ config.ffn_hidden_size,
498
+ config.hidden_size,
499
+ bias=self.add_bias,
500
+ device=device,
501
+ **_config_to_kwargs(config)
502
+ )
503
+
504
+ if self.moe:
505
+ assert self.num_experts > 1
506
+ del self.dense_h_to_4h
507
+ del self.dense_4h_to_h
508
+ self.router = nn.Linear(
509
+ config.hidden_size,
510
+ config.num_experts,
511
+ bias=False,
512
+ device=device,
513
+ dtype=torch.float32
514
+ )
515
+ for i in range(0, self.num_experts):
516
+ self.register_module(f"dense_h_to_4h_{i}", nn.Linear(
517
+ config.hidden_size,
518
+ config.ffn_hidden_size * 2,
519
+ bias=self.add_bias,
520
+ device=device,
521
+ **_config_to_kwargs(config)
522
+ ))
523
+ self.register_module(f"dense_4h_to_h_{i}", nn.Linear(
524
+ config.ffn_hidden_size,
525
+ config.hidden_size,
526
+ bias=self.add_bias,
527
+ device=device,
528
+ **_config_to_kwargs(config)
529
+ ))
530
+
531
+ def moe_forward(self, hidden_states, expert_idx):
532
+ intermediate_parallel = getattr(self, f"dense_h_to_4h_{expert_idx}")(hidden_states)
533
+ intermediate_parallel = self.activation_func(intermediate_parallel)
534
+ output = getattr(self, f"dense_4h_to_h_{expert_idx}")(intermediate_parallel)
535
+ return output
536
+
537
+ def forward(self, hidden_states):
538
+ if self.moe:
539
+ # import pdb; pdb.set_trace();
540
+ s, b, n = hidden_states.shape
541
+ dtype = hidden_states.dtype
542
+ hidden_states = hidden_states.view(-1, hidden_states.size(2)) # [s*b h]
543
+ route = self.router(hidden_states).to(dtype)
544
+
545
+ weights, selected_experts = torch.topk(route, self.experts_per_token)
546
+ weights = F.softmax(weights, dim=1, dtype=torch.float).to(hidden_states.dtype)
547
+ output = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
548
+ for expert_idx in range(self.num_experts):
549
+ batch_idx, nth_expert = torch.where(selected_experts == expert_idx)
550
+ if nth_expert.shape[0] == 0:
551
+ continue
552
+ cur_out = self.moe_forward(hidden_states[batch_idx], expert_idx)
553
+ output[batch_idx] += weights[batch_idx, nth_expert, None] * cur_out
554
+ output = output.reshape(s, b, n)
555
+ else:
556
+ # [s, b, 4hp]
557
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
558
+ intermediate_parallel = self.activation_func(intermediate_parallel)
559
+ # [s, b, h]
560
+ output = self.dense_4h_to_h(intermediate_parallel)
561
+ return output
562
+
563
+ class xTrimoPGLMBlock(torch.nn.Module):
564
+ """A single transformer layer.
565
+
566
+ Transformer layer takes input with size [s, b, h] and returns an
567
+ output of the same size.
568
+ """
569
+
570
+ def __init__(self, config: xTrimoPGLMConfig, layer_number, device=None):
571
+ super(xTrimoPGLMBlock, self).__init__()
572
+ self.layer_number = layer_number
573
+
574
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
575
+
576
+ self.fp32_residual_connection = config.fp32_residual_connection
577
+
578
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
579
+ # Layernorm on the input data.
580
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
581
+
582
+ # Self attention.
583
+ self.self_attention = SelfAttention(config, layer_number, device=device)
584
+ self.hidden_dropout = config.hidden_dropout
585
+
586
+ # Layernorm on the attention output
587
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
588
+
589
+ # MLP
590
+ self.mlp = MLP(config, device=device)
591
+
592
+ self.deepnorm_coeff = get_deepnorm_coefficients(config) if config.deepnorm else None
593
+
594
+ def forward(
595
+ self, hidden_states, attention_mask, position_ids, kv_cache=None, use_cache=True,
596
+ ):
597
+ # hidden_states: [s, b, h]
598
+ # Layer norm at the beginning of the transformer layer.
599
+ layernorm_output = self.input_layernorm(hidden_states)
600
+ # Self attention.
601
+ attention_output, kv_cache = self.self_attention(
602
+ layernorm_output,
603
+ attention_mask,
604
+ position_ids, # [batch_size, 2, seq_len, 32, 2]
605
+ kv_cache=kv_cache,
606
+ use_cache=use_cache
607
+ )
608
+
609
+ # Residual connection.
610
+ if self.apply_residual_connection_post_layernorm:
611
+ residual = layernorm_output
612
+ else:
613
+ residual = hidden_states
614
+
615
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
616
+ if self.deepnorm_coeff is not None:
617
+ layernorm_input = residual*self.deepnorm_coeff.alpha + layernorm_input
618
+ else:
619
+ layernorm_input = residual + layernorm_input
620
+
621
+ # Layer norm post the self attention.
622
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
623
+
624
+ # MLP.
625
+ mlp_output = self.mlp(layernorm_output)
626
+
627
+ # Second residual connection.
628
+ if self.apply_residual_connection_post_layernorm:
629
+ residual = layernorm_output
630
+ else:
631
+ residual = layernorm_input
632
+
633
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
634
+ if self.deepnorm_coeff is not None:
635
+ output = residual*self.deepnorm_coeff.alpha + output
636
+ else:
637
+ #print(f"2 self.deepnorm_coeff is None")
638
+ output = residual + output
639
+
640
+ return output, kv_cache
641
+
642
+
643
+ class xTrimoPGLMTransformer(torch.nn.Module):
644
+ """Transformer class."""
645
+
646
+ def __init__(self, config: xTrimoPGLMConfig, device=None):
647
+ super(xTrimoPGLMTransformer, self).__init__()
648
+
649
+ self.fp32_residual_connection = config.fp32_residual_connection
650
+ self.post_layer_norm = config.post_layer_norm
651
+
652
+ # Number of layers.
653
+ self.num_layers = config.num_layers
654
+
655
+ # Transformer layers.
656
+ def build_layer(layer_number):
657
+ return xTrimoPGLMBlock(config, layer_number, device=device)
658
+
659
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
660
+
661
+ if self.post_layer_norm:
662
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
663
+ # Final layer norm before output.
664
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
665
+
666
+ self.gradient_checkpointing = False
667
+
668
+ def _get_layer(self, layer_number):
669
+ return self.layers[layer_number]
670
+
671
+ def forward(
672
+ self, hidden_states, attention_mask, position_ids, kv_caches=None,
673
+ use_cache: Optional[bool] = True,
674
+ output_hidden_states: Optional[bool] = False,
675
+ ):
676
+ if not kv_caches:
677
+ kv_caches = [None for _ in range(self.num_layers)]
678
+ presents = () if use_cache else None
679
+ if self.gradient_checkpointing and self.training:
680
+ if use_cache:
681
+ logger.warning_once(
682
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
683
+ )
684
+ use_cache = False
685
+
686
+ all_self_attentions = None
687
+ all_hidden_states = () if output_hidden_states else None
688
+ for index in range(self.num_layers):
689
+ if output_hidden_states:
690
+ all_hidden_states = all_hidden_states + (hidden_states,)
691
+
692
+ layer = self._get_layer(index)
693
+ if self.gradient_checkpointing and self.training and torch.is_grad_enabled():
694
+ layer_ret = get_checkpoint_fn()(
695
+ layer,
696
+ hidden_states,
697
+ attention_mask,
698
+ position_ids,
699
+ kv_caches[index],
700
+ use_cache
701
+ )
702
+ else:
703
+ layer_ret = layer(
704
+ hidden_states,
705
+ attention_mask,
706
+ position_ids,
707
+ kv_cache=kv_caches[index],
708
+ use_cache=use_cache
709
+ )
710
+ hidden_states, kv_cache = layer_ret
711
+ if use_cache:
712
+ presents = presents + (kv_cache,)
713
+
714
+
715
+ # Final layer norm.
716
+ if self.post_layer_norm:
717
+ hidden_states = self.final_layernorm(hidden_states)
718
+
719
+ if output_hidden_states:
720
+ all_hidden_states = all_hidden_states + (hidden_states,)
721
+
722
+ return hidden_states, presents, all_hidden_states, all_self_attentions
723
+
724
+
725
+ class xTrimoPGLMPreTrainedModel(PreTrainedModel):
726
+ """
727
+ An abstract class to handle weights initialization and
728
+ a simple interface for downloading and loading pretrained models.
729
+ """
730
+
731
+ is_parallelizable = False
732
+ supports_gradient_checkpointing = True
733
+ config_class = xTrimoPGLMConfig
734
+ base_model_prefix = "transformer"
735
+ _no_split_modules = ["xTrimoPGLMBlock"]
736
+
737
+ _quantized = False
738
+
739
+
740
+ def get_masks(self, input_ids, past_key_values, padding_mask=None, is_causal=True):
741
+ batch_size, seq_length = input_ids.shape
742
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
743
+ if is_causal:
744
+ full_attention_mask.tril_()
745
+ past_length = 0
746
+ if past_key_values:
747
+ past_length = past_key_values[0][0].shape[0]
748
+ if past_length:
749
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
750
+ device=input_ids.device), full_attention_mask), dim=-1)
751
+ if padding_mask is not None:
752
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
753
+ if not past_length and padding_mask is not None:
754
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
755
+ full_attention_mask = (full_attention_mask < 0.5).bool()
756
+ full_attention_mask.unsqueeze_(1)
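+ # In the returned [batch, 1, seq_q, seq_k] boolean mask, True marks positions that must NOT be attended to;
+ # CoreAttention inverts it for the SDPA path and applies masked_fill(-inf) in the manual path.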
757
+ return full_attention_mask
758
+
759
+ def get_position_ids(self, input_ids, device, context_length=0):
760
+ batch_size, seq_length = input_ids.shape
761
+ if self.config.rotary_embedding_2d:
762
+ if self.config.is_causal: # 100b model
763
+ position_ids_1 = torch.zeros(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
764
+ position_ids_2 = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
765
+ position_ids = torch.stack([position_ids_1, position_ids_2], axis=1) # [batch_size, 2, seq_len]
766
+ else:
767
+ position_ids_1 = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
768
+ position_ids_2 = torch.zeros(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
769
+ position_ids = torch.stack([position_ids_1, position_ids_2], axis=1) # [batch_size, 2, seq_len]
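+ # e.g. for the non-causal (MLM) case with seq_length = 4, each sample gets
+ # [[0, 1, 2, 3], [0, 0, 0, 0]]: token positions in the first row, block positions zeroed.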
770
+ else:
771
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
772
+ return position_ids
773
+
774
+ def _set_gradient_checkpointing(self, module, value=False):
775
+ if isinstance(module, xTrimoPGLMTransformer):
776
+ module.gradient_checkpointing = value
777
+
778
+
779
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
780
+ def _init_weights(self, module):
781
+ std = self.config.initializer_range
782
+ """Initialize the weights"""
783
+ if isinstance(module, nn.Linear):
784
+ # Slightly different from the TF version which uses truncated_normal for initialization
785
+ # cf https://github.com/pytorch/pytorch/pull/5617
786
+ module.weight.data.normal_(mean=0.0, std=std)
787
+ if module.bias is not None:
788
+ module.bias.data.zero_()
789
+ elif isinstance(module, nn.Embedding):
790
+ module.weight.data.normal_(mean=0.0, std=std)
791
+ if module.padding_idx is not None:
792
+ module.weight.data[module.padding_idx].zero_()
793
+ elif isinstance(module, nn.LayerNorm):
794
+ module.bias.data.zero_()
795
+ module.weight.data.fill_(1.0)
796
+
797
+ def quantize(self, weight_bit_width: int, empty_init=True, device=None):
798
+ if self._quantized:
799
+ print(f"Model has been quantized...")
800
+ return
801
+ self.transformer.encoder = quantize(self.transformer.encoder, weight_bit_width, empty_init, device)
802
+ self._quantized = True
803
+ return self
804
+
805
+ class Embedding(torch.nn.Module):
806
+ """Language model embeddings."""
807
+
808
+ def __init__(self, config: xTrimoPGLMConfig, device=None):
809
+ super(Embedding, self).__init__()
810
+
811
+ self.hidden_size = config.hidden_size
812
+ # Word embeddings (parallel).
813
+ self.word_embeddings = nn.Embedding(
814
+ config.padded_vocab_size,
815
+ self.hidden_size,
816
+ dtype=config.torch_dtype,
817
+ device=device
818
+ )
819
+ self.fp32_residual_connection = config.fp32_residual_connection
820
+
821
+
822
+ def forward(self, input_ids):
823
+ # Embeddings.
824
+ words_embeddings = self.word_embeddings(input_ids)
825
+ embeddings = words_embeddings
826
+ # Data format change to avoid explicit transposes: [b s h] --> [s b h].
827
+ embeddings = embeddings.transpose(0, 1).contiguous()
828
+ # If the input flag for fp32 residual connection is set, convert to float.
829
+ if self.fp32_residual_connection:
830
+ embeddings = embeddings.float()
831
+ return embeddings
832
+
833
+ class xTrimoPGLMModel(xTrimoPGLMPreTrainedModel):
834
+ def __init__(self, config: xTrimoPGLMConfig, device=None, empty_init=True):
835
+ super().__init__(config)
836
+ if empty_init:
837
+ init_method = skip_init
838
+ else:
839
+ init_method = default_init
840
+ init_kwargs = {}
841
+ if device is not None:
842
+ init_kwargs["device"] = device
843
+ self.embedding = init_method(Embedding, config, **init_kwargs)
844
+ self.num_layers = config.num_layers
845
+ self.multi_query_group_num = config.multi_query_group_num
846
+ self.kv_channels = config.kv_channels
847
+
848
+ # Rotary positional embeddings
849
+ self.seq_length = config.seq_length
850
+ rotary_dim = (
851
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
852
+ )
853
+
854
+ # self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, base=10000, precision=config.torch_dtype, learnable=False)
855
+ self.encoder = init_method(xTrimoPGLMTransformer, config, **init_kwargs)
856
+
857
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
858
+ dtype=config.torch_dtype, **init_kwargs)
859
+
860
+ def get_input_embeddings(self):
861
+ return self.embedding.word_embeddings
862
+
863
+ def set_input_embeddings(self, value):
864
+ self.embedding.word_embeddings = value
865
+
866
+ def forward(
867
+ self,
868
+ input_ids,
869
+ position_ids: Optional[torch.Tensor] = None, # position_ids: [batch_size, 2, seq_len]
870
+ attention_mask: Optional[torch.BoolTensor] = None,
871
+ full_attention_mask: Optional[torch.BoolTensor] = None,
872
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
873
+ inputs_embeds: Optional[torch.Tensor] = None,
874
+ use_cache: Optional[bool] = None,
875
+ output_hidden_states: Optional[bool] = None,
876
+ return_dict: Optional[bool] = None,
877
+ ):
878
+ output_hidden_states = (
879
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
880
+ )
881
+ if self.config.is_causal:
882
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
883
+ else:
884
+ use_cache = False
885
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
886
+
887
+ batch_size, seq_length = input_ids.shape
888
+
889
+ if inputs_embeds is None:
890
+ inputs_embeds = self.embedding(input_ids)
891
+
892
+ if full_attention_mask is None:
893
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
894
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
895
+ # Run encoder.
896
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
897
+ inputs_embeds, full_attention_mask, position_ids=position_ids,
898
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
899
+ )
900
+
901
+ if not return_dict:
902
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
903
+
904
+ return BaseModelOutputWithPast(
905
+ last_hidden_state=hidden_states,
906
+ past_key_values=presents,
907
+ hidden_states=all_hidden_states,
908
+ attentions=all_self_attentions,
909
+ )
910
+
911
+
912
+ class xTrimoPGLMForMaskedLM(xTrimoPGLMPreTrainedModel):
913
+ def __init__(self, config: xTrimoPGLMConfig, empty_init=True, device=None):
914
+ super().__init__(config)
915
+
916
+ self.max_sequence_length = config.max_length
917
+ self.transformer = xTrimoPGLMModel(config, empty_init=empty_init, device=device)
918
+ self.config = config
919
+ if self.config.quantization_bit:
920
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
921
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
922
+
923
+ def forward(
924
+ self,
925
+ input_ids: Optional[torch.Tensor] = None,
926
+ position_ids: Optional[torch.Tensor] = None,
927
+ attention_mask: Optional[torch.Tensor] = None,
928
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
929
+ inputs_embeds: Optional[torch.Tensor] = None,
930
+ labels: Optional[torch.Tensor] = None,
931
+ use_cache: Optional[bool] = None,
932
+ output_attentions: Optional[bool] = None,
933
+ output_hidden_states: Optional[bool] = None,
934
+ return_dict: Optional[bool] = None,
935
+ return_last_logit: Optional[bool] = None,
936
+ return_last_hidden_state: Optional[bool] = None
937
+ ):
938
+ if self.config.is_causal:
939
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
940
+ else:
941
+ use_cache = False
942
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
943
+
944
+ if position_ids is None:
945
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
946
+
947
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal=self.config.is_causal)
948
+
949
+ transformer_outputs = self.transformer(
950
+ input_ids=input_ids,
951
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
952
+ full_attention_mask=full_attention_mask,
953
+ past_key_values=past_key_values,
954
+ inputs_embeds=inputs_embeds,
955
+ use_cache=use_cache,
956
+ output_hidden_states=output_hidden_states,
957
+ return_dict=return_dict,
958
+ )
959
+
960
+ hidden_states = transformer_outputs[0]
961
+ if return_last_logit:
962
+ hidden_states = hidden_states[-1:]
963
+ lm_logits = self.transformer.output_layer(hidden_states)
964
+ lm_logits = lm_logits.transpose(0, 1).contiguous()
965
+
966
+ masked_lm_loss = None
967
+ if labels is not None:
968
+ lm_logits = lm_logits.to(torch.float32)
969
+
970
+ # Flatten the tokens
971
+ loss_fct = CrossEntropyLoss(ignore_index=-100)  # labels set to -100 are ignored (e.g. padding positions).
972
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
973
+
974
+ lm_logits = lm_logits.to(hidden_states.dtype)
975
+ masked_lm_loss = masked_lm_loss.to(hidden_states.dtype)
976
+
977
+ if not return_dict:
978
+ output = (lm_logits,) + transformer_outputs[1:]
979
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
980
+ return MaskedLMOutput(
981
+ loss=masked_lm_loss,
982
+ logits=lm_logits,
983
+ hidden_states=transformer_outputs.last_hidden_state if return_last_hidden_state else transformer_outputs.hidden_states,
984
+ attentions=transformer_outputs.attentions,
985
+ )
986
+
987
+
988
+
989
+
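A minimal usage sketch for the masked-LM head above (not part of the upload): the repository path is a placeholder, the residue string is made up, and it assumes the files in this commit are loaded with `trust_remote_code=True`.

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

repo = "path/to/this/upload"  # placeholder, not a real model id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForMaskedLM.from_pretrained(repo, trust_remote_code=True).eval()

# Residues are space-separated to match the whitespace-based _tokenize of the tokenizer in this upload.
inputs = tokenizer("M K T A Y I A K Q R", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)          # MaskedLMOutput; position_ids and masks are built inside forward
print(out.logits.shape)            # [batch, seq_len, vocab_size] after the transpose above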
990
+ class xTrimoPGLMForSequenceClassification(xTrimoPGLMPreTrainedModel):
991
+ def __init__(self, config: xTrimoPGLMConfig, empty_init=True, device=None):
992
+ super().__init__(config)
993
+ self.config = config
994
+ self.num_labels = config.num_labels
995
+
996
+ self.transformer = xTrimoPGLMModel(config, empty_init=empty_init, device=device)
997
+ self.classifier = xTrimoPGLMClassificationHead(config)
998
+ if self.config.quantization_bit:
999
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1000
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1001
+
1002
+ def forward(
1003
+ self,
1004
+ input_ids: Optional[torch.Tensor] = None,
1005
+ position_ids: Optional[torch.Tensor] = None,
1006
+ attention_mask: Optional[torch.Tensor] = None,
1007
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1008
+ inputs_embeds: Optional[torch.Tensor] = None,
1009
+ labels: Optional[torch.Tensor] = None,
1010
+ use_cache: Optional[bool] = None,
1011
+ output_attentions: Optional[bool] = None,
1012
+ output_hidden_states: Optional[bool] = None,
1013
+ return_dict: Optional[bool] = None,
1014
+ return_last_logit: Optional[bool] = None,
1015
+ return_last_hidden_state: Optional[bool] = None,
1016
+ **kwargs
1017
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1018
+ r"""
1019
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1020
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1021
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1022
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1023
+ """
1024
+ if self.config.is_causal:
1025
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1026
+ else:
1027
+ use_cache = False
1028
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1029
+
1030
+ if position_ids is None:
1031
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1032
+
1033
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal=self.config.is_causal)
1034
+
1035
+ transformer_outputs = self.transformer(
1036
+ input_ids=input_ids,
1037
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1038
+ full_attention_mask=full_attention_mask,
1039
+ past_key_values=past_key_values,
1040
+ inputs_embeds=inputs_embeds,
1041
+ use_cache=use_cache,
1042
+ output_hidden_states=output_hidden_states,
1043
+ return_dict=return_dict,
1044
+ )
1045
+ if self.config.add_special_tokens:
1046
+ hidden_states = transformer_outputs[0][:-1] # get rid of <eos> token
1047
+ else:
1048
+ hidden_states = transformer_outputs[0]
1049
+ logits = self.classifier(hidden_states, add_pooling=True)
1050
+ loss = None
1051
+ if labels is not None:
1052
+ labels = labels.to(logits.device)
1053
+
1054
+ if self.config.problem_type is None:
1055
+ if self.num_labels == 1:
1056
+ self.config.problem_type = "regression"
1057
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1058
+ self.config.problem_type = "single_label_classification"
1059
+ else:
1060
+ self.config.problem_type = "multi_label_classification"
1061
+
1062
+ if self.config.problem_type == "regression":
1063
+ loss_fct = MSELoss()
1064
+ if self.num_labels == 1:
1065
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1066
+ else:
1067
+ loss = loss_fct(logits, labels)
1068
+ elif self.config.problem_type == "single_label_classification":
1069
+ loss_fct = CrossEntropyLoss()
1070
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1071
+ elif self.config.problem_type == "multi_label_classification":
1072
+ loss_fct = BCEWithLogitsLoss()
1073
+ loss = loss_fct(logits, labels)
1074
+
1075
+ if not return_dict:
1076
+ output = (logits,) + transformer_outputs[2:]
1077
+ return ((loss,) + output) if loss is not None else output
1078
+
1079
+ return SequenceClassifierOutput(
1080
+ loss=loss,
1081
+ logits=logits,
1082
+ hidden_states=transformer_outputs.hidden_states,
1083
+ attentions=transformer_outputs.attentions,
1084
+ )
1085
+
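The loss branch above infers `problem_type` from `num_labels` and the label dtype when the config leaves it unset; a short sketch of the three label layouts it expects (all values made up):

import torch

labels_regression = torch.tensor([0.7, 1.2])                    # num_labels == 1 -> "regression", MSELoss
labels_single     = torch.tensor([0, 2])                        # int labels, num_labels > 1 -> CrossEntropyLoss
labels_multilabel = torch.tensor([[1., 0., 1.], [0., 1., 0.]])  # float multi-hot -> BCEWithLogitsLoss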
1086
+ class xTrimoPGLMForTokenClassification(xTrimoPGLMPreTrainedModel):
1087
+ def __init__(self, config: xTrimoPGLMConfig, empty_init=True, device=None):
1088
+ super().__init__(config)
1089
+ self.config = config
1090
+ self.num_labels = config.num_labels
1091
+
1092
+ self.transformer = xTrimoPGLMModel(config, empty_init=empty_init, device=device)
1093
+ if config.task_modality == "token":
1094
+ self.classifier = xTrimoPGLMClassificationHead(config)
1095
+ elif config.task_modality == 'pair':
1096
+ self.classifier = xTrimoPGLMContactHead(config)
1097
+
1098
+ self.quantized = False
1099
+
1100
+ if self.config.quantization_bit:
1101
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1102
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1103
+
1104
+
1105
+ def forward(
1106
+ self,
1107
+ input_ids: Optional[torch.Tensor] = None,
1108
+ position_ids: Optional[torch.Tensor] = None,
1109
+ attention_mask: Optional[torch.Tensor] = None,
1110
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1111
+ inputs_embeds: Optional[torch.Tensor] = None,
1112
+ labels: Optional[torch.Tensor] = None,
1113
+ use_cache: Optional[bool] = None,
1114
+ output_attentions: Optional[bool] = None,
1115
+ output_hidden_states: Optional[bool] = None,
1116
+ return_dict: Optional[bool] = None,
1117
+ return_last_logit: Optional[bool] = None,
1118
+ return_last_hidden_state: Optional[bool] = None,
1119
+ **kwargs
1120
+ ) -> Union[Tuple, TokenClassifierOutput]:
1121
+ r"""
1122
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1123
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
1124
+ config.num_labels - 1]`. A token-level cross-entropy loss is computed; positions labelled
1125
+ `-100` are ignored (the default `ignore_index` of `CrossEntropyLoss`).
1126
+ """
1127
+ if self.config.is_causal:
1128
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1129
+ else:
1130
+ use_cache = False
1131
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1132
+
1133
+ if position_ids is None:
1134
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1135
+
1136
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal = self.config.is_causal)
1137
+
1138
+ transformer_outputs = self.transformer(
1139
+ input_ids=input_ids,
1140
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1141
+ full_attention_mask=full_attention_mask,
1142
+ past_key_values=past_key_values,
1143
+ inputs_embeds=inputs_embeds,
1144
+ use_cache=use_cache,
1145
+ output_hidden_states=output_hidden_states,
1146
+ return_dict=return_dict,
1147
+ )
1148
+ if self.config.add_special_tokens:
1149
+ hidden_states = transformer_outputs[0][:-1] # get rid of <eos> token
1150
+ else:
1151
+ hidden_states = transformer_outputs[0]
1152
+
1153
+ logits = self.classifier(hidden_states, add_pooling=False)
1154
+ loss = None
1155
+ if labels is not None:
1156
+ labels = labels.to(logits.device)
1157
+ loss_fct = CrossEntropyLoss()
1158
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1159
+
1160
+ if not return_dict:
1161
+ output = (logits,) + transformer_outputs[2:]
1162
+ return ((loss,) + output) if loss is not None else output
1163
+
1164
+
1165
+ return TokenClassifierOutput(
1166
+ loss=loss,
1167
+ logits=logits,
1168
+ hidden_states=transformer_outputs.hidden_states,
1169
+ attentions=transformer_outputs.attentions,
1170
+ )
1171
+
1172
+
1173
+
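The token head above scores every position (`add_pooling=False`) and flattens logits and labels for the cross-entropy, so one class index per residue is expected; a shape sketch with made-up sizes:

import torch

num_labels = 3
logits = torch.randn(2, 5, num_labels)          # [batch, seq_len, num_labels] from the classifier
labels = torch.randint(0, num_labels, (2, 5))   # one label per residue
loss = torch.nn.CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))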
1174
+ class xTrimoPGLMClassificationHead(nn.Module):
1175
+ """Head for classification tasks."""
1176
+ def __init__(self, config):
1177
+ super().__init__()
1178
+ self.activation_func = config.activation_func
1179
+ self.layers = torch.nn.ModuleList()
1180
+ last_size = config.hidden_size
1181
+ for sz in config.inter_hidden_size:
1182
+ this_layer = torch.nn.Linear(last_size, sz, bias=config.bias)
1183
+ last_size = sz
1184
+ self.layers.append(this_layer)
1185
+
1186
+ def forward(self,
1187
+ input_features,
1188
+ add_pooling: Optional[bool] = True
1189
+ ):
1190
+ # [s, b, h] -> [b, s, h]
1191
+ input_features = input_features.transpose(0,1).contiguous()
1192
+ if add_pooling:
1193
+ # [b, h]
1194
+ input_features = torch.mean(input_features, dim = 1)
1195
+ for i, layer in enumerate(self.layers):
1196
+ if i > 0:
1197
+ input_features = self.activation_func(input_features)
1198
+ input_features = layer(input_features)
1199
+ return input_features
1200
+
1201
+ class xTrimoPGLMContactHead(nn.Module):
1202
+ """Head for pairwise (contact-map) classification tasks."""
1203
+ def __init__(self, config):
1204
+ super().__init__()
1205
+ self.activation_func = config.activation_func
1206
+ self.layers = torch.nn.ModuleList()
1207
+ last_size = config.hidden_size * 2
1208
+ for sz in config.inter_hidden_size:
1209
+ this_layer = torch.nn.Linear(last_size, sz, bias=config.bias)
1210
+ last_size = sz
1211
+ self.layers.append(this_layer)
1212
+
1213
+ def outer_concat(self, x):
1214
+ batch_size, seq_len, features = x.shape
1215
+
1216
+ # Permute to [batch_size, features, seq_len]
1217
+ x = x.permute(0, 2, 1)
1218
+
1219
+ # Introduce new dimensions for broadcasting
1220
+ x_1 = x[:, None, :, :, None] # [batch_size, 1, features, seq_len, 1]
1221
+ x_2 = x[:, None, :, None, :] # [batch_size, 1, features, 1, seq_len]
1222
+
1223
+ # Repeat along new dimensions
1224
+ x_1 = x_1.repeat(1, 1, 1, 1, seq_len) # [batch_size, 1, features, seq_len, seq_len]
1225
+ x_2 = x_2.repeat(1, 1, 1, seq_len, 1) # [batch_size, 1, features, seq_len, seq_len]
1226
+
1227
+ # Concatenate along the second dimension
1228
+ x = torch.cat((x_1, x_2), dim=1) # [batch_size, 2, features, seq_len, seq_len]
1229
+
1230
+ # Get lower triangular indices
1231
+ I, J = torch.tril_indices(seq_len, seq_len, -1)
1232
+
1233
+ # Symmetrize
1234
+ x[:, :, :, I, J] = x[:, :, :, J, I]
1235
+
1236
+ # Permute to desired shape and make contiguous
1237
+ x = x.permute(0, 3, 4, 2, 1).contiguous() # [batch_size, seq_len, seq_len, features, 2]
1238
+
1239
+ # Reshape to combine the last two dimensions
1240
+ x = x.view(batch_size, seq_len, seq_len, features * 2) # [batch_size, seq_len, seq_len, features * 2]
1241
+
1242
+ return x
1243
+
1244
+ def forward(self,
1245
+ input_features,
1246
+ add_pooling: Optional[bool] = True
1247
+ ):
1248
+ # [s, b, h] -> [b, s, h]
1249
+ input_features = input_features.transpose(0,1).contiguous()
1250
+ input_features = self.outer_concat(input_features)
1251
+ for i, layer in enumerate(self.layers):
1252
+ if i > 0:
1253
+ input_features = self.activation_func(input_features)
1254
+ input_features = layer(input_features)
1255
+ return input_features
1256
+
1257
+
1258
+
1259
+
1260
+
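A quick shape check for `outer_concat` above, written as a standalone sketch: the module path, the stand-in config and the layer sizes are assumptions, but the output shape follows directly from the code.

import torch
from types import SimpleNamespace
from modeling_xtrimopglm import xTrimoPGLMContactHead  # assumption: module name of the modeling file in this upload

# Stand-in config exposing only the fields the head reads.
cfg = SimpleNamespace(activation_func=torch.nn.functional.gelu,
                      inter_hidden_size=[16, 2], hidden_size=8, bias=True)
head = xTrimoPGLMContactHead(cfg)

x = torch.randn(3, 1, 8)    # [seq, batch, hidden] as produced by the transformer
print(head(x).shape)        # torch.Size([1, 3, 3, 2]): pairwise features [b, s, s, 2*hidden] mapped to 2 classes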
1261
+ class xTrimoPGLMForCasualLM(xTrimoPGLMPreTrainedModel):
1262
+ def __init__(self, config: xTrimoPGLMConfig, empty_init=True, device=None):
1263
+ super().__init__(config)
1264
+
1265
+ self.max_sequence_length = config.max_length
1266
+ self.transformer = xTrimoPGLMModel(config, empty_init=empty_init, device=device)
1267
+ self.config = config
1268
+ if self.config.quantization_bit:
1269
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1270
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1271
+
1272
+ def _update_model_kwargs_for_generation(
1273
+ self,
1274
+ outputs: ModelOutput,
1275
+ model_kwargs: Dict[str, Any],
1276
+ is_encoder_decoder: bool = False,
1277
+ standardize_cache_format: bool = False,
1278
+ ) -> Dict[str, Any]:
1279
+ # update past_key_values
1280
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
1281
+ outputs, standardize_cache_format=standardize_cache_format
1282
+ )
1283
+
1284
+ # update attention mask
1285
+ if "attention_mask" in model_kwargs:
1286
+ attention_mask = model_kwargs["attention_mask"]
1287
+ model_kwargs["attention_mask"] = torch.cat(
1288
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
1289
+ )
1290
+
1291
+ # update position ids
1292
+ if "position_ids" in model_kwargs:
1293
+ position_ids = model_kwargs["position_ids"]
1294
+ new_position_id = position_ids[..., -1:].clone() # [batch_size, 2, 1]
1295
+ if self.config.rotary_embedding_2d:
1296
+ new_position_id[:, 1] += 1 # Only update the 2nd dimension
1297
+ else:
1298
+ new_position_id[:] += 1
1299
+ model_kwargs["position_ids"] = torch.cat(
1300
+ [position_ids, new_position_id], dim=-1
1301
+ ) # [batch_size, 2, seq_len+1]
1302
+
1303
+ model_kwargs["is_first_forward"] = False
1304
+ return model_kwargs
1305
+
1306
+ def prepare_inputs_for_generation(
1307
+ self,
1308
+ input_ids: torch.LongTensor,
1309
+ past_key_values: Optional[torch.Tensor] = None,
1310
+ attention_mask: Optional[torch.Tensor] = None,
1311
+ position_ids: Optional[torch.Tensor] = None,
1312
+ use_cache: Optional[bool] = None,
1313
+ is_first_forward: bool = True,
1314
+ **kwargs
1315
+ ) -> dict:
1316
+ # only last token for input_ids if past is not None
1317
+ if position_ids is None:
1318
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device) # position_ids: [batch_size, 2, seq_len]
1319
+ if not is_first_forward:
1320
+ if past_key_values is not None:
1321
+ position_ids = position_ids[..., -1:]
1322
+ input_ids = input_ids[:, -1:]
1323
+ return {
1324
+ "input_ids": input_ids,
1325
+ "past_key_values": past_key_values,
1326
+ "position_ids": position_ids,
1327
+ "attention_mask": attention_mask,
1328
+ "return_last_logit": True,
1329
+ "use_cache": use_cache
1330
+ }
1331
+
1332
+ def forward(
1333
+ self,
1334
+ input_ids: Optional[torch.Tensor] = None,
1335
+ position_ids: Optional[torch.Tensor] = None,
1336
+ attention_mask: Optional[torch.Tensor] = None,
1337
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1338
+ inputs_embeds: Optional[torch.Tensor] = None,
1339
+ labels: Optional[torch.Tensor] = None,
1340
+ use_cache: Optional[bool] = None,
1341
+ output_attentions: Optional[bool] = None,
1342
+ output_hidden_states: Optional[bool] = None,
1343
+ return_dict: Optional[bool] = None,
1344
+ return_last_logit: Optional[bool] = False
1345
+ ):
1346
+ if self.config.is_causal:
1347
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1348
+ else:
1349
+ use_cache = False
1350
+
1351
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1352
+
1353
+ if position_ids is None:
1354
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1355
+
1356
+ transformer_outputs = self.transformer(
1357
+ input_ids=input_ids,
1358
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1359
+ attention_mask=attention_mask,
1360
+ past_key_values=past_key_values,
1361
+ inputs_embeds=inputs_embeds,
1362
+ use_cache=use_cache,
1363
+ output_hidden_states=output_hidden_states,
1364
+ return_dict=return_dict
1365
+ )
1366
+ hidden_states = transformer_outputs[0]
1367
+ if return_last_logit:
1368
+ hidden_states = hidden_states[-1:]
1369
+ lm_logits = self.transformer.output_layer(hidden_states)
1370
+ lm_logits = lm_logits.transpose(0, 1).contiguous()
1371
+
1372
+ loss = None
1373
+ if labels is not None:
1374
+ lm_logits = lm_logits.to(torch.float32)
1375
+
1376
+ # Shift so that tokens < n predict n
1377
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1378
+ shift_labels = labels[..., 1:].contiguous()
1379
+ # Flatten the tokens
1380
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1381
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1382
+
1383
+ lm_logits = lm_logits.to(hidden_states.dtype)
1384
+ loss = loss.to(hidden_states.dtype)
1385
+
1386
+ if not return_dict:
1387
+ output = (lm_logits,) + transformer_outputs[1:]
1388
+ return ((loss,) + output) if loss is not None else output
1389
+
1390
+ return CausalLMOutputWithPast(
1391
+ loss=loss,
1392
+ logits=lm_logits,
1393
+ past_key_values=transformer_outputs.past_key_values,
1394
+ hidden_states=transformer_outputs.hidden_states,
1395
+ attentions=transformer_outputs.attentions,
1396
+ )
1397
+
1398
+ @staticmethod
1399
+ def _reorder_cache(
1400
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1401
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1402
+ """
1403
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1404
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1405
+ beam_idx at every generation step.
1406
+
1407
+ Output shares the same memory storage as `past`.
1408
+ """
1409
+ return tuple(
1410
+ (
1411
+ layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
1412
+ layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
1413
+ )
1414
+ for layer_past in past
1415
+ )
1416
+
1417
+ @torch.inference_mode()
1418
+ def chat(self, tokenizer, query: str, max_length: int = 256, num_beams=1, do_sample=True,
1419
+ top_p=1.0, temperature=1.0, logits_processor=None, **kwargs):
1420
+ if logits_processor is None:
1421
+ logits_processor = LogitsProcessorList()
1422
+ logits_processor.append(InvalidScoreLogitsProcessor())
1423
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1424
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1425
+ inputs = tokenizer.apply_chat_template(query, add_generation_prompt=True, tokenize=True,
1426
+ return_tensors="pt", return_dict=True)
1427
+ position_ids = self.get_position_ids(inputs['input_ids'], device=self.device) # TODO: ADD BATCH
1428
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<eop>")]
1429
+ inputs["position_ids"] = position_ids
1430
+ inputs = inputs.to(self.device)
1431
+ outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
1432
+ outputs = outputs.tolist()[0][3:] # 3 for generation prompt "<gmask><sop><eos>"
1433
+ if outputs[-1] in eos_token_id:
1434
+ outputs = outputs[:-1]
1435
+ response = tokenizer.decode(outputs)
1436
+ return response
1437
+
1438
+ # TODO: fix bug in streaming chat
1439
+ @torch.inference_mode()
1440
+ def stream_chat(self, tokenizer, query: str, max_length: int = 56, num_beams=1, do_sample=True,
1441
+ top_p=0.8, temperature=0.8, logits_processor=None, past_key_values = None, **kwargs):
1442
+ if logits_processor is None:
1443
+ logits_processor = LogitsProcessorList()
1444
+ logits_processor.append(InvalidScoreLogitsProcessor())
1445
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<eop>")]
1446
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
1447
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1448
+ inputs = tokenizer.apply_chat_template(query, add_generation_prompt=True, tokenize=True,
1449
+ return_tensors="pt", return_dict=True)
1450
+ position_ids = self.get_position_ids(inputs['input_ids'], device=self.device) # TODO: ADD BATCH
1451
+
1452
+ inputs["position_ids"] = position_ids
1453
+ inputs = inputs.to(self.device)
1454
+ offset = 3 # 3 for generation prompt
1455
+ for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
1456
+ eos_token_id=eos_token_id, return_past_key_values=False,
1457
+ **gen_kwargs):
1458
+ outputs = outputs.tolist()[0][3:]
1459
+ if outputs[-1] in eos_token_id:
1460
+ outputs = outputs[:-1]
1461
+ # offset = 3 + len(outputs)
1462
+ response = tokenizer.decode(outputs)
1463
+ if response:
1464
+ yield response
1465
+
1466
+ @torch.inference_mode()
1467
+ def stream_generate(
1468
+ self,
1469
+ input_ids,
1470
+ generation_config: Optional[GenerationConfig] = None,
1471
+ logits_processor: Optional[LogitsProcessorList] = None,
1472
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1473
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1474
+ return_past_key_values=False,
1475
+ **kwargs,
1476
+ ):
1477
+
1478
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1479
+
1480
+ if generation_config is None:
1481
+ generation_config = self.generation_config
1482
+ generation_config = copy.deepcopy(generation_config)
1483
+ model_kwargs = generation_config.update(**kwargs)
1484
+ model_kwargs["use_cache"] = generation_config.use_cache
1485
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1486
+
1487
+ if isinstance(eos_token_id, int):
1488
+ eos_token_id = [eos_token_id]
1489
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
1490
+
1491
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1492
+ if has_default_max_length and generation_config.max_new_tokens is None:
1493
+ warnings.warn(
1494
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1495
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1496
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1497
+ UserWarning,
1498
+ )
1499
+ elif generation_config.max_new_tokens is not None:
1500
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1501
+ if not has_default_max_length:
1502
+ logger.warning(
1503
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1504
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1505
+ "Please refer to the documentation for more information. "
1506
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1507
+
1508
+ )
1509
+
1510
+ if input_ids_seq_length >= generation_config.max_length:
1511
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1512
+ logger.warning(
1513
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1514
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1515
+ " increasing `max_new_tokens`."
1516
+ )
1517
+
1518
+ # 2. Set generation parameters if not already defined
1519
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1520
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1521
+
1522
+ logits_processor = self._get_logits_processor(
1523
+ generation_config=generation_config,
1524
+ input_ids_seq_length=input_ids_seq_length,
1525
+ encoder_input_ids=input_ids,
1526
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1527
+ logits_processor=logits_processor,
1528
+ )
1529
+
1530
+ stopping_criteria = self._get_stopping_criteria(
1531
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1532
+ )
1533
+ logits_warper = self._get_logits_warper(generation_config)
1534
+
1535
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1536
+ scores = None
1537
+ while True:
1538
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1539
+ # forward pass to get next token
1540
+ outputs = self(
1541
+ **model_inputs,
1542
+ return_dict=True,
1543
+ output_attentions=False,
1544
+ output_hidden_states=False,
1545
+ )
1546
+
1547
+ next_token_logits = outputs.logits[:, -1, :]
1548
+
1549
+ # pre-process distribution
1550
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1551
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1552
+
1553
+ # sample
1554
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1555
+ if generation_config.do_sample:
1556
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1557
+ else:
1558
+ next_tokens = torch.argmax(probs, dim=-1)
1559
+ # update generated ids, model inputs, and length for next step
1560
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1561
+ model_kwargs = self._update_model_kwargs_for_generation(
1562
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1563
+ )
1564
+ unfinished_sequences = unfinished_sequences.mul(
1565
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
1566
+ )
1567
+ if return_past_key_values:
1568
+ yield input_ids, outputs.past_key_values
1569
+ else:
1570
+ yield input_ids
1571
+ # stop when each sentence is finished, or if we exceed the maximum length
1572
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1573
+ break
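A minimal generation sketch for the causal head above (not part of the upload). It assumes a checkpoint with `is_causal=True` whose auto classes resolve to `xTrimoPGLMForCasualLM`; the repository path and the prompt are placeholders.

from transformers import AutoTokenizer, AutoModelForCausalLM

repo = "path/to/a/causal/checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

# chat() wraps apply_chat_template + generate and strips the "<gmask><sop><eos>" generation prompt.
response = model.chat(tokenizer, "MKTAYIAKQR", max_length=128, do_sample=True, top_p=0.9, temperature=0.8)
print(response)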
quantization.py ADDED
@@ -0,0 +1,188 @@
1
+ from torch.nn import Linear
2
+ from torch.nn.parameter import Parameter
3
+
4
+ import bz2
5
+ import torch
6
+ import base64
7
+ import ctypes
8
+ from transformers.utils import logging
9
+
10
+ from typing import List
11
+ from functools import partial
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+ try:
16
+ from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
17
+
18
+ class Kernel:
19
+ def __init__(self, code: bytes, function_names: List[str]):
20
+ self.code = code
21
+ self._function_names = function_names
22
+ self._cmodule = LazyKernelCModule(self.code)
23
+
24
+ for name in self._function_names:
25
+ setattr(self, name, KernelFunction(self._cmodule, name))
26
+
27
+ quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+t
e+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyH
pix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
28
+
29
+ kernels = Kernel(
30
+ bz2.decompress(base64.b64decode(quantization_code)),
31
+ [
32
+ "int4WeightCompression",
33
+ "int4WeightExtractionFloat",
34
+ "int4WeightExtractionHalf",
35
+ "int8WeightExtractionFloat",
36
+ "int8WeightExtractionHalf",
37
+ ],
38
+ )
39
+ except Exception as exception:
40
+ kernels = None
41
+ logger.warning("Failed to load cpm_kernels:" + str(exception))
42
+
43
+
44
+ class W8A16Linear(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
47
+ ctx.inp_shape = inp.size()
48
+ ctx.weight_bit_width = weight_bit_width
49
+ out_features = quant_w.size(0)
50
+ inp = inp.contiguous().view(-1, inp.size(-1))
51
+ weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
52
+ ctx.weight_shape = weight.size()
53
+ output = inp.mm(weight.t())
54
+ ctx.save_for_backward(inp, quant_w, scale_w)
55
+ return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
56
+
57
+ @staticmethod
58
+ def backward(ctx, grad_output: torch.Tensor):
59
+ inp, quant_w, scale_w = ctx.saved_tensors
60
+ weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
61
+ grad_output = grad_output.contiguous().view(-1, weight.size(0))
62
+ grad_input = grad_output.mm(weight)
63
+ grad_weight = grad_output.t().mm(inp)
64
+ return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
65
+
66
+
67
+ def compress_int4_weight(weight: torch.Tensor): # (n, m)
68
+ with torch.cuda.device(weight.device):
69
+ n, m = weight.size(0), weight.size(1)
70
+ assert m % 2 == 0
71
+ m = m // 2
72
+ out = torch.empty(n, m, dtype=torch.int8, device="cuda")
73
+ stream = torch.cuda.current_stream()
74
+
75
+ gridDim = (n, 1, 1)
76
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
77
+
78
+ kernels.int4WeightCompression(
79
+ gridDim,
80
+ blockDim,
81
+ 0,
82
+ stream,
83
+ [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
84
+ )
85
+ return out
86
+
87
+
88
+ def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
89
+ assert scale_list.dtype in [torch.half, torch.bfloat16]
90
+ assert weight.dtype in [torch.int8]
91
+ if source_bit_width == 8:
92
+ return weight.to(scale_list.dtype) * scale_list[:, None]
93
+ elif source_bit_width == 4:
94
+ func = (
95
+ kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16
96
+ )
97
+ else:
98
+ assert False, "Unsupported bit-width"
99
+
100
+ with torch.cuda.device(weight.device):
101
+ n, m = weight.size(0), weight.size(1)
102
+ out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device="cuda")
103
+ stream = torch.cuda.current_stream()
104
+
105
+ gridDim = (n, 1, 1)
106
+ blockDim = (min(round_up(m, 32), 1024), 1, 1)
107
+
108
+ func(
109
+ gridDim,
110
+ blockDim,
111
+ 0,
112
+ stream,
113
+ [
114
+ ctypes.c_void_p(weight.data_ptr()),
115
+ ctypes.c_void_p(scale_list.data_ptr()),
116
+ ctypes.c_void_p(out.data_ptr()),
117
+ ctypes.c_int32(n),
118
+ ctypes.c_int32(m),
119
+ ],
120
+ )
121
+ return out
122
+
123
+
124
+ class QuantizedLinear(torch.nn.Module):
125
+ def __init__(self, weight_bit_width: int, weight, bias=None, device="cpu", dtype=None, empty_init=False, *args,
126
+ **kwargs):
127
+ super().__init__()
128
+ self.weight_bit_width = weight_bit_width
129
+
130
+ shape = weight.shape
131
+
132
+ if weight is None or empty_init:
133
+ self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)
134
+ self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)
135
+ else:
136
+ self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)
137
+ self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)
138
+ if weight_bit_width == 4:
139
+ self.weight = compress_int4_weight(self.weight)
140
+
141
+ self.weight = Parameter(self.weight.to(device), requires_grad=False)
142
+ self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)
143
+ self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None
144
+
145
+ def forward(self, input):
146
+ output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
147
+ if self.bias is not None:
148
+ output = output + self.bias
149
+ return output
150
+
151
+
152
+ def quantize(model, weight_bit_width, empty_init=False, device=None):
153
+ """Replace fp16 linear with quantized linear"""
154
+ for layer in model.layers:
155
+ layer.self_attention.query_key_value = QuantizedLinear(
156
+ weight_bit_width=weight_bit_width,
157
+ weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),
158
+ bias=layer.self_attention.query_key_value.bias,
159
+ dtype=layer.self_attention.query_key_value.weight.dtype,
160
+ device=layer.self_attention.query_key_value.weight.device if device is None else device,
161
+ empty_init=empty_init
162
+ )
163
+ layer.self_attention.dense = QuantizedLinear(
164
+ weight_bit_width=weight_bit_width,
165
+ weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()),
166
+ bias=layer.self_attention.dense.bias,
167
+ dtype=layer.self_attention.dense.weight.dtype,
168
+ device=layer.self_attention.dense.weight.device if device is None else device,
169
+ empty_init=empty_init
170
+ )
171
+ layer.mlp.dense_h_to_4h = QuantizedLinear(
172
+ weight_bit_width=weight_bit_width,
173
+ weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
174
+ bias=layer.mlp.dense_h_to_4h.bias,
175
+ dtype=layer.mlp.dense_h_to_4h.weight.dtype,
176
+ device=layer.mlp.dense_h_to_4h.weight.device if device is None else device,
177
+ empty_init=empty_init
178
+ )
179
+ layer.mlp.dense_4h_to_h = QuantizedLinear(
180
+ weight_bit_width=weight_bit_width,
181
+ weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
182
+ bias=layer.mlp.dense_4h_to_h.bias,
183
+ dtype=layer.mlp.dense_4h_to_h.weight.dtype,
184
+ device=layer.mlp.dense_4h_to_h.weight.device if device is None else device,
185
+ empty_init=empty_init
186
+ )
187
+
188
+ return model
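The int8 branch of `extract_weight_to_half` above is plain per-row rescaling, so the weight round trip can be sketched on CPU without `cpm_kernels` (the weights below are made up):

import torch

w = torch.randn(4, 8, dtype=torch.half)                     # fp16 weight, [out_features, in_features]
scale = w.abs().max(dim=-1).values / ((2 ** (8 - 1)) - 1)   # per-row scale, as in QuantizedLinear.__init__
q = torch.round(w / scale[:, None]).to(torch.int8)          # int8 weight that is actually stored
w_restored = q.to(scale.dtype) * scale[:, None]             # the source_bit_width == 8 branch above
print((w - w_restored).abs().max())                         # small quantization error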
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<pad>",
4
+ "<mask>",
5
+ "<gmask>",
6
+ "<smask>",
7
+ "<eod>",
8
+ "<sop>",
9
+ "<eop>",
10
+ "<eos>",
11
+ "<unk>"
12
+ ],
13
+ "eos_token": "<eos>",
14
+ "mask_token": "<mask>",
15
+ "pad_token": "<pad>",
16
+ "unk_token": "<unk>"
17
+ }
tokenization_xtrimopglm.py ADDED
@@ -0,0 +1,140 @@
1
+ """Tokenization classes for xTrimoPGLM."""
2
+
3
+ import os
4
+ from typing import List, Optional, Union, Dict, Any
5
+ from transformers.utils import TensorType
6
+ from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
7
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
8
+
9
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
10
+
11
+
12
+ def load_vocab_file(vocab_file: str) -> List[str]:
13
+ with open(vocab_file, "r") as f:
14
+ lines = f.read().splitlines()
15
+ return [line.strip() for line in lines]
16
+
17
+
18
+ class xTrimoPGLMTokenizer(PreTrainedTokenizer):
19
+ """
20
+ Constructs a xTrimoPGLM tokenizer.
21
+ """
22
+
23
+ vocab_files_names = VOCAB_FILES_NAMES
24
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
25
+ def __init__(
26
+ self,
27
+ vocab_file: str,
28
+ unk_token: str = "<unk>",
29
+ pad_token: str = "<pad>",
30
+ mask_token: str = "<mask>",
31
+ eos_token: str = "<eos>",
32
+ model_max_length: int = 2048,
33
+ additional_special_tokens: Optional[List[str]] = None,
34
+ **kwargs,
35
+ ):
36
+ self.all_tokens = load_vocab_file(vocab_file)
37
+ self._id_to_token = dict(enumerate(self.all_tokens))
38
+ self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
39
+
40
+ if additional_special_tokens is None:
41
+ additional_special_tokens = ['<pad>', '<mask>', '<gmask>', '<smask>', '<eod>', '<sop>', '<eop>', '<eos>', '<unk>']
42
+
43
+ super().__init__(
44
+ unk_token=unk_token,
45
+ pad_token=pad_token,
46
+ mask_token=mask_token,
47
+ eos_token=eos_token,
48
+ model_max_length=model_max_length,
49
+ additional_special_tokens=additional_special_tokens,
50
+ **kwargs,
51
+ )
52
+
53
+ self.unique_no_split_tokens = self.all_tokens
54
+ self._update_trie(self.unique_no_split_tokens)
55
+
56
+ def _convert_id_to_token(self, index: int) -> str:
57
+ return self._id_to_token.get(index, self.unk_token)
58
+
59
+ def _convert_token_to_id(self, token: str) -> int:
60
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
61
+
62
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
63
+ return text.split()
64
+
65
+ def get_vocab(self) -> dict:
66
+ base_vocab = self._token_to_id.copy()
67
+ base_vocab.update(self.added_tokens_encoder)
68
+ return base_vocab
69
+
70
+ def token_to_id(self, token: str) -> int:
71
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
72
+
73
+ def id_to_token(self, index: int) -> str:
74
+ return self._id_to_token.get(index, self.unk_token)
75
+
76
+ def build_inputs_with_special_tokens(
77
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
78
+ ) -> List[int]:
79
+ sep = [self.eos_token_id]
80
+ if token_ids_1 is None:
81
+ if self.eos_token_id is None:
82
+ return token_ids_0
83
+ else:
84
+ return token_ids_0 + sep
85
+ elif self.eos_token_id is None:
86
+ raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
87
+ return token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
88
+
89
+
90
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
91
+ vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "tokenizer.model")
92
+ with open(vocab_file, "w") as f:
93
+ f.write("\n".join(self.all_tokens))
94
+ return (vocab_file,)
95
+
96
+ @property
97
+ def vocab_size(self) -> int:
98
+ return len(self.all_tokens)
99
+
100
+ def apply_chat_template(
101
+ self,
102
+ query,
103
+ add_generation_prompt: bool = True,
104
+ tokenize: bool = True,
105
+ padding: bool = False,
106
+ truncation: bool = False,
107
+ max_length: Optional[int] = None,
108
+ return_tensors: Optional[Union[str, TensorType]] = None,
109
+ return_dict: bool = False,
110
+ tokenizer_kwargs: Optional[Dict[str, Any]] = None,
111
+ add_special_tokens: bool = True,
112
+ **kwargs,
113
+ ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
114
+
115
+ generation_prompt = "<gmask><sop><eos>"
116
+ if isinstance(query, str):
117
+ query = [query]
118
+ prompt_query = []
119
+ if add_generation_prompt:
120
+ for each in query:
121
+ assert isinstance(each, str)
122
+ prompt_query.append(generation_prompt+each)
123
+ else:
124
+ prompt_query = query
125
+ if tokenize:
126
+ output = self.batch_encode_plus(
127
+ prompt_query,
128
+ padding=padding,
129
+ truncation=truncation,
130
+ max_length=max_length,
131
+ return_tensors=return_tensors,
132
+ is_split_into_words=True,
133
+ add_special_tokens=False
134
+ )
135
+ if return_dict:
136
+ return output
137
+ else:
138
+ return output["input_ids"]
139
+ else:
140
+ return prompt_query
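A small sketch of the tokenizer above, assuming a local vocabulary file with one token per line (the residue strings are placeholders):

from tokenization_xtrimopglm import xTrimoPGLMTokenizer

tok = xTrimoPGLMTokenizer("tokenizer.model")

# Plain encoding splits on whitespace and appends <eos> via build_inputs_with_special_tokens.
ids = tok("M K T A Y I")["input_ids"]

# apply_chat_template prepends the "<gmask><sop><eos>" generation prompt.
prompt = tok.apply_chat_template("MKTAYI", add_generation_prompt=True, tokenize=False)
print(prompt)  # ['<gmask><sop><eos>MKTAYI']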
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bd2746ab3ae26b1ae1c4246e0d35f895c86c9685fcd85f8aa89a9a08e534da0
3
+ size 112
tokenizer_config.json ADDED
@@ -0,0 +1,100 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<pad>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "28": {
12
+ "content": "<mask>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "29": {
20
+ "content": "<gmask>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "30": {
28
+ "content": "<smask>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "31": {
36
+ "content": "<eod>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "32": {
44
+ "content": "<sop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "33": {
52
+ "content": "<eop>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "34": {
60
+ "content": "<eos>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "35": {
68
+ "content": "<unk>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ }
75
+ },
76
+ "additional_special_tokens": [
77
+ "<pad>",
78
+ "<mask>",
79
+ "<gmask>",
80
+ "<smask>",
81
+ "<eod>",
82
+ "<sop>",
83
+ "<eop>",
84
+ "<eos>",
85
+ "<unk>"
86
+ ],
87
+ "auto_map": {
88
+ "AutoTokenizer": [
89
+ "tokenization_xtrimopglm.xTrimoPGLMTokenizer",
90
+ null
91
+ ]
92
+ },
93
+ "clean_up_tokenization_spaces": true,
94
+ "eos_token": "<eos>",
95
+ "mask_token": "<mask>",
96
+ "model_max_length": 2048,
97
+ "pad_token": "<pad>",
98
+ "tokenizer_class": "xTrimoPGLMTokenizer",
99
+ "unk_token": "<unk>"
100
+ }