Pclanglais committed
Commit • 1668dbc
1 Parent(s): 0321ff3
Upload folder using huggingface_hub

Files changed:
- .gitattributes +1 -0
- added_tokens.json +3 -0
- config.json +110 -0
- model.safetensors +3 -0
- optimizer.pt +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +15 -0
- spm.model +3 -0
- tokenizer.json +3 -0
- tokenizer_config.json +58 -0
- trainer_state.json +124 -0
- training_args.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "[MASK]": 250101
+}
config.json
ADDED
@@ -0,0 +1,110 @@
+{
+  "_name_or_path": "/gpfsdswork/projects/rech/fmr/uft12cr/classification/deberta-large",
+  "architectures": [
+    "DebertaV2ForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Intention_Informer",
+    "1": "Intention_\u00c9duquer",
+    "2": "Intention_Sensibiliser",
+    "3": "Intention_Mobiliser",
+    "4": "Intention_Promouvoir",
+    "5": "Intention_Divertir",
+    "6": "Theme_Sant\u00e9",
+    "7": "Theme_Soci\u00e9t\u00e9",
+    "8": "Theme_\u00c9conomie",
+    "9": "Theme_Politique",
+    "10": "Theme_Pub",
+    "11": "Theme_Sports",
+    "12": "Theme_Environnement",
+    "13": "Theme_Culture",
+    "14": "Theme_je",
+    "15": "Expression_Publicite",
+    "16": "Expression_Informations",
+    "17": "Expression_Reportage/Enquete",
+    "18": "Expression_Interview/Discussion",
+    "19": "Expression_Meteo",
+    "20": "Tonalite_Informative",
+    "21": "Tonalite_Emotionnelle",
+    "22": "Tonalite_Publicitaire",
+    "23": "Tonalite_Analytique",
+    "24": "Tonalite_Satirique",
+    "25": "Emotion_Neutre",
+    "26": "Emotion_Alarmant",
+    "27": "Emotion_Indign\u00e9",
+    "28": "Emotion_Persuasif",
+    "29": "Emotion_Solennel",
+    "30": "Emotion_Optimiste",
+    "31": "Emotion_Ironique",
+    "32": "Figure_Metaphore",
+    "33": "Figure_Paradoxe",
+    "34": "Figure_Hyperbole"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Emotion_Alarmant": 26,
+    "Emotion_Indign\u00e9": 27,
+    "Emotion_Ironique": 31,
+    "Emotion_Neutre": 25,
+    "Emotion_Optimiste": 30,
+    "Emotion_Persuasif": 28,
+    "Emotion_Solennel": 29,
+    "Expression_Informations": 16,
+    "Expression_Interview/Discussion": 18,
+    "Expression_Meteo": 19,
+    "Expression_Publicite": 15,
+    "Expression_Reportage/Enquete": 17,
+    "Figure_Hyperbole": 34,
+    "Figure_Metaphore": 32,
+    "Figure_Paradoxe": 33,
+    "Intention_Divertir": 5,
+    "Intention_Informer": 0,
+    "Intention_Mobiliser": 3,
+    "Intention_Promouvoir": 4,
+    "Intention_Sensibiliser": 2,
+    "Intention_\u00c9duquer": 1,
+    "Theme_Culture": 13,
+    "Theme_Environnement": 12,
+    "Theme_Politique": 9,
+    "Theme_Pub": 10,
+    "Theme_Sant\u00e9": 6,
+    "Theme_Soci\u00e9t\u00e9": 7,
+    "Theme_Sports": 11,
+    "Theme_je": 14,
+    "Theme_\u00c9conomie": 8,
+    "Tonalite_Analytique": 23,
+    "Tonalite_Emotionnelle": 21,
+    "Tonalite_Informative": 20,
+    "Tonalite_Publicitaire": 22,
+    "Tonalite_Satirique": 24
+  },
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 512,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "norm_rel_ebd": "layer_norm",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": [
+    "p2c",
+    "c2p"
+  ],
+  "position_biased_input": false,
+  "position_buckets": 256,
+  "problem_type": "multi_label_classification",
+  "relative_attention": true,
+  "share_att_key": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.0.dev0",
+  "type_vocab_size": 0,
+  "vocab_size": 251000
+}
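Note (not part of the diff): the config above declares a 12-layer, 768-hidden DeBERTa-v2 encoder with 35 labels (Intention/Theme/Expression/Tonalite/Emotion/Figure groups) and problem_type "multi_label_classification", so each label gets an independent sigmoid rather than a softmax. A minimal inference sketch, assuming the uploaded files are available in a local directory whose path is a placeholder here:

# Minimal sketch, assuming these files sit in "path/to/checkpoint" (placeholder,
# not a repo id from this commit). Multi-label: sigmoid per label, 0.5 threshold.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_dir = "path/to/checkpoint"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)

inputs = tokenizer("Texte de la transcription ...", return_tensors="pt",
                   truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits          # shape (1, 35)
probs = torch.sigmoid(logits)[0]
predicted = [model.config.id2label[i] for i, p in enumerate(probs) if p > 0.5]
print(predicted)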
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c062326a3d397e2afad75b264aaef47a0923a24c2b66e3d7c43b69b6cc54ed46
+size 1115369724
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fd917d9135af97af2e6beb9601a7271692252feb68f771cf700fc43e479eaa5
+size 2230859002
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e2b27a523a9fd069d1154bb25df70d3244a9c0a81e1eff87a5bf621d7350745
+size 14244
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7df548493459caf2a0282cf8e370e77e5b78ca0b2c21d4560c164003ecd2189e
+size 1064
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
spm.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13c8d666d62a7bc4ac8f040aab68e942c861f93303156cc28f5c7e885d86d6e3
+size 4305025
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f42839ec04c3025630c31f069df13d750b5a3c969345d768ca8e0da9119600c7
+size 16331661
tokenizer_config.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250101": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "unk_token": "[UNK]",
+  "vocab_type": "spm"
+}
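Note (not part of the diff): added_tokens_decoder maps ids 0-3 to [PAD]/[CLS]/[SEP]/[UNK] and 250101 to [MASK], and model_max_length is the "unset" sentinel value, so any truncation length has to be chosen by the caller (512 per config.json). A small sketch, again assuming a local placeholder path:

# Minimal sketch, assuming the tokenizer files above are in "path/to/checkpoint"
# (placeholder). Requires the sentencepiece package for DebertaV2Tokenizer.
from transformers import DebertaV2Tokenizer

tok = DebertaV2Tokenizer.from_pretrained("path/to/checkpoint")
print(tok.convert_tokens_to_ids("[MASK]"))   # expected 250101 per added_tokens.json
print(tok.cls_token_id, tok.sep_token_id)    # expected 1, 2 per added_tokens_decoder
enc = tok("Un exemple de texte.", truncation=True, max_length=512)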
trainer_state.json
ADDED
@@ -0,0 +1,124 @@
+{
+  "best_metric": 0.7596717467760844,
+  "best_model_checkpoint": "deberta-classification-transcripts/checkpoint-4325",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 4325,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.58,
+      "learning_rate": 8.84393063583815e-06,
+      "loss": 0.4483,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.09210526315789473,
+      "eval_f1": 0.6887254901960784,
+      "eval_loss": 0.2805722951889038,
+      "eval_roc_auc": 0.7836829389177116,
+      "eval_runtime": 1.7776,
+      "eval_samples_per_second": 42.755,
+      "eval_steps_per_second": 42.755,
+      "step": 865
+    },
+    {
+      "epoch": 1.16,
+      "learning_rate": 7.687861271676302e-06,
+      "loss": 0.339,
+      "step": 1000
+    },
+    {
+      "epoch": 1.73,
+      "learning_rate": 6.531791907514451e-06,
+      "loss": 0.2866,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.10526315789473684,
+      "eval_f1": 0.7407407407407407,
+      "eval_loss": 0.22884871065616608,
+      "eval_roc_auc": 0.8162958199606408,
+      "eval_runtime": 1.7174,
+      "eval_samples_per_second": 44.252,
+      "eval_steps_per_second": 44.252,
+      "step": 1730
+    },
+    {
+      "epoch": 2.31,
+      "learning_rate": 5.375722543352601e-06,
+      "loss": 0.265,
+      "step": 2000
+    },
+    {
+      "epoch": 2.89,
+      "learning_rate": 4.219653179190752e-06,
+      "loss": 0.2368,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.06578947368421052,
+      "eval_f1": 0.7450058754406581,
+      "eval_loss": 0.20490357279777527,
+      "eval_roc_auc": 0.8221279142285448,
+      "eval_runtime": 1.7181,
+      "eval_samples_per_second": 44.235,
+      "eval_steps_per_second": 44.235,
+      "step": 2595
+    },
+    {
+      "epoch": 3.47,
+      "learning_rate": 3.063583815028902e-06,
+      "loss": 0.2351,
+      "step": 3000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.09210526315789473,
+      "eval_f1": 0.7520759193357057,
+      "eval_loss": 0.19738489389419556,
+      "eval_roc_auc": 0.8239552326387778,
+      "eval_runtime": 1.7202,
+      "eval_samples_per_second": 44.18,
+      "eval_steps_per_second": 44.18,
+      "step": 3460
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 1.907514450867052e-06,
+      "loss": 0.2232,
+      "step": 3500
+    },
+    {
+      "epoch": 4.62,
+      "learning_rate": 7.514450867052025e-07,
+      "loss": 0.2127,
+      "step": 4000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.10526315789473684,
+      "eval_f1": 0.7596717467760844,
+      "eval_loss": 0.19270123541355133,
+      "eval_roc_auc": 0.830700986111798,
+      "eval_runtime": 1.7179,
+      "eval_samples_per_second": 44.241,
+      "eval_steps_per_second": 44.241,
+      "step": 4325
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 4325,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 1138312891315200.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
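Note (not part of the diff): best_metric equals the epoch-5 eval_f1 and best_model_checkpoint points at checkpoint-4325 (the final step), which is consistent with the best model being selected on eval F1 and the last epoch scoring highest. The eval fields above are not defined in this commit; a plausible way to compute them for multi-label outputs, with a 0.5 threshold and micro averaging as explicit assumptions, is sketched here:

# Minimal sketch of multi-label eval metrics (threshold and averaging are
# assumptions, not read from the training code). accuracy_score on a
# multi-label indicator matrix is exact-match subset accuracy, which would
# explain the low eval_accuracy values relative to eval_f1.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

def multilabel_metrics(logits: np.ndarray, labels: np.ndarray) -> dict:
    probs = 1.0 / (1.0 + np.exp(-logits))      # sigmoid over per-label logits
    preds = (probs >= 0.5).astype(int)
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1_score(labels, preds, average="micro"),
        "roc_auc": roc_auc_score(labels, probs, average="micro"),
    }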
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:458c8d3b21adc6abf439f3039446992fc81ee61cc98a20d37234ca69deb52872
+size 4792