alexandreacff committed on
Commit 194dcc8
1 Parent(s): 3ab0145

End of training

README.md CHANGED
@@ -2,6 +2,7 @@
 license: apache-2.0
 base_model: facebook/wav2vec2-large
 tags:
+- audio-classification
 - generated_from_trainer
 metrics:
 - accuracy
@@ -15,10 +16,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # wav2vec2-large-ft-fake-detection
 
-This model is a fine-tuned version of [facebook/wav2vec2-large](https://huggingface.co/facebook/wav2vec2-large) on an unknown dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-large](https://huggingface.co/facebook/wav2vec2-large) on the alexandreacff/kaggle-fake-detection dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7867
-- Accuracy: 0.6822
+- Loss: 0.6694
+- Accuracy: 0.7103
 
 ## Model description
 
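Since the commit adds the audio-classification tag, the checkpoint can be tried with the transformers pipeline. A minimal sketch, assuming the model is published on the Hub under the repo id alexandreacff/wav2vec2-large-ft-fake-detection and that sample.wav is a local 16 kHz audio file (both are assumptions for illustration, not part of the commit):

```python
# Hedged usage sketch: load the fine-tuned checkpoint via the
# audio-classification pipeline. Repo id and input file are assumptions.
from transformers import pipeline

classifier = pipeline(
    "audio-classification",
    model="alexandreacff/wav2vec2-large-ft-fake-detection",
)

# wav2vec2 models expect 16 kHz mono audio; a file path or a numpy array works.
print(classifier("sample.wav"))
```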
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 9.850746268656717,
+    "eval_accuracy": 0.7102803738317757,
+    "eval_loss": 0.669384777545929,
+    "eval_runtime": 41.1606,
+    "eval_samples_per_second": 25.996,
+    "eval_steps_per_second": 1.628,
+    "total_flos": 1.277142970631616e+18,
+    "train_loss": 0.33447390686381945,
+    "train_runtime": 2006.3999,
+    "train_samples_per_second": 21.317,
+    "train_steps_per_second": 0.164
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 9.850746268656717,
+    "eval_accuracy": 0.7102803738317757,
+    "eval_loss": 0.669384777545929,
+    "eval_runtime": 41.1606,
+    "eval_samples_per_second": 25.996,
+    "eval_steps_per_second": 1.628
+}
runs/May09_14-43-35_5c7bc4386fe7/events.out.tfevents.1715267987.5c7bc4386fe7.465.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f58f6074ad579aa6d0f9ba91fff62ee6e03916a2448cc9292e8e216f81a11c5f
+size 363
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 9.850746268656717,
+    "total_flos": 1.277142970631616e+18,
+    "train_loss": 0.33447390686381945,
+    "train_runtime": 2006.3999,
+    "train_samples_per_second": 21.317,
+    "train_steps_per_second": 0.164
+}
trainer_state.json ADDED
@@ -0,0 +1,363 @@
+{
+  "best_metric": 0.7102803738317757,
+  "best_model_checkpoint": "wav2vec2-large-ft-fake-detection/checkpoint-301",
+  "epoch": 9.850746268656717,
+  "eval_steps": 500,
+  "global_step": 330,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.29850746268656714,
+      "grad_norm": 0.7566494941711426,
+      "learning_rate": 9.090909090909091e-06,
+      "loss": 0.6829,
+      "step": 10
+    },
+    {
+      "epoch": 0.5970149253731343,
+      "grad_norm": 0.6273395419120789,
+      "learning_rate": 1.8181818181818182e-05,
+      "loss": 0.6512,
+      "step": 20
+    },
+    {
+      "epoch": 0.8955223880597015,
+      "grad_norm": 1.1124577522277832,
+      "learning_rate": 2.7272727272727273e-05,
+      "loss": 0.6274,
+      "step": 30
+    },
+    {
+      "epoch": 0.9850746268656716,
+      "eval_accuracy": 0.6205607476635514,
+      "eval_loss": 0.62535560131073,
+      "eval_runtime": 39.1441,
+      "eval_samples_per_second": 27.335,
+      "eval_steps_per_second": 1.712,
+      "step": 33
+    },
+    {
+      "epoch": 1.1940298507462686,
+      "grad_norm": 1.8781670331954956,
+      "learning_rate": 2.9292929292929294e-05,
+      "loss": 0.577,
+      "step": 40
+    },
+    {
+      "epoch": 1.4925373134328357,
+      "grad_norm": 6.504702568054199,
+      "learning_rate": 2.8282828282828285e-05,
+      "loss": 0.5106,
+      "step": 50
+    },
+    {
+      "epoch": 1.7910447761194028,
+      "grad_norm": 2.83394455909729,
+      "learning_rate": 2.7272727272727273e-05,
+      "loss": 0.4961,
+      "step": 60
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6158878504672897,
+      "eval_loss": 0.947680652141571,
+      "eval_runtime": 43.4278,
+      "eval_samples_per_second": 24.639,
+      "eval_steps_per_second": 1.543,
+      "step": 67
+    },
+    {
+      "epoch": 2.08955223880597,
+      "grad_norm": 2.453080177307129,
+      "learning_rate": 2.6262626262626265e-05,
+      "loss": 0.4237,
+      "step": 70
+    },
+    {
+      "epoch": 2.388059701492537,
+      "grad_norm": 2.8955702781677246,
+      "learning_rate": 2.5252525252525256e-05,
+      "loss": 0.3841,
+      "step": 80
+    },
+    {
+      "epoch": 2.6865671641791042,
+      "grad_norm": 2.1122732162475586,
+      "learning_rate": 2.4242424242424244e-05,
+      "loss": 0.3749,
+      "step": 90
+    },
+    {
+      "epoch": 2.9850746268656714,
+      "grad_norm": 3.88417649269104,
+      "learning_rate": 2.3232323232323235e-05,
+      "loss": 0.3391,
+      "step": 100
+    },
+    {
+      "epoch": 2.9850746268656714,
+      "eval_accuracy": 0.6411214953271028,
+      "eval_loss": 0.9273380041122437,
+      "eval_runtime": 39.2854,
+      "eval_samples_per_second": 27.237,
+      "eval_steps_per_second": 1.705,
+      "step": 100
+    },
+    {
+      "epoch": 3.283582089552239,
+      "grad_norm": 2.406618595123291,
+      "learning_rate": 2.222222222222222e-05,
+      "loss": 0.3291,
+      "step": 110
+    },
+    {
+      "epoch": 3.582089552238806,
+      "grad_norm": 2.385477066040039,
+      "learning_rate": 2.121212121212121e-05,
+      "loss": 0.3334,
+      "step": 120
+    },
+    {
+      "epoch": 3.8805970149253732,
+      "grad_norm": 5.02505350112915,
+      "learning_rate": 2.0202020202020203e-05,
+      "loss": 0.2857,
+      "step": 130
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6616822429906543,
+      "eval_loss": 0.6611053347587585,
+      "eval_runtime": 39.7207,
+      "eval_samples_per_second": 26.938,
+      "eval_steps_per_second": 1.687,
+      "step": 134
+    },
+    {
+      "epoch": 4.17910447761194,
+      "grad_norm": 2.3081231117248535,
+      "learning_rate": 1.919191919191919e-05,
+      "loss": 0.3724,
+      "step": 140
+    },
+    {
+      "epoch": 4.477611940298507,
+      "grad_norm": 3.455447196960449,
+      "learning_rate": 1.8181818181818182e-05,
+      "loss": 0.2929,
+      "step": 150
+    },
+    {
+      "epoch": 4.776119402985074,
+      "grad_norm": 3.234449625015259,
+      "learning_rate": 1.717171717171717e-05,
+      "loss": 0.3186,
+      "step": 160
+    },
+    {
+      "epoch": 4.985074626865671,
+      "eval_accuracy": 0.6214953271028038,
+      "eval_loss": 0.7654162049293518,
+      "eval_runtime": 40.6872,
+      "eval_samples_per_second": 26.298,
+      "eval_steps_per_second": 1.647,
+      "step": 167
+    },
+    {
+      "epoch": 5.074626865671641,
+      "grad_norm": 1.7067972421646118,
+      "learning_rate": 1.616161616161616e-05,
+      "loss": 0.308,
+      "step": 170
+    },
+    {
+      "epoch": 5.373134328358209,
+      "grad_norm": 2.3431663513183594,
+      "learning_rate": 1.5151515151515153e-05,
+      "loss": 0.2597,
+      "step": 180
+    },
+    {
+      "epoch": 5.6716417910447765,
+      "grad_norm": 5.564834117889404,
+      "learning_rate": 1.4141414141414143e-05,
+      "loss": 0.2341,
+      "step": 190
+    },
+    {
+      "epoch": 5.970149253731344,
+      "grad_norm": 4.282162189483643,
+      "learning_rate": 1.3131313131313132e-05,
+      "loss": 0.2483,
+      "step": 200
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6224299065420561,
+      "eval_loss": 0.9394506216049194,
+      "eval_runtime": 37.7276,
+      "eval_samples_per_second": 28.361,
+      "eval_steps_per_second": 1.776,
+      "step": 201
+    },
+    {
+      "epoch": 6.268656716417911,
+      "grad_norm": 3.2581288814544678,
+      "learning_rate": 1.2121212121212122e-05,
+      "loss": 0.2363,
+      "step": 210
+    },
+    {
+      "epoch": 6.567164179104478,
+      "grad_norm": 3.974890947341919,
+      "learning_rate": 1.111111111111111e-05,
+      "loss": 0.2808,
+      "step": 220
+    },
+    {
+      "epoch": 6.865671641791045,
+      "grad_norm": 2.1842823028564453,
+      "learning_rate": 1.0101010101010101e-05,
+      "loss": 0.239,
+      "step": 230
+    },
+    {
+      "epoch": 6.985074626865671,
+      "eval_accuracy": 0.6542056074766355,
+      "eval_loss": 0.8366522192955017,
+      "eval_runtime": 42.0615,
+      "eval_samples_per_second": 25.439,
+      "eval_steps_per_second": 1.593,
+      "step": 234
+    },
+    {
+      "epoch": 7.164179104477612,
+      "grad_norm": 2.455822706222534,
+      "learning_rate": 9.090909090909091e-06,
+      "loss": 0.2156,
+      "step": 240
+    },
+    {
+      "epoch": 7.462686567164179,
+      "grad_norm": 4.788252830505371,
+      "learning_rate": 8.08080808080808e-06,
+      "loss": 0.2297,
+      "step": 250
+    },
+    {
+      "epoch": 7.7611940298507465,
+      "grad_norm": 3.2945239543914795,
+      "learning_rate": 7.070707070707071e-06,
+      "loss": 0.2049,
+      "step": 260
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.685981308411215,
+      "eval_loss": 0.7708710432052612,
+      "eval_runtime": 43.0532,
+      "eval_samples_per_second": 24.853,
+      "eval_steps_per_second": 1.556,
+      "step": 268
+    },
+    {
+      "epoch": 8.059701492537313,
+      "grad_norm": 3.367969512939453,
+      "learning_rate": 6.060606060606061e-06,
+      "loss": 0.2485,
+      "step": 270
+    },
+    {
+      "epoch": 8.35820895522388,
+      "grad_norm": 2.869590997695923,
+      "learning_rate": 5.050505050505051e-06,
+      "loss": 0.2318,
+      "step": 280
+    },
+    {
+      "epoch": 8.656716417910447,
+      "grad_norm": 3.1148149967193604,
+      "learning_rate": 4.04040404040404e-06,
+      "loss": 0.2058,
+      "step": 290
+    },
+    {
+      "epoch": 8.955223880597014,
+      "grad_norm": 4.1323065757751465,
+      "learning_rate": 3.0303030303030305e-06,
+      "loss": 0.224,
+      "step": 300
+    },
+    {
+      "epoch": 8.985074626865671,
+      "eval_accuracy": 0.7102803738317757,
+      "eval_loss": 0.669384777545929,
+      "eval_runtime": 42.9166,
+      "eval_samples_per_second": 24.932,
+      "eval_steps_per_second": 1.561,
+      "step": 301
+    },
+    {
+      "epoch": 9.253731343283581,
+      "grad_norm": 3.609107494354248,
+      "learning_rate": 2.02020202020202e-06,
+      "loss": 0.2291,
+      "step": 310
+    },
+    {
+      "epoch": 9.552238805970148,
+      "grad_norm": 2.175992250442505,
+      "learning_rate": 1.01010101010101e-06,
+      "loss": 0.2151,
+      "step": 320
+    },
+    {
+      "epoch": 9.850746268656717,
+      "grad_norm": 3.807249069213867,
+      "learning_rate": 0.0,
+      "loss": 0.2279,
+      "step": 330
+    },
+    {
+      "epoch": 9.850746268656717,
+      "eval_accuracy": 0.6822429906542056,
+      "eval_loss": 0.786749541759491,
+      "eval_runtime": 39.843,
+      "eval_samples_per_second": 26.855,
+      "eval_steps_per_second": 1.682,
+      "step": 330
+    },
+    {
+      "epoch": 9.850746268656717,
+      "step": 330,
+      "total_flos": 1.277142970631616e+18,
+      "train_loss": 0.33447390686381945,
+      "train_runtime": 2006.3999,
+      "train_samples_per_second": 21.317,
+      "train_steps_per_second": 0.164
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 330,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.277142970631616e+18,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
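The best checkpoint (step 301, eval accuracy 0.7103) is recorded directly in this file. A minimal sketch for reading it back, assuming trainer_state.json has been downloaded to the working directory:

```python
# Minimal sketch: recover the best checkpoint recorded by the Trainer state.
# Assumes trainer_state.json sits in the current working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.7102803738317757 (eval accuracy)
print(state["best_model_checkpoint"])  # wav2vec2-large-ft-fake-detection/checkpoint-301
```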