tiagoblima committed on
Commit
f9b12bb
1 Parent(s): 5c652a2

End of training

Browse files
Files changed (5) hide show
  1. README.md +4 -2
  2. all_results.json +13 -0
  3. eval_results.json +8 -0
  4. train_results.json +8 -0
  5. trainer_state.json +308 -0
README.md CHANGED
@@ -3,6 +3,8 @@ license: mit
3
  base_model: unicamp-dl/ptt5-large-t5-vocab
4
  tags:
5
  - generated_from_trainer
 
 
6
  model-index:
7
  - name: t5_large-qg-ap-nopeft
8
  results: []
@@ -13,9 +15,9 @@ should probably proofread and complete it, then remove this comment. -->
13
 
14
  # t5_large-qg-ap-nopeft
15
 
16
- This model is a fine-tuned version of [unicamp-dl/ptt5-large-t5-vocab](https://huggingface.co/unicamp-dl/ptt5-large-t5-vocab) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
- - Loss: 1.2048
19
 
20
  ## Model description
21
 
 
3
  base_model: unicamp-dl/ptt5-large-t5-vocab
4
  tags:
5
  - generated_from_trainer
6
+ datasets:
7
+ - tiagoblima/qg_squad_v1_pt
8
  model-index:
9
  - name: t5_large-qg-ap-nopeft
10
  results: []
 
15
 
16
  # t5_large-qg-ap-nopeft
17
 
18
+ This model is a fine-tuned version of [unicamp-dl/ptt5-large-t5-vocab](https://huggingface.co/unicamp-dl/ptt5-large-t5-vocab) on the tiagoblima/qg_squad_v1_pt dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 1.1994
21
 
22
  ## Model description
23
 
all_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_loss": 1.1993927955627441,
4
+ "eval_runtime": 37.8229,
5
+ "eval_samples": 3585,
6
+ "eval_samples_per_second": 94.784,
7
+ "eval_steps_per_second": 5.949,
8
+ "train_loss": 1.0571302187324751,
9
+ "train_runtime": 9858.1834,
10
+ "train_samples": 51704,
11
+ "train_samples_per_second": 26.224,
12
+ "train_steps_per_second": 0.41
13
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_loss": 1.1993927955627441,
4
+ "eval_runtime": 37.8229,
5
+ "eval_samples": 3585,
6
+ "eval_samples_per_second": 94.784,
7
+ "eval_steps_per_second": 5.949
8
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "train_loss": 1.0571302187324751,
4
+ "train_runtime": 9858.1834,
5
+ "train_samples": 51704,
6
+ "train_samples_per_second": 26.224,
7
+ "train_steps_per_second": 0.41
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 1.1993927955627441,
3
+ "best_model_checkpoint": "/temp/t5_large-qg-ap-nopeft/checkpoint-2424",
4
+ "epoch": 5.0,
5
+ "eval_steps": 500,
6
+ "global_step": 4040,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.12,
13
+ "learning_rate": 9.752475247524753e-05,
14
+ "loss": 1.9922,
15
+ "step": 100
16
+ },
17
+ {
18
+ "epoch": 0.25,
19
+ "learning_rate": 9.504950495049505e-05,
20
+ "loss": 1.3295,
21
+ "step": 200
22
+ },
23
+ {
24
+ "epoch": 0.37,
25
+ "learning_rate": 9.257425742574259e-05,
26
+ "loss": 1.2696,
27
+ "step": 300
28
+ },
29
+ {
30
+ "epoch": 0.5,
31
+ "learning_rate": 9.009900990099011e-05,
32
+ "loss": 1.2353,
33
+ "step": 400
34
+ },
35
+ {
36
+ "epoch": 0.62,
37
+ "learning_rate": 8.762376237623763e-05,
38
+ "loss": 1.2092,
39
+ "step": 500
40
+ },
41
+ {
42
+ "epoch": 0.74,
43
+ "learning_rate": 8.514851485148515e-05,
44
+ "loss": 1.1913,
45
+ "step": 600
46
+ },
47
+ {
48
+ "epoch": 0.87,
49
+ "learning_rate": 8.267326732673268e-05,
50
+ "loss": 1.1831,
51
+ "step": 700
52
+ },
53
+ {
54
+ "epoch": 0.99,
55
+ "learning_rate": 8.019801980198021e-05,
56
+ "loss": 1.1714,
57
+ "step": 800
58
+ },
59
+ {
60
+ "epoch": 1.0,
61
+ "eval_loss": 1.2384811639785767,
62
+ "eval_runtime": 37.8804,
63
+ "eval_samples_per_second": 94.64,
64
+ "eval_steps_per_second": 5.94,
65
+ "step": 808
66
+ },
67
+ {
68
+ "epoch": 1.11,
69
+ "learning_rate": 7.772277227722773e-05,
70
+ "loss": 1.0963,
71
+ "step": 900
72
+ },
73
+ {
74
+ "epoch": 1.24,
75
+ "learning_rate": 7.524752475247526e-05,
76
+ "loss": 1.0913,
77
+ "step": 1000
78
+ },
79
+ {
80
+ "epoch": 1.36,
81
+ "learning_rate": 7.277227722772278e-05,
82
+ "loss": 1.0815,
83
+ "step": 1100
84
+ },
85
+ {
86
+ "epoch": 1.49,
87
+ "learning_rate": 7.02970297029703e-05,
88
+ "loss": 1.0873,
89
+ "step": 1200
90
+ },
91
+ {
92
+ "epoch": 1.61,
93
+ "learning_rate": 6.782178217821783e-05,
94
+ "loss": 1.0841,
95
+ "step": 1300
96
+ },
97
+ {
98
+ "epoch": 1.73,
99
+ "learning_rate": 6.534653465346535e-05,
100
+ "loss": 1.0671,
101
+ "step": 1400
102
+ },
103
+ {
104
+ "epoch": 1.86,
105
+ "learning_rate": 6.287128712871287e-05,
106
+ "loss": 1.0734,
107
+ "step": 1500
108
+ },
109
+ {
110
+ "epoch": 1.98,
111
+ "learning_rate": 6.03960396039604e-05,
112
+ "loss": 1.0782,
113
+ "step": 1600
114
+ },
115
+ {
116
+ "epoch": 2.0,
117
+ "eval_loss": 1.2065564393997192,
118
+ "eval_runtime": 37.776,
119
+ "eval_samples_per_second": 94.901,
120
+ "eval_steps_per_second": 5.956,
121
+ "step": 1616
122
+ },
123
+ {
124
+ "epoch": 2.1,
125
+ "learning_rate": 5.792079207920792e-05,
126
+ "loss": 1.0134,
127
+ "step": 1700
128
+ },
129
+ {
130
+ "epoch": 2.23,
131
+ "learning_rate": 5.544554455445545e-05,
132
+ "loss": 1.0127,
133
+ "step": 1800
134
+ },
135
+ {
136
+ "epoch": 2.35,
137
+ "learning_rate": 5.2970297029702974e-05,
138
+ "loss": 0.9995,
139
+ "step": 1900
140
+ },
141
+ {
142
+ "epoch": 2.48,
143
+ "learning_rate": 5.0495049504950497e-05,
144
+ "loss": 1.004,
145
+ "step": 2000
146
+ },
147
+ {
148
+ "epoch": 2.6,
149
+ "learning_rate": 4.801980198019802e-05,
150
+ "loss": 1.0019,
151
+ "step": 2100
152
+ },
153
+ {
154
+ "epoch": 2.72,
155
+ "learning_rate": 4.554455445544555e-05,
156
+ "loss": 0.9972,
157
+ "step": 2200
158
+ },
159
+ {
160
+ "epoch": 2.85,
161
+ "learning_rate": 4.306930693069307e-05,
162
+ "loss": 1.0135,
163
+ "step": 2300
164
+ },
165
+ {
166
+ "epoch": 2.97,
167
+ "learning_rate": 4.05940594059406e-05,
168
+ "loss": 1.0064,
169
+ "step": 2400
170
+ },
171
+ {
172
+ "epoch": 3.0,
173
+ "eval_loss": 1.1993927955627441,
174
+ "eval_runtime": 37.796,
175
+ "eval_samples_per_second": 94.851,
176
+ "eval_steps_per_second": 5.953,
177
+ "step": 2424
178
+ },
179
+ {
180
+ "epoch": 3.09,
181
+ "learning_rate": 3.811881188118812e-05,
182
+ "loss": 0.9637,
183
+ "step": 2500
184
+ },
185
+ {
186
+ "epoch": 3.22,
187
+ "learning_rate": 3.5643564356435645e-05,
188
+ "loss": 0.9496,
189
+ "step": 2600
190
+ },
191
+ {
192
+ "epoch": 3.34,
193
+ "learning_rate": 3.3168316831683175e-05,
194
+ "loss": 0.9621,
195
+ "step": 2700
196
+ },
197
+ {
198
+ "epoch": 3.47,
199
+ "learning_rate": 3.06930693069307e-05,
200
+ "loss": 0.9592,
201
+ "step": 2800
202
+ },
203
+ {
204
+ "epoch": 3.59,
205
+ "learning_rate": 2.8217821782178216e-05,
206
+ "loss": 0.9571,
207
+ "step": 2900
208
+ },
209
+ {
210
+ "epoch": 3.71,
211
+ "learning_rate": 2.5742574257425746e-05,
212
+ "loss": 0.94,
213
+ "step": 3000
214
+ },
215
+ {
216
+ "epoch": 3.84,
217
+ "learning_rate": 2.326732673267327e-05,
218
+ "loss": 0.9553,
219
+ "step": 3100
220
+ },
221
+ {
222
+ "epoch": 3.96,
223
+ "learning_rate": 2.079207920792079e-05,
224
+ "loss": 0.9555,
225
+ "step": 3200
226
+ },
227
+ {
228
+ "epoch": 4.0,
229
+ "eval_loss": 1.2030982971191406,
230
+ "eval_runtime": 37.7841,
231
+ "eval_samples_per_second": 94.881,
232
+ "eval_steps_per_second": 5.955,
233
+ "step": 3232
234
+ },
235
+ {
236
+ "epoch": 4.08,
237
+ "learning_rate": 1.8316831683168317e-05,
238
+ "loss": 0.939,
239
+ "step": 3300
240
+ },
241
+ {
242
+ "epoch": 4.21,
243
+ "learning_rate": 1.5841584158415843e-05,
244
+ "loss": 0.9216,
245
+ "step": 3400
246
+ },
247
+ {
248
+ "epoch": 4.33,
249
+ "learning_rate": 1.3366336633663367e-05,
250
+ "loss": 0.9303,
251
+ "step": 3500
252
+ },
253
+ {
254
+ "epoch": 4.46,
255
+ "learning_rate": 1.0891089108910891e-05,
256
+ "loss": 0.9122,
257
+ "step": 3600
258
+ },
259
+ {
260
+ "epoch": 4.58,
261
+ "learning_rate": 8.415841584158417e-06,
262
+ "loss": 0.9262,
263
+ "step": 3700
264
+ },
265
+ {
266
+ "epoch": 4.7,
267
+ "learning_rate": 5.940594059405941e-06,
268
+ "loss": 0.9135,
269
+ "step": 3800
270
+ },
271
+ {
272
+ "epoch": 4.83,
273
+ "learning_rate": 3.4653465346534657e-06,
274
+ "loss": 0.929,
275
+ "step": 3900
276
+ },
277
+ {
278
+ "epoch": 4.95,
279
+ "learning_rate": 9.900990099009902e-07,
280
+ "loss": 0.9261,
281
+ "step": 4000
282
+ },
283
+ {
284
+ "epoch": 5.0,
285
+ "eval_loss": 1.2047841548919678,
286
+ "eval_runtime": 37.8264,
287
+ "eval_samples_per_second": 94.775,
288
+ "eval_steps_per_second": 5.948,
289
+ "step": 4040
290
+ },
291
+ {
292
+ "epoch": 5.0,
293
+ "step": 4040,
294
+ "total_flos": 5.5970802696192e+17,
295
+ "train_loss": 1.0571302187324751,
296
+ "train_runtime": 9858.1834,
297
+ "train_samples_per_second": 26.224,
298
+ "train_steps_per_second": 0.41
299
+ }
300
+ ],
301
+ "logging_steps": 100,
302
+ "max_steps": 4040,
303
+ "num_train_epochs": 5,
304
+ "save_steps": 500,
305
+ "total_flos": 5.5970802696192e+17,
306
+ "trial_name": null,
307
+ "trial_params": null
308
+ }