HorcruxNo13 committed
Commit • 5727563
1 Parent(s): e4d595c

Model save

Files changed:
- README.md (+50, -56)
- config.json (+3, -3)
- model.safetensors (+3, -0)
- runs/Mar27_13-00-55_b68f219c92b0/events.out.tfevents.1711544458.b68f219c92b0.5853.0 (+3, -0)
- training_args.bin (+2, -2)
README.md
CHANGED
@@ -1,5 +1,6 @@
 ---
 license: other
+base_model: nvidia/mit-b0
 tags:
 - generated_from_trainer
 model-index:
@@ -14,14 +15,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Mean Iou: 0.
-- Mean Accuracy: 0.
-- Overall Accuracy: 0.
+- Loss: 0.0522
+- Mean Iou: 0.3485
+- Mean Accuracy: 0.6969
+- Overall Accuracy: 0.6969
 - Accuracy Unlabeled: nan
-- Accuracy
+- Accuracy Mass: 0.6969
 - Iou Unlabeled: 0.0
-- Iou
+- Iou Mass: 0.6969
 
 ## Model description
 
@@ -41,65 +42,58 @@
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0001
-- train_batch_size:
-- eval_batch_size:
+- train_batch_size: 32
+- eval_batch_size: 32
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 45
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Unlabeled | Accuracy
-| … (alignment row and 36 earlier removed rows truncated in this view) …
-| 0.0373 | 29.6 | 740 | 0.0769 | 0.4582 | 0.9163 | 0.9163 | nan | 0.9163 | 0.0 | 0.9163 |
-| 0.0366 | 30.4 | 760 | 0.0804 | 0.4632 | 0.9264 | 0.9264 | nan | 0.9264 | 0.0 | 0.9264 |
-| 0.0432 | 31.2 | 780 | 0.0793 | 0.4587 | 0.9174 | 0.9174 | nan | 0.9174 | 0.0 | 0.9174 |
-| 0.0328 | 32.0 | 800 | 0.0838 | 0.4688 | 0.9377 | 0.9377 | nan | 0.9377 | 0.0 | 0.9377 |
-| 0.0377 | 32.8 | 820 | 0.0805 | 0.4643 | 0.9286 | 0.9286 | nan | 0.9286 | 0.0 | 0.9286 |
-| 0.0327 | 33.6 | 840 | 0.0784 | 0.4614 | 0.9228 | 0.9228 | nan | 0.9228 | 0.0 | 0.9228 |
-| 0.032 | 34.4 | 860 | 0.0799 | 0.4629 | 0.9258 | 0.9258 | nan | 0.9258 | 0.0 | 0.9258 |
+| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Unlabeled | Accuracy Mass | Iou Unlabeled | Iou Mass |
+|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:------------------:|:-------------:|:-------------:|:--------:|
+| 0.4117 | 1.25 | 20 | 0.4147 | 0.0579 | 0.1158 | 0.1158 | nan | 0.1158 | 0.0 | 0.1158 |
+| 0.2756 | 2.5 | 40 | 0.2454 | 0.1520 | 0.3040 | 0.3040 | nan | 0.3040 | 0.0 | 0.3040 |
+| 0.2029 | 3.75 | 60 | 0.1873 | 0.3150 | 0.6301 | 0.6301 | nan | 0.6301 | 0.0 | 0.6301 |
+| 0.1506 | 5.0 | 80 | 0.1403 | 0.3616 | 0.7232 | 0.7232 | nan | 0.7232 | 0.0 | 0.7232 |
+| 0.1177 | 6.25 | 100 | 0.1077 | 0.1634 | 0.3269 | 0.3269 | nan | 0.3269 | 0.0 | 0.3269 |
+| 0.088 | 7.5 | 120 | 0.0972 | 0.2268 | 0.4536 | 0.4536 | nan | 0.4536 | 0.0 | 0.4536 |
+| 0.0796 | 8.75 | 140 | 0.0895 | 0.3776 | 0.7551 | 0.7551 | nan | 0.7551 | 0.0 | 0.7551 |
+| 0.0702 | 10.0 | 160 | 0.0754 | 0.3785 | 0.7570 | 0.7570 | nan | 0.7570 | 0.0 | 0.7570 |
+| 0.0643 | 11.25 | 180 | 0.0654 | 0.3207 | 0.6414 | 0.6414 | nan | 0.6414 | 0.0 | 0.6414 |
+| 0.0566 | 12.5 | 200 | 0.0635 | 0.3408 | 0.6815 | 0.6815 | nan | 0.6815 | 0.0 | 0.6815 |
+| 0.0467 | 13.75 | 220 | 0.0684 | 0.3971 | 0.7942 | 0.7942 | nan | 0.7942 | 0.0 | 0.7942 |
+| 0.0481 | 15.0 | 240 | 0.0599 | 0.3713 | 0.7425 | 0.7425 | nan | 0.7425 | 0.0 | 0.7425 |
+| 0.0465 | 16.25 | 260 | 0.0603 | 0.3121 | 0.6241 | 0.6241 | nan | 0.6241 | 0.0 | 0.6241 |
+| 0.0409 | 17.5 | 280 | 0.0569 | 0.3441 | 0.6882 | 0.6882 | nan | 0.6882 | 0.0 | 0.6882 |
+| 0.0392 | 18.75 | 300 | 0.0565 | 0.3568 | 0.7135 | 0.7135 | nan | 0.7135 | 0.0 | 0.7135 |
+| 0.0287 | 20.0 | 320 | 0.0571 | 0.3237 | 0.6474 | 0.6474 | nan | 0.6474 | 0.0 | 0.6474 |
+| 0.032 | 21.25 | 340 | 0.0574 | 0.3209 | 0.6419 | 0.6419 | nan | 0.6419 | 0.0 | 0.6419 |
+| 0.0308 | 22.5 | 360 | 0.0551 | 0.3371 | 0.6742 | 0.6742 | nan | 0.6742 | 0.0 | 0.6742 |
+| 0.0274 | 23.75 | 380 | 0.0546 | 0.3561 | 0.7122 | 0.7122 | nan | 0.7122 | 0.0 | 0.7122 |
+| 0.0246 | 25.0 | 400 | 0.0534 | 0.3491 | 0.6981 | 0.6981 | nan | 0.6981 | 0.0 | 0.6981 |
+| 0.0252 | 26.25 | 420 | 0.0533 | 0.3661 | 0.7322 | 0.7322 | nan | 0.7322 | 0.0 | 0.7322 |
+| 0.0251 | 27.5 | 440 | 0.0542 | 0.3507 | 0.7014 | 0.7014 | nan | 0.7014 | 0.0 | 0.7014 |
+| 0.027 | 28.75 | 460 | 0.0527 | 0.3531 | 0.7062 | 0.7062 | nan | 0.7062 | 0.0 | 0.7062 |
+| 0.0259 | 30.0 | 480 | 0.0539 | 0.3757 | 0.7514 | 0.7514 | nan | 0.7514 | 0.0 | 0.7514 |
+| 0.0212 | 31.25 | 500 | 0.0537 | 0.3283 | 0.6565 | 0.6565 | nan | 0.6565 | 0.0 | 0.6565 |
+| 0.0223 | 32.5 | 520 | 0.0517 | 0.3511 | 0.7022 | 0.7022 | nan | 0.7022 | 0.0 | 0.7022 |
+| 0.027 | 33.75 | 540 | 0.0504 | 0.3552 | 0.7103 | 0.7103 | nan | 0.7103 | 0.0 | 0.7103 |
+| 0.026 | 35.0 | 560 | 0.0516 | 0.3596 | 0.7192 | 0.7192 | nan | 0.7192 | 0.0 | 0.7192 |
+| 0.0239 | 36.25 | 580 | 0.0525 | 0.3559 | 0.7119 | 0.7119 | nan | 0.7119 | 0.0 | 0.7119 |
+| 0.0218 | 37.5 | 600 | 0.0532 | 0.3374 | 0.6748 | 0.6748 | nan | 0.6748 | 0.0 | 0.6748 |
+| 0.0214 | 38.75 | 620 | 0.0513 | 0.3591 | 0.7183 | 0.7183 | nan | 0.7183 | 0.0 | 0.7183 |
+| 0.0187 | 40.0 | 640 | 0.0517 | 0.3660 | 0.7320 | 0.7320 | nan | 0.7320 | 0.0 | 0.7320 |
+| 0.0201 | 41.25 | 660 | 0.0521 | 0.3647 | 0.7295 | 0.7295 | nan | 0.7295 | 0.0 | 0.7295 |
+| 0.024 | 42.5 | 680 | 0.0520 | 0.3485 | 0.6970 | 0.6970 | nan | 0.6970 | 0.0 | 0.6970 |
+| 0.0198 | 43.75 | 700 | 0.0516 | 0.3623 | 0.7247 | 0.7247 | nan | 0.7247 | 0.0 | 0.7247 |
+| 0.0236 | 45.0 | 720 | 0.0522 | 0.3485 | 0.6969 | 0.6969 | nan | 0.6969 | 0.0 | 0.6969 |
 
 
 ### Framework versions
 
-- Transformers 4.
+- Transformers 4.38.2
 - Pytorch 2.2.1+cu121
 - Datasets 2.18.0
-- Tokenizers 0.
+- Tokenizers 0.15.2
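For context on what the updated card describes: a SegFormer model (nvidia/mit-b0 encoder) fine-tuned for two-class segmentation (unlabeled vs. mass). Below is a minimal inference sketch, not part of this commit; the repo id and input filename are placeholders, and the use of the Auto* classes is an assumption about how the checkpoint would typically be loaded.

```python
# Minimal sketch (not from this commit): run the fine-tuned SegFormer
# checkpoint for two-class (unlabeled / mass) semantic segmentation.
# "HorcruxNo13/<repo-name>" is a hypothetical repo id; the actual repo
# name is not shown in this commit view.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

repo_id = "HorcruxNo13/<repo-name>"  # placeholder
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForSemanticSegmentation.from_pretrained(repo_id)

image = Image.open("example.png").convert("RGB")  # any RGB input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2, H/4, W/4)

# Upsample to the input resolution and take the per-pixel argmax:
# 0 = "unlabeled", 1 = "mass", matching id2label in config.json below.
upsampled = torch.nn.functional.interpolate(
    logits, size=image.size[::-1], mode="bilinear", align_corners=False
)
pred_mask = upsampled.argmax(dim=1)[0]
```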
config.json
CHANGED
@@ -29,12 +29,12 @@
   ],
   "id2label": {
     "0": "unlabeled",
-    "1": "
+    "1": "mass"
   },
   "image_size": 224,
   "initializer_range": 0.02,
   "label2id": {
-    "
+    "mass": 1,
     "unlabeled": 0
   },
   "layer_norm_eps": 1e-06,
@@ -74,5 +74,5 @@
     2
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.38.2"
 }
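The config change above gives class 1 the name "mass", i.e. a binary unlabeled-vs-mass segmentation head. A sketch of how such a head is typically initialized from the nvidia/mit-b0 encoder before fine-tuning is shown below; it mirrors the label mapping in the diff, but the actual training script is not part of this commit.

```python
# Sketch only: attach a SegFormer segmentation head to the nvidia/mit-b0
# encoder with the label mapping shown in config.json. The decode-head
# weights start out randomly initialized and are learned during fine-tuning.
from transformers import SegformerForSemanticSegmentation

id2label = {0: "unlabeled", 1: "mass"}
label2id = {name: idx for idx, name in id2label.items()}

model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/mit-b0",
    num_labels=2,
    id2label=id2label,
    label2id=label2id,
)
```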
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e31be23a2a4150de0bb3e9360ef1fae693d40bd5d55dd36c54e2684b4b6d3466
+size 14884776
runs/Mar27_13-00-55_b68f219c92b0/events.out.tfevents.1711544458.b68f219c92b0.5853.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30c2409dd3d2e2f997c311db49aedc6578d5e2cc98f665b3e698df9641f6057c
+size 180859
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6680295615b8eaee6ab75f97efdcd1cdc57418ff46ea19e5bab1bb5d55929465
+size 4984
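The model.safetensors, events file, and training_args.bin entries above are Git LFS pointer files: the repository stores only the spec version, the blob's sha256 oid, and its size. A small sketch of checking a downloaded blob against the oid recorded in its pointer follows; the local filename is an assumption.

```python
# Sketch: verify a downloaded LFS blob against the sha256 oid recorded
# in its pointer file, e.g. the model.safetensors pointer shown above.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "e31be23a2a4150de0bb3e9360ef1fae693d40bd5d55dd36c54e2684b4b6d3466"
# "model.safetensors" is assumed to be the locally downloaded blob path.
assert sha256_of("model.safetensors") == expected
```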