jhoppanne committed on
Commit
7795961
1 Parent(s): 1516b55

End of training

Browse files
Files changed (5) hide show
  1. README.md +62 -0
  2. config.json +101 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +29 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: google/efficientnet-b0
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - imagefolder
8
+ model-index:
9
+ - name: SkinCancerClassifier_Plain-V1
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jhoppanne-myself/finalProject/runs/ow4dk70u)
17
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jhoppanne-myself/finalProject/runs/ow4dk70u)
18
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jhoppanne-myself/finalProject/runs/ow4dk70u)
19
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/jhoppanne-myself/finalProject/runs/8hxuz0bh)
20
+ # SkinCancerClassifier_Plain-V1
21
+
22
+ This model is a fine-tuned version of [google/efficientnet-b0](https://huggingface.co/google/efficientnet-b0) on the imagefolder dataset.
23
+ It achieves the following results on the evaluation set:
24
+ - eval_loss: 1.0790
25
+ - eval_accuracy: 0.7792
26
+ - eval_runtime: 1.5074
27
+ - eval_samples_per_second: 159.219
28
+ - eval_steps_per_second: 5.307
29
+ - epoch: 104.5667
30
+ - step: 3137
31
+
32
+ ## Model description
33
+
34
+ More information needed
35
+
36
+ ## Intended uses & limitations
37
+
38
+ More information needed
39
+
40
+ ## Training and evaluation data
41
+
42
+ More information needed
43
+
44
+ ## Training procedure
45
+
46
+ ### Training hyperparameters
47
+
48
+ The following hyperparameters were used during training:
49
+ - learning_rate: 1e-06
50
+ - train_batch_size: 32
51
+ - eval_batch_size: 32
52
+ - seed: 42
53
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
54
+ - lr_scheduler_type: linear
55
+ - num_epochs: 2000
56
+
57
+ ### Framework versions
58
+
59
+ - Transformers 4.42.2
60
+ - Pytorch 2.3.0
61
+ - Datasets 2.15.0
62
+ - Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/efficientnet-b0",
3
+ "architectures": [
4
+ "EfficientNetForImageClassification"
5
+ ],
6
+ "batch_norm_eps": 0.001,
7
+ "batch_norm_momentum": 0.99,
8
+ "depth_coefficient": 1.0,
9
+ "depth_divisor": 8,
10
+ "depthwise_padding": [],
11
+ "drop_connect_rate": 0.2,
12
+ "dropout_rate": 0.2,
13
+ "expand_ratios": [
14
+ 1,
15
+ 6,
16
+ 6,
17
+ 6,
18
+ 6,
19
+ 6,
20
+ 6
21
+ ],
22
+ "hidden_act": "swish",
23
+ "hidden_dim": 1280,
24
+ "id2label": {
25
+ "0": "Benign",
26
+ "1": "Indeterminate",
27
+ "2": "Malignant"
28
+ },
29
+ "image_size": 224,
30
+ "in_channels": [
31
+ 32,
32
+ 16,
33
+ 24,
34
+ 40,
35
+ 80,
36
+ 112,
37
+ 192
38
+ ],
39
+ "initializer_range": 0.02,
40
+ "kernel_sizes": [
41
+ 3,
42
+ 3,
43
+ 5,
44
+ 3,
45
+ 5,
46
+ 5,
47
+ 3
48
+ ],
49
+ "label2id": {
50
+ "Benign": "0",
51
+ "Indeterminate": "1",
52
+ "Malignant": "2"
53
+ },
54
+ "model_type": "efficientnet",
55
+ "num_block_repeats": [
56
+ 1,
57
+ 2,
58
+ 2,
59
+ 3,
60
+ 3,
61
+ 4,
62
+ 1
63
+ ],
64
+ "num_channels": 3,
65
+ "num_hidden_layers": 64,
66
+ "out_channels": [
67
+ 16,
68
+ 24,
69
+ 40,
70
+ 80,
71
+ 112,
72
+ 192,
73
+ 320
74
+ ],
75
+ "out_features": null,
76
+ "pooling_type": "mean",
77
+ "problem_type": "single_label_classification",
78
+ "squeeze_expansion_ratio": 0.25,
79
+ "stage_names": [
80
+ "stem",
81
+ "stage1",
82
+ "stage2",
83
+ "stage3",
84
+ "stage4",
85
+ "stage5",
86
+ "stage6",
87
+ "stage7"
88
+ ],
89
+ "strides": [
90
+ 1,
91
+ 2,
92
+ 2,
93
+ 2,
94
+ 1,
95
+ 2,
96
+ 1
97
+ ],
98
+ "torch_dtype": "float32",
99
+ "transformers_version": "4.42.2",
100
+ "width_coefficient": 1.0
101
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed99914513ef367aeb5f5a698bef3a66dcba7bce2d0f8dae38819463632e654c
3
+ size 16260252
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 289,
4
+ "width": 289
5
+ },
6
+ "do_center_crop": false,
7
+ "do_normalize": true,
8
+ "do_rescale": true,
9
+ "do_resize": true,
10
+ "image_mean": [
11
+ 0.485,
12
+ 0.456,
13
+ 0.406
14
+ ],
15
+ "image_processor_type": "EfficientNetImageProcessor",
16
+ "image_std": [
17
+ 0.47853944,
18
+ 0.4732864,
19
+ 0.47434163
20
+ ],
21
+ "include_top": true,
22
+ "resample": 0,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "rescale_offset": false,
25
+ "size": {
26
+ "height": 224,
27
+ "width": 224
28
+ }
29
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a43d2c85e2467e2ba03a93f36f7bc5bdae1da1b72049c8129174faa4377995a8
3
+ size 5176