KadenMc committed
Commit abf3d8c
1 Parent(s): d08c06a

Upload models.

mimic_iv_ecg_physionet_pretrained.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d0142bcb485eb9f0c7845e0c19ff3463f6ae9d0e458eab69136efe90ceb9b7e
+ size 1090825421
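
The file above is a Git LFS pointer; the ~1.1 GB checkpoint itself is materialized with "git lfs pull" (or by the Hub client). A minimal sketch of loading it for inspection with PyTorch follows; the "model"/"cfg" key names are an assumption based on how fairseq-style trainers usually serialize checkpoints, not something this commit states.

# Sketch: inspect the pretrained checkpoint after it has been downloaded.
import torch

ckpt = torch.load("mimic_iv_ecg_physionet_pretrained.pt", map_location="cpu")
print(type(ckpt))                 # typically a dict
if isinstance(ckpt, dict):
    print(list(ckpt.keys()))      # e.g. "model", "cfg", ... (assumption)
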
mimic_iv_ecg_physionet_pretrained.yaml ADDED
@@ -0,0 +1,153 @@
+ _name: null
+ common:
+   _name: null
+   no_progress_bar: false
+   log_interval: 10
+   log_format: csv
+   log_file: null
+   wandb_project: null
+   wandb_entity: null
+   seed: 1
+   fp16: false
+   memory_efficient_fp16: false
+   fp16_no_flatten_grads: false
+   fp16_init_scale: 128
+   fp16_scale_window: null
+   fp16_scale_tolerance: 0.0
+   on_cpu_convert_precision: false
+   min_loss_scale: 0.0001
+   threshold_loss_scale: null
+   empty_cache_freq: 0
+   all_gather_list_size: 16384
+   model_parallel_size: 1
+   profile: false
+   reset_logging: false
+   suppress_crashes: false
+ common_eval:
+   _name: null
+   path: null
+   quiet: false
+   model_overrides: '{}'
+   save_outputs: false
+   results_path: null
+ distributed_training:
+   _name: null
+   distributed_world_size: 4
+   distributed_rank: 0
+   distributed_backend: nccl
+   distributed_init_method: null
+   distributed_port: 12355
+   device_id: 0
+   ddp_comm_hook: none
+   bucket_cap_mb: 25
+   fix_batches_to_gpus: false
+   find_unused_parameters: false
+   heartbeat_timeout: -1
+   broadcast_buffers: false
+   fp16: ${common.fp16}
+   memory_efficient_fp16: ${common.memory_efficient_fp16}
+ dataset:
+   _name: null
+   num_workers: 10
+   skip_invalid_size_inputs_valid_test: false
+   max_tokens: null
+   batch_size: 64
+   required_batch_size_multiple: 8
+   data_buffer_size: 10
+   train_subset: train
+   valid_subset: valid
+   combine_valid_subsets: null
+   ignore_unused_valid_subsets: false
+   validate_interval: 1
+   validate_interval_updates: 0
+   validate_after_updates: 0
+   fixed_validation_seed: null
+   disable_validation: false
+   max_tokens_valid: ${dataset.max_tokens}
+   batch_size_valid: ${dataset.batch_size}
+   max_valid_steps: null
+   curriculum: 0
+   num_shards: 1
+   shard_id: 0
+ optimization:
+   _name: null
+   max_epoch: 200
+   max_update: 0
+   lr:
+   - 5.0e-05
+   stop_time_hours: 0.0
+   clip_norm: 0.0
+   update_freq:
+   - 2
+   stop_min_lr: -1.0
+ checkpoint:
+   _name: null
+   save_dir: <REDACTED>
+   restore_file: checkpoint_last.pt
+   finetune_from_model: null
+   reset_dataloader: false
+   reset_lr_scheduler: false
+   reset_meters: false
+   reset_optimizer: false
+   optimizer_overrides: '{}'
+   save_interval: 10
+   save_interval_updates: 0
+   keep_interval_updates: -1
+   keep_interval_updates_pattern: -1
+   keep_last_epochs: 0
+   keep_best_checkpoints: -1
+   no_save: false
+   no_epoch_checkpoints: false
+   no_last_checkpoints: false
+   no_save_optimizer_state: false
+   best_checkpoint_metric: loss
+   maximize_best_checkpoint_metric: false
+   patience: -1
+   checkpoint_suffix: ''
+   checkpoint_shard_count: 1
+   load_checkpoint_on_all_dp_ranks: false
+ model:
+   _name: wav2vec2_cmsc
+   apply_mask: true
+   mask_prob: 0.65
+   encoder_layers: 24
+   encoder_embed_dim: 1024
+   encoder_ffn_embed_dim: 4096
+   encoder_attention_heads: 16
+   quantize_targets: true
+   final_dim: 256
+   dropout_input: 0.1
+   dropout_features: 0.1
+   feature_grad_mult: 0.1
+   in_d: 12
+ task:
+   _name: ecg_pretraining
+   data: <REDACTED>/cmsc
+   perturbation_mode:
+   - random_leads_masking
+   p:
+   - 1.0
+   mask_leads_selection: random
+   mask_leads_prob: 0.5
+   normalize: false
+   enable_padding: true
+   enable_padding_leads: false
+   leads_to_load: null
+ criterion:
+   _name: wav2vec2_with_cmsc
+   infonce: true
+   log_keys:
+   - prob_perplexity
+   - code_perplexity
+   - temp
+   loss_weights:
+   - 0.1
+   - 10
+ lr_scheduler:
+   _name: fixed
+   warmup_updates: 0
+ optimizer:
+   _name: adam
+   adam_betas: (0.9, 0.98)
+   adam_eps: 1.0e-06
+   weight_decay: 0.01
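
The "${...}" references above (e.g. ${common.fp16}) are OmegaConf-style interpolations, as used by fairseq-based trainers. A minimal sketch of reading the config, assuming the file has been downloaded from this repo; plain yaml.safe_load would keep the interpolations as literal strings, while OmegaConf resolves them.

# Sketch: load and resolve the pretraining config.
from omegaconf import OmegaConf

cfg = OmegaConf.load("mimic_iv_ecg_physionet_pretrained.yaml")
print(cfg.model._name)            # wav2vec2_cmsc
print(cfg.model.encoder_layers)   # 24
resolved = OmegaConf.to_container(cfg, resolve=True)
print(resolved["distributed_training"]["fp16"])  # False, taken from common.fp16
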
physionet_finetuned.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96ecf96d044e735f6477a022d36d5a00f714693ee5e563faf19e47e9d319f28e
+ size 1081741532
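
As with the pretrained model, this .pt file is an LFS pointer whose oid line records the SHA-256 of the real payload. A small sketch for verifying a downloaded copy against that digest (file name taken from this commit):

# Sketch: check a downloaded checkpoint against the LFS pointer's sha256 oid.
import hashlib

expected = "96ecf96d044e735f6477a022d36d5a00f714693ee5e563faf19e47e9d319f28e"

h = hashlib.sha256()
with open("physionet_finetuned.pt", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
print("OK:", h.hexdigest())
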
physionet_finetuned.yaml ADDED
@@ -0,0 +1,166 @@
+ _name: null
+ common:
+   _name: null
+   no_progress_bar: false
+   log_interval: 10
+   log_format: csv
+   log_file: null
+   wandb_project: null
+   wandb_entity: null
+   seed: 1
+   fp16: false
+   memory_efficient_fp16: false
+   fp16_no_flatten_grads: false
+   fp16_init_scale: 128
+   fp16_scale_window: null
+   fp16_scale_tolerance: 0.0
+   on_cpu_convert_precision: false
+   min_loss_scale: 0.0001
+   threshold_loss_scale: null
+   empty_cache_freq: 0
+   all_gather_list_size: 2048000
+   model_parallel_size: 1
+   profile: false
+   reset_logging: false
+   suppress_crashes: false
+ common_eval:
+   _name: null
+   path: null
+   quiet: false
+   model_overrides: '{}'
+   save_outputs: false
+   results_path: null
+ distributed_training:
+   _name: null
+   distributed_world_size: 4
+   distributed_rank: 0
+   distributed_backend: nccl
+   distributed_init_method: null
+   distributed_port: 12355
+   device_id: 0
+   ddp_comm_hook: none
+   bucket_cap_mb: 25
+   fix_batches_to_gpus: false
+   find_unused_parameters: true
+   heartbeat_timeout: -1
+   broadcast_buffers: false
+   fp16: ${common.fp16}
+   memory_efficient_fp16: ${common.memory_efficient_fp16}
+ dataset:
+   _name: null
+   num_workers: 3
+   skip_invalid_size_inputs_valid_test: false
+   max_tokens: null
+   batch_size: 64
+   required_batch_size_multiple: 8
+   data_buffer_size: 10
+   train_subset: train
+   valid_subset: valid
+   combine_valid_subsets: null
+   ignore_unused_valid_subsets: false
+   validate_interval: 1
+   validate_interval_updates: 0
+   validate_after_updates: 0
+   fixed_validation_seed: null
+   disable_validation: false
+   max_tokens_valid: ${dataset.max_tokens}
+   batch_size_valid: ${dataset.batch_size}
+   max_valid_steps: null
+   curriculum: 0
+   num_shards: 1
+   shard_id: 0
+ optimization:
+   _name: null
+   max_epoch: 50
+   max_update: 320000
+   lr:
+   - 1.0e-06
+   stop_time_hours: 0.0
+   clip_norm: 0.0
+   update_freq:
+   - 1
+   stop_min_lr: -1.0
+ checkpoint:
+   _name: null
+   save_dir: <REDACTED>
+   restore_file: checkpoint_last.pt
+   finetune_from_model: null
+   reset_dataloader: false
+   reset_lr_scheduler: false
+   reset_meters: false
+   reset_optimizer: false
+   optimizer_overrides: '{}'
+   save_interval: 1
+   save_interval_updates: 0
+   keep_interval_updates: -1
+   keep_interval_updates_pattern: -1
+   keep_last_epochs: 0
+   keep_best_checkpoints: -1
+   no_save: false
+   no_epoch_checkpoints: false
+   no_last_checkpoints: false
+   no_save_optimizer_state: false
+   best_checkpoint_metric: loss
+   maximize_best_checkpoint_metric: false
+   patience: -1
+   checkpoint_suffix: ''
+   checkpoint_shard_count: 1
+   load_checkpoint_on_all_dp_ranks: false
+ model:
+   _name: ecg_transformer_classifier
+   model_path: <REDACTED>
+   num_labels: 26
+   no_pretrained_weights: false
+   dropout: 0.0
+   attention_dropout: 0.0
+   activation_dropout: 0.1
+   feature_grad_mult: 0.0
+   freeze_finetune_updates: 0
+   in_d: 12
+ task:
+   _name: ecg_classification
+   data: <REDACTED>
+   normalize: false
+   enable_padding: true
+   enable_padding_leads: false
+   leads_to_load: null
+   label_file: <REDACTED>
+ criterion:
+   _name: binary_cross_entropy_with_logits
+   report_auc: true
+   report_cinc_score: false
+   weights_file: ???
+   pos_weight:
+   - 15.390331
+   - 9.366471
+   - 178.694517
+   - 307.623318
+   - 58.330172
+   - 16.724182
+   - 23.509615
+   - 45.128016
+   - 10.223581
+   - 38.081772
+   - 223.179153
+   - 53.063629
+   - 43.894325
+   - 48.763557
+   - 1.994518
+   - 25.936595
+   - 58.587013
+   - 143.283019
+   - 44.729568
+   - 40.310324
+   - 68.729483
+   - 23.999274
+   - 3.564768
+   - 8.647183
+   - 6.384442
+   - 20.731291
+ lr_scheduler:
+   _name: fixed
+   warmup_updates: 0
+ optimizer:
+   _name: adam
+   adam_betas: (0.9, 0.98)
+   adam_eps: 1.0e-08
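
The criterion block above names binary_cross_entropy_with_logits with one pos_weight entry per label (num_labels: 26). As a worked illustration of how such per-class weights enter a weighted BCE loss, here is a minimal PyTorch sketch; it is the standard BCEWithLogitsLoss equivalent, not the training framework's own criterion code.

# Sketch: per-class pos_weight values plugged into a weighted BCE loss.
import torch

pos_weight = torch.tensor([
    15.390331, 9.366471, 178.694517, 307.623318, 58.330172, 16.724182,
    23.509615, 45.128016, 10.223581, 38.081772, 223.179153, 53.063629,
    43.894325, 48.763557, 1.994518, 25.936595, 58.587013, 143.283019,
    44.729568, 40.310324, 68.729483, 23.999274, 3.564768, 8.647183,
    6.384442, 20.731291,
])  # one weight per label, matching num_labels: 26

criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
logits = torch.randn(4, 26)                       # dummy batch of model outputs
targets = torch.randint(0, 2, (4, 26)).float()    # dummy multi-label targets
print(criterion(logits, targets).item())
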