Mohamed Boghdady committed on
Commit
070cb8d
1 Parent(s): dcf45cb

Training in progress, step 500

config.json CHANGED
@@ -1,6 +1,5 @@
 {
-  "_name_or_path": "Helsinki-NLP/opus-mt-ar-en",
-  "_num_labels": 3,
+  "_name_or_path": "Helsinki-NLP/opus-mt-en-ar",
   "activation_dropout": 0.0,
   "activation_function": "swish",
   "add_bias_logits": false,
@@ -11,7 +10,7 @@
   "attention_dropout": 0.0,
   "bad_words_ids": [
     [
-      62833
+      62801
     ]
   ],
   "bos_token_id": 0,
@@ -22,14 +21,15 @@
   "decoder_ffn_dim": 2048,
   "decoder_layerdrop": 0.0,
   "decoder_layers": 6,
-  "decoder_start_token_id": 62833,
-  "decoder_vocab_size": 62834,
+  "decoder_start_token_id": 62801,
+  "decoder_vocab_size": 62802,
   "dropout": 0.1,
   "encoder_attention_heads": 8,
   "encoder_ffn_dim": 2048,
   "encoder_layerdrop": 0.0,
   "encoder_layers": 6,
   "eos_token_id": 0,
+  "extra_pos_embeddings": 62802,
   "forced_eos_token_id": 0,
   "id2label": {
     "0": "LABEL_0",
@@ -50,12 +50,12 @@
   "normalize_embedding": false,
   "num_beams": 4,
   "num_hidden_layers": 6,
-  "pad_token_id": 62833,
+  "pad_token_id": 62801,
   "scale_embedding": true,
   "share_encoder_decoder_embeddings": true,
   "static_position_embeddings": true,
   "torch_dtype": "float32",
   "transformers_version": "4.42.3",
   "use_cache": true,
-  "vocab_size": 62834
+  "vocab_size": 62802
 }
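This commit retargets the fine-tune from the ar→en base to Helsinki-NLP/opus-mt-en-ar: the pad/decoder-start/bad-words token moves from id 62833 to 62801 and the vocabulary shrinks from 62834 to 62802 entries. A minimal sketch of loading the updated checkpoint for English→Arabic generation; the local path is a placeholder, not the actual repository id:

```python
from transformers import MarianMTModel, MarianTokenizer

ckpt = "./checkpoint-500"  # placeholder: local checkpoint dir or Hub repo id

tokenizer = MarianTokenizer.from_pretrained(ckpt)
model = MarianMTModel.from_pretrained(ckpt)

batch = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch, num_beams=4, max_length=512)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))

# Sanity checks against the updated config.json in this commit.
assert model.config.pad_token_id == 62801
assert model.config.vocab_size == 62802
```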
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:645f68eaf299a7884e21ebc86e9967c04f01119f195e983fa0eccc5bbdd71234
-size 305518408
+oid sha256:4218523dab2b175c7d1d4497ba3c92b3286ab7d3adcf1e3d0455197e13aa3b8a
+size 305452744
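The weights are stored as a Git LFS pointer, so only the sha256 and byte size change here (the file is slightly smaller after the switch to the 62,802-token vocabulary). A quick sketch, assuming the real model.safetensors blob has been pulled (e.g. via git lfs pull), for checking that the download matches the new pointer:

```python
import hashlib

def lfs_digest(path: str, chunk_size: int = 1 << 20) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes) of a file, streaming in chunks."""
    h, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size

digest, size = lfs_digest("model.safetensors")
# Values taken from the new LFS pointer in this commit.
assert digest == "4218523dab2b175c7d1d4497ba3c92b3286ab7d3adcf1e3d0455197e13aa3b8a"
assert size == 305452744
```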
runs/Jul19_15-02-56_8df4908137f9/events.out.tfevents.1721401382.8df4908137f9.35.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b86f69a9d3b740a21bc7accc8e044fb1032d0887455b57362e0162bf58cc257c
+size 5736
runs/Jul19_15-13-23_8df4908137f9/events.out.tfevents.1721402005.8df4908137f9.35.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:831719608f96bdfd1a2ade697615b459616ed5ff6b5dda6759efe01b5e1c56b1
+size 6317
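Two TensorBoard event files are added, one per run directory, also stored as LFS pointers. Assuming the blobs have been pulled, the logged scalars can be read back with TensorBoard's event reader; the tag names below are typical Trainer tags and may differ from what this run actually logged:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# One of the run directories added in this commit.
acc = EventAccumulator("runs/Jul19_15-13-23_8df4908137f9")
acc.Reload()

print(acc.Tags()["scalars"])            # e.g. train/loss, train/learning_rate, ...
for event in acc.Scalars("train/loss"):  # assumed tag name
    print(event.step, event.value)
```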
source.spm CHANGED
Binary files a/source.spm and b/source.spm differ
 
target.spm CHANGED
Binary files a/target.spm and b/target.spm differ
 
tokenizer_config.json CHANGED
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "62833": {
+    "62801": {
       "content": "<pad>",
       "lstrip": false,
       "normalized": false,
@@ -30,9 +30,9 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "separate_vocabs": false,
-  "source_lang": "ar",
+  "source_lang": "eng",
   "sp_model_kwargs": {},
-  "target_lang": "en",
+  "target_lang": "ara",
   "tokenizer_class": "MarianTokenizer",
   "unk_token": "<unk>"
 }
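The special-token table now registers <pad> at id 62801 and the language pair flips from ar→en to eng→ara, matching the replaced source.spm/target.spm SentencePiece models above. A small sketch, assuming the checkpoint is available locally at a placeholder path, to confirm the tokenizer agrees with config.json:

```python
from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("./checkpoint-500")  # placeholder path

print(tok.source_lang, "->", tok.target_lang)  # expected: eng -> ara
print(tok.pad_token, tok.pad_token_id)         # expected: <pad> 62801
print(tok.convert_ids_to_tokens([62801]))      # expected: ['<pad>']
```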
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa5e63d3cdd970284732710fdc228bc08c46ecb0200ba0cdf93ac2282e273cae
+oid sha256:c995dcf9c25e795bcde0911be7ccea8d5aa3b32de3b0cc2435226569bce9af31
 size 5304
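training_args.bin is the pickled Seq2SeqTrainingArguments object the Trainer saves next to each checkpoint; its hash changes because the new run uses a different logging_dir. A sketch for inspecting it, assuming transformers is installed so the class can be unpickled (on recent PyTorch, weights_only=False is needed because this is an arbitrary pickled object, not a tensor file):

```python
import torch

# training_args.bin is a pickled TrainingArguments/Seq2SeqTrainingArguments instance.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)  # Seq2SeqTrainingArguments
print(args.learning_rate)   # 2e-05 per the logged run config
print(args.per_device_train_batch_size, args.num_train_epochs, args.fp16)  # 16 10 True
```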
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
 
wandb/debug-internal.log CHANGED
The diff for this file is too large to render. See raw diff
 
wandb/debug.log CHANGED
@@ -1,79 +1,51 @@
1
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Current SDK version is 0.17.4
2
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Configure stats pid to 35
3
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
8
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {}
9
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {}
10
- 2024-07-19 09:05:32,414 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
11
- 2024-07-19 09:05:32,414 ERROR MainThread:35 [wandb_setup.py:_flush():78] error in wandb.init()
12
- Traceback (most recent call last):
13
- File "/opt/conda/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 3553, in run_code
14
- exec(code_obj, self.user_global_ns, self.user_ns)
15
- File "/tmp/ipykernel_35/4032920361.py", line 1, in <module>
16
- trainer.train()
17
- File "/opt/conda/lib/python3.10/site-packages/transformers/trainer.py", line 1923, in train
18
- return inner_training_loop(
19
- File "/opt/conda/lib/python3.10/site-packages/transformers/trainer.py", line 2196, in _inner_training_loop
20
- self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
21
- File "/opt/conda/lib/python3.10/site-packages/transformers/trainer_callback.py", line 461, in on_train_begin
22
- return self.call_event("on_train_begin", args, state, control)
23
- File "/opt/conda/lib/python3.10/site-packages/transformers/trainer_callback.py", line 508, in call_event
24
- result = getattr(callback, event)(
25
- File "/opt/conda/lib/python3.10/site-packages/transformers/integrations/integration_utils.py", line 842, in on_train_begin
26
- self.setup(args, state, model, **kwargs)
27
- File "/opt/conda/lib/python3.10/site-packages/transformers/integrations/integration_utils.py", line 776, in setup
28
- self._wandb.init(
29
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 1195, in init
30
- wandb._sentry.reraise(e)
31
- File "/opt/conda/lib/python3.10/site-packages/wandb/analytics/sentry.py", line 155, in reraise
32
- raise exc.with_traceback(sys.exc_info()[2])
33
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 1180, in init
34
- wi.setup(kwargs)
35
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_init.py", line 300, in setup
36
- wandb_login._login(
37
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_login.py", line 346, in _login
38
- wlogin.prompt_api_key()
39
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_login.py", line 273, in prompt_api_key
40
- key, status = self._prompt_api_key()
41
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/wandb_login.py", line 252, in _prompt_api_key
42
- key = apikey.prompt_api_key(
43
- File "/opt/conda/lib/python3.10/site-packages/wandb/sdk/lib/apikey.py", line 164, in prompt_api_key
44
- key = input_callback(api_ask).strip()
45
- File "/opt/conda/lib/python3.10/site-packages/click/termui.py", line 164, in prompt
46
- value = prompt_func(prompt)
47
- File "/opt/conda/lib/python3.10/site-packages/click/termui.py", line 147, in prompt_func
48
- raise Abort() from None
49
- click.exceptions.Abort
50
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:_log_setup():529] Logging user logs to /kaggle/working/wandb/run-20240719_090532-oy10h8oj/logs/debug.log
51
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:_log_setup():530] Logging internal logs to /kaggle/working/wandb/run-20240719_090532-oy10h8oj/logs/debug-internal.log
52
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:_jupyter_setup():475] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x7a8b8b2dee00>
53
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:init():569] calling init triggers
54
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:init():576] wandb.init called with sweep_config: {}
55
  config: {}
56
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:init():619] starting backend
57
- 2024-07-19 09:05:32,415 INFO MainThread:35 [wandb_init.py:init():623] setting up manager
58
- 2024-07-19 09:05:32,417 INFO MainThread:35 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
59
- 2024-07-19 09:05:32,419 INFO MainThread:35 [wandb_init.py:init():631] backend started and connected
60
- 2024-07-19 09:05:32,431 INFO MainThread:35 [wandb_run.py:_label_probe_notebook():1334] probe notebook
61
- 2024-07-19 09:05:32,782 INFO MainThread:35 [wandb_init.py:init():720] updated telemetry
62
- 2024-07-19 09:05:32,786 INFO MainThread:35 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout
63
- 2024-07-19 09:05:33,254 INFO MainThread:35 [wandb_run.py:_on_init():2402] communicating current version
64
- 2024-07-19 09:05:33,284 INFO MainThread:35 [wandb_run.py:_on_init():2411] got version response
65
- 2024-07-19 09:05:33,285 INFO MainThread:35 [wandb_init.py:init():804] starting run threads in backend
66
- 2024-07-19 09:05:49,371 INFO MainThread:35 [wandb_run.py:_console_start():2380] atexit reg
67
- 2024-07-19 09:05:49,371 INFO MainThread:35 [wandb_run.py:_redirect():2235] redirect: wrap_raw
68
- 2024-07-19 09:05:49,371 INFO MainThread:35 [wandb_run.py:_redirect():2300] Wrapping output streams.
69
- 2024-07-19 09:05:49,371 INFO MainThread:35 [wandb_run.py:_redirect():2325] Redirects installed.
70
- 2024-07-19 09:05:49,377 INFO MainThread:35 [wandb_init.py:init():847] run started, returning control to user process
71
- 2024-07-19 09:05:49,383 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb None None {'vocab_size': 62834, 'decoder_vocab_size': 62834, 'max_position_embeddings': 512, 'd_model': 512, 'encoder_ffn_dim': 2048, 'encoder_layers': 6, 'encoder_attention_heads': 8, 'decoder_ffn_dim': 2048, 'decoder_layers': 6, 'decoder_attention_heads': 8, 'dropout': 0.1, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'activation_function': 'swish', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'use_cache': True, 'num_hidden_layers': 6, 'scale_embedding': True, 'share_encoder_decoder_embeddings': True, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 512, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': [[62833]], 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': 0, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['MarianMTModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 62833, 'eos_token_id': 0, 'sep_token_id': None, 'decoder_start_token_id': 62833, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'Helsinki-NLP/opus-mt-ar-en', 'transformers_version': '4.42.3', '_num_labels': 3, 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.0, 'classifier_dropout': 0.0, 'model_type': 'marian', 'normalize_before': False, 'normalize_embedding': False, 'static_position_embeddings': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Jul19_09-05-25_cfc182b336d9', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 
42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': None, 'generation_num_beams': None, 'generation_config': None}
72
- 2024-07-19 09:05:49,391 INFO MainThread:35 [wandb_config.py:__setitem__():151] config set model/num_parameters = 76833792 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7a8b8a3a8040>>
73
- 2024-07-19 09:05:49,391 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb model/num_parameters 76833792 None
74
- 2024-07-19 10:06:42,046 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
75
- 2024-07-19 10:06:42,047 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
76
- 2024-07-19 10:06:42,064 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
77
- 2024-07-19 10:06:48,324 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
78
- 2024-07-19 10:06:48,324 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
79
- 2024-07-19 10:06:48,330 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
1
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Current SDK version is 0.17.4
2
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Configure stats pid to 35
3
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
8
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {}
9
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
10
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_log_setup():529] Logging user logs to /kaggle/working/wandb/run-20240719_150308-wq61zns9/logs/debug.log
11
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_log_setup():530] Logging internal logs to /kaggle/working/wandb/run-20240719_150308-wq61zns9/logs/debug-internal.log
12
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_jupyter_setup():475] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x7ac420802680>
13
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():569] calling init triggers
14
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():576] wandb.init called with sweep_config: {}
15
  config: {}
16
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():619] starting backend
17
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():623] setting up manager
18
+ 2024-07-19 15:03:08,535 INFO MainThread:35 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
19
+ 2024-07-19 15:03:08,538 INFO MainThread:35 [wandb_init.py:init():631] backend started and connected
20
+ 2024-07-19 15:03:08,550 INFO MainThread:35 [wandb_run.py:_label_probe_notebook():1334] probe notebook
21
+ 2024-07-19 15:03:09,072 INFO MainThread:35 [wandb_init.py:init():720] updated telemetry
22
+ 2024-07-19 15:03:09,075 INFO MainThread:35 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout
23
+ 2024-07-19 15:03:09,407 INFO MainThread:35 [wandb_run.py:_on_init():2402] communicating current version
24
+ 2024-07-19 15:03:09,445 INFO MainThread:35 [wandb_run.py:_on_init():2411] got version response
25
+ 2024-07-19 15:03:09,445 INFO MainThread:35 [wandb_init.py:init():804] starting run threads in backend
26
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_console_start():2380] atexit reg
27
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_redirect():2235] redirect: wrap_raw
28
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_redirect():2300] Wrapping output streams.
29
+ 2024-07-19 15:03:25,492 INFO MainThread:35 [wandb_run.py:_redirect():2325] Redirects installed.
30
+ 2024-07-19 15:03:25,496 INFO MainThread:35 [wandb_init.py:init():847] run started, returning control to user process
31
+ 2024-07-19 15:03:25,502 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb None None {'vocab_size': 62802, 'decoder_vocab_size': 62802, 'max_position_embeddings': 512, 'd_model': 512, 'encoder_ffn_dim': 2048, 'encoder_layers': 6, 'encoder_attention_heads': 8, 'decoder_ffn_dim': 2048, 'decoder_layers': 6, 'decoder_attention_heads': 8, 'dropout': 0.1, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'activation_function': 'swish', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'use_cache': True, 'num_hidden_layers': 6, 'scale_embedding': True, 'share_encoder_decoder_embeddings': True, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 512, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': [[62801]], 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': 0, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['MarianMTModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 62801, 'eos_token_id': 0, 'sep_token_id': None, 'decoder_start_token_id': 62801, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'Helsinki-NLP/opus-mt-en-ar', 'transformers_version': '4.42.3', 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.0, 'classifier_dropout': 0.0, 'extra_pos_embeddings': 62802, 'model_type': 'marian', 'normalize_before': False, 'normalize_embedding': False, 'static_position_embeddings': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Jul19_15-02-56_8df4908137f9', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': 
False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': None, 'generation_num_beams': None, 'generation_config': None}
32
+ 2024-07-19 15:03:25,508 INFO MainThread:35 [wandb_config.py:__setitem__():151] config set model/num_parameters = 76817408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7ac41ae98400>>
33
+ 2024-07-19 15:03:25,508 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb model/num_parameters 76817408 None
34
+ 2024-07-19 15:12:08,909 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
35
+ 2024-07-19 15:12:08,909 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
36
+ 2024-07-19 15:13:08,410 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
37
+ 2024-07-19 15:13:09,248 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
38
+ 2024-07-19 15:13:09,248 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
39
+ 2024-07-19 15:13:22,988 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
40
+ 2024-07-19 15:13:22,990 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
41
+ 2024-07-19 15:13:22,990 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
42
+ 2024-07-19 15:13:23,619 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
43
+ 2024-07-19 15:13:23,649 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
44
+ 2024-07-19 15:13:23,649 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
45
+ 2024-07-19 15:13:24,165 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
46
+ 2024-07-19 15:13:24,311 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
47
+ 2024-07-19 15:13:24,311 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
48
+ 2024-07-19 15:13:24,660 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
49
+ 2024-07-19 15:13:25,433 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb None None {'vocab_size': 62802, 'decoder_vocab_size': 62802, 'max_position_embeddings': 512, 'd_model': 512, 'encoder_ffn_dim': 2048, 'encoder_layers': 6, 'encoder_attention_heads': 8, 'decoder_ffn_dim': 2048, 'decoder_layers': 6, 'decoder_attention_heads': 8, 'dropout': 0.1, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'activation_function': 'swish', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'use_cache': True, 'num_hidden_layers': 6, 'scale_embedding': True, 'share_encoder_decoder_embeddings': True, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 512, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': [[62801]], 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': 0, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['MarianMTModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 62801, 'eos_token_id': 0, 'sep_token_id': None, 'decoder_start_token_id': 62801, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'Helsinki-NLP/opus-mt-en-ar', 'transformers_version': '4.42.3', 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.0, 'classifier_dropout': 0.0, 'extra_pos_embeddings': 62802, 'model_type': 'marian', 'normalize_before': False, 'normalize_embedding': False, 'static_position_embeddings': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Jul19_15-13-23_8df4908137f9', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': 
False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': None, 'generation_num_beams': None, 'generation_config': None}
50
+ 2024-07-19 15:13:25,439 INFO MainThread:35 [wandb_config.py:__setitem__():151] config set model/num_parameters = 76817408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7ac41ae98400>>
51
+ 2024-07-19 15:13:25,439 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb model/num_parameters 76817408 None
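The removed log shows the earlier run aborting inside wandb.init() when it fell back to an interactive API-key prompt (click.exceptions.Abort); the new run logs the same hyperparameters and proceeds. A hedged sketch of a setup consistent with the Seq2SeqTrainingArguments recorded above (lr 2e-5, batch size 16, 10 epochs, fp16, predict_with_generate, push_to_hub), with the wandb key exported up front so the prompt never appears:

```python
import os
from transformers import Seq2SeqTrainingArguments

# Exporting the key avoids the interactive prompt that aborted the earlier run in a Kaggle kernel.
os.environ["WANDB_API_KEY"] = "<your-key>"  # placeholder

# Mirrors the hyperparameters recorded in the run config above.
args = Seq2SeqTrainingArguments(
    output_dir="/kaggle/working/",
    eval_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    weight_decay=0.01,
    num_train_epochs=10,
    fp16=True,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=3,
    predict_with_generate=True,
    push_to_hub=True,
    report_to=["tensorboard", "wandb"],
)
```

These arguments would then be passed to a Seq2SeqTrainer together with the model, tokenizer, datasets and a BLEU metric, none of which are part of this commit.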
wandb/run-20240719_150308-wq61zns9/files/conda-environment.yaml ADDED
File without changes
wandb/run-20240719_150308-wq61zns9/files/config.yaml ADDED
@@ -0,0 +1,762 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.13
7
+ cli_version: 0.17.4
8
+ framework: huggingface
9
+ huggingface_version: 4.42.3
10
+ is_jupyter_run: true
11
+ is_kaggle_kernel: true
12
+ start_time: 1721401388
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 2
17
+ - 3
18
+ - 5
19
+ - 11
20
+ - 12
21
+ - 41
22
+ - 49
23
+ - 51
24
+ - 53
25
+ - 55
26
+ - 71
27
+ - 100
28
+ - 105
29
+ 2:
30
+ - 1
31
+ - 2
32
+ - 3
33
+ - 5
34
+ - 11
35
+ - 12
36
+ - 41
37
+ - 49
38
+ - 51
39
+ - 53
40
+ - 55
41
+ - 71
42
+ - 100
43
+ - 105
44
+ 3:
45
+ - 7
46
+ - 13
47
+ - 19
48
+ - 23
49
+ - 66
50
+ 4: 3.10.13
51
+ 5: 0.17.4
52
+ 6: 4.42.3
53
+ 8:
54
+ - 1
55
+ - 2
56
+ - 5
57
+ 9:
58
+ 1: transformers_trainer
59
+ 13: linux-x86_64
60
+ m:
61
+ - 1: train/global_step
62
+ 6:
63
+ - 3
64
+ - 1: eval/loss
65
+ 5: 1
66
+ 6:
67
+ - 1
68
+ - 1: eval/bleu
69
+ 5: 1
70
+ 6:
71
+ - 1
72
+ - 1: eval/gen_len
73
+ 5: 1
74
+ 6:
75
+ - 1
76
+ - 1: eval/runtime
77
+ 5: 1
78
+ 6:
79
+ - 1
80
+ - 1: eval/samples_per_second
81
+ 5: 1
82
+ 6:
83
+ - 1
84
+ - 1: eval/steps_per_second
85
+ 5: 1
86
+ 6:
87
+ - 1
88
+ - 1: train/epoch
89
+ 5: 1
90
+ 6:
91
+ - 1
92
+ vocab_size:
93
+ desc: null
94
+ value: 62802
95
+ decoder_vocab_size:
96
+ desc: null
97
+ value: 62802
98
+ max_position_embeddings:
99
+ desc: null
100
+ value: 512
101
+ d_model:
102
+ desc: null
103
+ value: 512
104
+ encoder_ffn_dim:
105
+ desc: null
106
+ value: 2048
107
+ encoder_layers:
108
+ desc: null
109
+ value: 6
110
+ encoder_attention_heads:
111
+ desc: null
112
+ value: 8
113
+ decoder_ffn_dim:
114
+ desc: null
115
+ value: 2048
116
+ decoder_layers:
117
+ desc: null
118
+ value: 6
119
+ decoder_attention_heads:
120
+ desc: null
121
+ value: 8
122
+ dropout:
123
+ desc: null
124
+ value: 0.1
125
+ attention_dropout:
126
+ desc: null
127
+ value: 0.0
128
+ activation_dropout:
129
+ desc: null
130
+ value: 0.0
131
+ activation_function:
132
+ desc: null
133
+ value: swish
134
+ init_std:
135
+ desc: null
136
+ value: 0.02
137
+ encoder_layerdrop:
138
+ desc: null
139
+ value: 0.0
140
+ decoder_layerdrop:
141
+ desc: null
142
+ value: 0.0
143
+ use_cache:
144
+ desc: null
145
+ value: true
146
+ num_hidden_layers:
147
+ desc: null
148
+ value: 6
149
+ scale_embedding:
150
+ desc: null
151
+ value: true
152
+ share_encoder_decoder_embeddings:
153
+ desc: null
154
+ value: true
155
+ return_dict:
156
+ desc: null
157
+ value: true
158
+ output_hidden_states:
159
+ desc: null
160
+ value: false
161
+ output_attentions:
162
+ desc: null
163
+ value: false
164
+ torchscript:
165
+ desc: null
166
+ value: false
167
+ torch_dtype:
168
+ desc: null
169
+ value: null
170
+ use_bfloat16:
171
+ desc: null
172
+ value: false
173
+ tf_legacy_loss:
174
+ desc: null
175
+ value: false
176
+ pruned_heads:
177
+ desc: null
178
+ value: {}
179
+ tie_word_embeddings:
180
+ desc: null
181
+ value: true
182
+ chunk_size_feed_forward:
183
+ desc: null
184
+ value: 0
185
+ is_encoder_decoder:
186
+ desc: null
187
+ value: true
188
+ is_decoder:
189
+ desc: null
190
+ value: false
191
+ cross_attention_hidden_size:
192
+ desc: null
193
+ value: null
194
+ add_cross_attention:
195
+ desc: null
196
+ value: false
197
+ tie_encoder_decoder:
198
+ desc: null
199
+ value: false
200
+ max_length:
201
+ desc: null
202
+ value: 512
203
+ min_length:
204
+ desc: null
205
+ value: 0
206
+ do_sample:
207
+ desc: null
208
+ value: false
209
+ early_stopping:
210
+ desc: null
211
+ value: false
212
+ num_beams:
213
+ desc: null
214
+ value: 4
215
+ num_beam_groups:
216
+ desc: null
217
+ value: 1
218
+ diversity_penalty:
219
+ desc: null
220
+ value: 0.0
221
+ temperature:
222
+ desc: null
223
+ value: 1.0
224
+ top_k:
225
+ desc: null
226
+ value: 50
227
+ top_p:
228
+ desc: null
229
+ value: 1.0
230
+ typical_p:
231
+ desc: null
232
+ value: 1.0
233
+ repetition_penalty:
234
+ desc: null
235
+ value: 1.0
236
+ length_penalty:
237
+ desc: null
238
+ value: 1.0
239
+ no_repeat_ngram_size:
240
+ desc: null
241
+ value: 0
242
+ encoder_no_repeat_ngram_size:
243
+ desc: null
244
+ value: 0
245
+ bad_words_ids:
246
+ desc: null
247
+ value:
248
+ - - 62801
249
+ num_return_sequences:
250
+ desc: null
251
+ value: 1
252
+ output_scores:
253
+ desc: null
254
+ value: false
255
+ return_dict_in_generate:
256
+ desc: null
257
+ value: false
258
+ forced_bos_token_id:
259
+ desc: null
260
+ value: null
261
+ forced_eos_token_id:
262
+ desc: null
263
+ value: 0
264
+ remove_invalid_values:
265
+ desc: null
266
+ value: false
267
+ exponential_decay_length_penalty:
268
+ desc: null
269
+ value: null
270
+ suppress_tokens:
271
+ desc: null
272
+ value: null
273
+ begin_suppress_tokens:
274
+ desc: null
275
+ value: null
276
+ architectures:
277
+ desc: null
278
+ value:
279
+ - MarianMTModel
280
+ finetuning_task:
281
+ desc: null
282
+ value: null
283
+ id2label:
284
+ desc: null
285
+ value:
286
+ '0': LABEL_0
287
+ '1': LABEL_1
288
+ '2': LABEL_2
289
+ label2id:
290
+ desc: null
291
+ value:
292
+ LABEL_0: 0
293
+ LABEL_1: 1
294
+ LABEL_2: 2
295
+ tokenizer_class:
296
+ desc: null
297
+ value: null
298
+ prefix:
299
+ desc: null
300
+ value: null
301
+ bos_token_id:
302
+ desc: null
303
+ value: 0
304
+ pad_token_id:
305
+ desc: null
306
+ value: 62801
307
+ eos_token_id:
308
+ desc: null
309
+ value: 0
310
+ sep_token_id:
311
+ desc: null
312
+ value: null
313
+ decoder_start_token_id:
314
+ desc: null
315
+ value: 62801
316
+ task_specific_params:
317
+ desc: null
318
+ value: null
319
+ problem_type:
320
+ desc: null
321
+ value: null
322
+ _name_or_path:
323
+ desc: null
324
+ value: Helsinki-NLP/opus-mt-en-ar
325
+ transformers_version:
326
+ desc: null
327
+ value: 4.42.3
328
+ add_bias_logits:
329
+ desc: null
330
+ value: false
331
+ add_final_layer_norm:
332
+ desc: null
333
+ value: false
334
+ classif_dropout:
335
+ desc: null
336
+ value: 0.0
337
+ classifier_dropout:
338
+ desc: null
339
+ value: 0.0
340
+ extra_pos_embeddings:
341
+ desc: null
342
+ value: 62802
343
+ model_type:
344
+ desc: null
345
+ value: marian
346
+ normalize_before:
347
+ desc: null
348
+ value: false
349
+ normalize_embedding:
350
+ desc: null
351
+ value: false
352
+ static_position_embeddings:
353
+ desc: null
354
+ value: true
355
+ output_dir:
356
+ desc: null
357
+ value: /kaggle/working/
358
+ overwrite_output_dir:
359
+ desc: null
360
+ value: false
361
+ do_train:
362
+ desc: null
363
+ value: false
364
+ do_eval:
365
+ desc: null
366
+ value: true
367
+ do_predict:
368
+ desc: null
369
+ value: false
370
+ eval_strategy:
371
+ desc: null
372
+ value: epoch
373
+ prediction_loss_only:
374
+ desc: null
375
+ value: false
376
+ per_device_train_batch_size:
377
+ desc: null
378
+ value: 16
379
+ per_device_eval_batch_size:
380
+ desc: null
381
+ value: 16
382
+ per_gpu_train_batch_size:
383
+ desc: null
384
+ value: null
385
+ per_gpu_eval_batch_size:
386
+ desc: null
387
+ value: null
388
+ gradient_accumulation_steps:
389
+ desc: null
390
+ value: 1
391
+ eval_accumulation_steps:
392
+ desc: null
393
+ value: null
394
+ eval_delay:
395
+ desc: null
396
+ value: 0
397
+ learning_rate:
398
+ desc: null
399
+ value: 2.0e-05
400
+ weight_decay:
401
+ desc: null
402
+ value: 0.01
403
+ adam_beta1:
404
+ desc: null
405
+ value: 0.9
406
+ adam_beta2:
407
+ desc: null
408
+ value: 0.999
409
+ adam_epsilon:
410
+ desc: null
411
+ value: 1.0e-08
412
+ max_grad_norm:
413
+ desc: null
414
+ value: 1.0
415
+ num_train_epochs:
416
+ desc: null
417
+ value: 10
418
+ max_steps:
419
+ desc: null
420
+ value: -1
421
+ lr_scheduler_type:
422
+ desc: null
423
+ value: linear
424
+ lr_scheduler_kwargs:
425
+ desc: null
426
+ value: {}
427
+ warmup_ratio:
428
+ desc: null
429
+ value: 0.0
430
+ warmup_steps:
431
+ desc: null
432
+ value: 0
433
+ log_level:
434
+ desc: null
435
+ value: passive
436
+ log_level_replica:
437
+ desc: null
438
+ value: warning
439
+ log_on_each_node:
440
+ desc: null
441
+ value: true
442
+ logging_dir:
443
+ desc: null
444
+ value: /kaggle/working/runs/Jul19_15-13-23_8df4908137f9
445
+ logging_strategy:
446
+ desc: null
447
+ value: steps
448
+ logging_first_step:
449
+ desc: null
450
+ value: false
451
+ logging_steps:
452
+ desc: null
453
+ value: 500
454
+ logging_nan_inf_filter:
455
+ desc: null
456
+ value: true
457
+ save_strategy:
458
+ desc: null
459
+ value: steps
460
+ save_steps:
461
+ desc: null
462
+ value: 500
463
+ save_total_limit:
464
+ desc: null
465
+ value: 3
466
+ save_safetensors:
467
+ desc: null
468
+ value: true
469
+ save_on_each_node:
470
+ desc: null
471
+ value: false
472
+ save_only_model:
473
+ desc: null
474
+ value: false
475
+ restore_callback_states_from_checkpoint:
476
+ desc: null
477
+ value: false
478
+ no_cuda:
479
+ desc: null
480
+ value: false
481
+ use_cpu:
482
+ desc: null
483
+ value: false
484
+ use_mps_device:
485
+ desc: null
486
+ value: false
487
+ seed:
488
+ desc: null
489
+ value: 42
490
+ data_seed:
491
+ desc: null
492
+ value: null
493
+ jit_mode_eval:
494
+ desc: null
495
+ value: false
496
+ use_ipex:
497
+ desc: null
498
+ value: false
499
+ bf16:
500
+ desc: null
501
+ value: false
502
+ fp16:
503
+ desc: null
504
+ value: true
505
+ fp16_opt_level:
506
+ desc: null
507
+ value: O1
508
+ half_precision_backend:
509
+ desc: null
510
+ value: auto
511
+ bf16_full_eval:
512
+ desc: null
513
+ value: false
514
+ fp16_full_eval:
515
+ desc: null
516
+ value: false
517
+ tf32:
518
+ desc: null
519
+ value: null
520
+ local_rank:
521
+ desc: null
522
+ value: 0
523
+ ddp_backend:
524
+ desc: null
525
+ value: null
526
+ tpu_num_cores:
527
+ desc: null
528
+ value: null
529
+ tpu_metrics_debug:
530
+ desc: null
531
+ value: false
532
+ debug:
533
+ desc: null
534
+ value: []
535
+ dataloader_drop_last:
536
+ desc: null
537
+ value: false
538
+ eval_steps:
539
+ desc: null
540
+ value: null
541
+ dataloader_num_workers:
542
+ desc: null
543
+ value: 0
544
+ dataloader_prefetch_factor:
545
+ desc: null
546
+ value: null
547
+ past_index:
548
+ desc: null
549
+ value: -1
550
+ run_name:
551
+ desc: null
552
+ value: /kaggle/working/
553
+ disable_tqdm:
554
+ desc: null
555
+ value: false
556
+ remove_unused_columns:
557
+ desc: null
558
+ value: true
559
+ label_names:
560
+ desc: null
561
+ value: null
562
+ load_best_model_at_end:
563
+ desc: null
564
+ value: false
565
+ metric_for_best_model:
566
+ desc: null
567
+ value: null
568
+ greater_is_better:
569
+ desc: null
570
+ value: null
571
+ ignore_data_skip:
572
+ desc: null
573
+ value: false
574
+ fsdp:
575
+ desc: null
576
+ value: []
577
+ fsdp_min_num_params:
578
+ desc: null
579
+ value: 0
580
+ fsdp_config:
581
+ desc: null
582
+ value:
583
+ min_num_params: 0
584
+ xla: false
585
+ xla_fsdp_v2: false
586
+ xla_fsdp_grad_ckpt: false
587
+ fsdp_transformer_layer_cls_to_wrap:
588
+ desc: null
589
+ value: null
590
+ accelerator_config:
591
+ desc: null
592
+ value:
593
+ split_batches: false
594
+ dispatch_batches: null
595
+ even_batches: true
596
+ use_seedable_sampler: true
597
+ non_blocking: false
598
+ gradient_accumulation_kwargs: null
599
+ deepspeed:
600
+ desc: null
601
+ value: null
602
+ label_smoothing_factor:
603
+ desc: null
604
+ value: 0.0
605
+ optim:
606
+ desc: null
607
+ value: adamw_torch
608
+ optim_args:
609
+ desc: null
610
+ value: null
611
+ adafactor:
612
+ desc: null
613
+ value: false
614
+ group_by_length:
615
+ desc: null
616
+ value: false
617
+ length_column_name:
618
+ desc: null
619
+ value: length
620
+ report_to:
621
+ desc: null
622
+ value:
623
+ - tensorboard
624
+ - wandb
625
+ ddp_find_unused_parameters:
626
+ desc: null
627
+ value: null
628
+ ddp_bucket_cap_mb:
629
+ desc: null
630
+ value: null
631
+ ddp_broadcast_buffers:
632
+ desc: null
633
+ value: null
634
+ dataloader_pin_memory:
635
+ desc: null
636
+ value: true
637
+ dataloader_persistent_workers:
638
+ desc: null
639
+ value: false
640
+ skip_memory_metrics:
641
+ desc: null
642
+ value: true
643
+ use_legacy_prediction_loop:
644
+ desc: null
645
+ value: false
646
+ push_to_hub:
647
+ desc: null
648
+ value: true
649
+ resume_from_checkpoint:
650
+ desc: null
651
+ value: null
652
+ hub_model_id:
653
+ desc: null
654
+ value: null
655
+ hub_strategy:
656
+ desc: null
657
+ value: every_save
658
+ hub_token:
659
+ desc: null
660
+ value: <HUB_TOKEN>
661
+ hub_private_repo:
662
+ desc: null
663
+ value: false
664
+ hub_always_push:
665
+ desc: null
666
+ value: false
667
+ gradient_checkpointing:
668
+ desc: null
669
+ value: false
670
+ gradient_checkpointing_kwargs:
671
+ desc: null
672
+ value: null
673
+ include_inputs_for_metrics:
674
+ desc: null
675
+ value: false
676
+ eval_do_concat_batches:
677
+ desc: null
678
+ value: true
679
+ fp16_backend:
680
+ desc: null
681
+ value: auto
682
+ evaluation_strategy:
683
+ desc: null
684
+ value: null
685
+ push_to_hub_model_id:
686
+ desc: null
687
+ value: null
688
+ push_to_hub_organization:
689
+ desc: null
690
+ value: null
691
+ push_to_hub_token:
692
+ desc: null
693
+ value: <PUSH_TO_HUB_TOKEN>
694
+ mp_parameters:
695
+ desc: null
696
+ value: ''
697
+ auto_find_batch_size:
698
+ desc: null
699
+ value: false
700
+ full_determinism:
701
+ desc: null
702
+ value: false
703
+ torchdynamo:
704
+ desc: null
705
+ value: null
706
+ ray_scope:
707
+ desc: null
708
+ value: last
709
+ ddp_timeout:
710
+ desc: null
711
+ value: 1800
712
+ torch_compile:
713
+ desc: null
714
+ value: false
715
+ torch_compile_backend:
716
+ desc: null
717
+ value: null
718
+ torch_compile_mode:
719
+ desc: null
720
+ value: null
721
+ dispatch_batches:
722
+ desc: null
723
+ value: null
724
+ split_batches:
725
+ desc: null
726
+ value: null
727
+ include_tokens_per_second:
728
+ desc: null
729
+ value: false
730
+ include_num_input_tokens_seen:
731
+ desc: null
732
+ value: false
733
+ neftune_noise_alpha:
734
+ desc: null
735
+ value: null
736
+ optim_target_modules:
737
+ desc: null
738
+ value: null
739
+ batch_eval_metrics:
740
+ desc: null
741
+ value: false
742
+ eval_on_start:
743
+ desc: null
744
+ value: false
745
+ sortish_sampler:
746
+ desc: null
747
+ value: false
748
+ predict_with_generate:
749
+ desc: null
750
+ value: true
751
+ generation_max_length:
752
+ desc: null
753
+ value: null
754
+ generation_num_beams:
755
+ desc: null
756
+ value: null
757
+ generation_config:
758
+ desc: null
759
+ value: null
760
+ model/num_parameters:
761
+ desc: null
762
+ value: 76817408
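The wandb run directory also captures the full run configuration as config.yaml, where every key is wrapped in a {desc, value} pair. A small sketch for reading it back with PyYAML (which wandb itself depends on); the path is the run directory added in this commit:

```python
import yaml

with open("wandb/run-20240719_150308-wq61zns9/files/config.yaml") as f:
    cfg = yaml.safe_load(f)

# Each entry is a {desc, value} mapping written by wandb.
print(cfg["vocab_size"]["value"])            # 62802
print(cfg["model/num_parameters"]["value"])  # 76817408
```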
wandb/run-20240719_150308-wq61zns9/files/output.log ADDED
@@ -0,0 +1 @@
+Some non-default generation parameters are set in the model config. These should go into a GenerationConfig file (https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model) instead. This warning will be raised to an exception in v4.41.
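The single line in output.log is the Transformers warning that generation parameters (num_beams=4, max_length=512, bad_words_ids, forced_eos_token_id=0) still live in config.json rather than in a separate generation_config.json. A sketch of the fix the warning suggests, using a placeholder checkpoint path:

```python
from transformers import GenerationConfig, MarianMTModel

ckpt = "./checkpoint-500"  # placeholder path / Hub id
model = MarianMTModel.from_pretrained(ckpt)

# Pull the generation-related fields out of the model config into a GenerationConfig
# and write generation_config.json next to the weights.
gen_config = GenerationConfig.from_model_config(model.config)
gen_config.save_pretrained(ckpt)
```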
wandb/run-20240719_150308-wq61zns9/files/requirements.txt ADDED
@@ -0,0 +1,875 @@
1
+ Babel==2.14.0
2
+ Boruta==0.3
3
+ Brotli==1.1.0
4
+ CVXcanon==0.1.2
5
+ Cartopy==0.23.0
6
+ Cython==3.0.8
7
+ Deprecated==1.2.14
8
+ Farama-Notifications==0.0.4
9
+ Flask==3.0.3
10
+ Geohash==1.0
11
+ GitPython==3.1.41
12
+ ImageHash==4.3.1
13
+ Janome==0.5.0
14
+ Jinja2==3.1.2
15
+ LunarCalendar==0.0.9
16
+ Mako==1.3.5
17
+ Markdown==3.5.2
18
+ MarkupSafe==2.1.3
19
+ MarkupSafe==2.1.5
20
+ Pillow==9.5.0
21
+ PuLP==2.8.0
22
+ PyArabic==0.6.15
23
+ PyJWT==2.8.0
24
+ PyMeeus==0.5.12
25
+ PySocks==1.7.1
26
+ PyUpSet==0.1.1.post7
27
+ PyWavelets==1.5.0
28
+ PyYAML==6.0.1
29
+ Pygments==2.17.2
30
+ Pympler==1.1
31
+ QtPy==2.4.1
32
+ Rtree==1.2.0
33
+ SQLAlchemy==2.0.25
34
+ SecretStorage==3.3.3
35
+ Send2Trash==1.8.2
36
+ Shapely==1.8.5.post1
37
+ Shimmy==1.3.0
38
+ SimpleITK==2.3.1
39
+ TPOT==0.12.1
40
+ Theano-PyMC==1.1.2
41
+ Theano==1.0.5
42
+ Wand==0.6.13
43
+ Werkzeug==3.0.3
44
+ absl-py==1.4.0
45
+ accelerate==0.32.1
46
+ access==1.1.9
47
+ affine==2.4.0
48
+ aiobotocore==2.13.1
49
+ aiofiles==22.1.0
50
+ aiohttp==3.9.1
51
+ aioitertools==0.11.0
52
+ aiorwlock==1.3.0
53
+ aiosignal==1.3.1
54
+ aiosqlite==0.19.0
55
+ albumentations==1.4.0
56
+ alembic==1.13.2
57
+ altair==5.3.0
58
+ annotated-types==0.6.0
59
+ annotated-types==0.7.0
60
+ annoy==1.17.3
61
+ anyio==4.2.0
62
+ apache-beam==2.46.0
63
+ aplus==0.11.0
64
+ appdirs==1.4.4
65
+ archspec==0.2.3
66
+ argon2-cffi-bindings==21.2.0
67
+ argon2-cffi==23.1.0
68
+ array-record==0.5.0
69
+ arrow==1.3.0
70
+ arviz==0.18.0
71
+ astroid==3.2.2
72
+ astropy-iers-data==0.2024.7.8.0.31.19
73
+ astropy==6.1.1
74
+ asttokens==2.4.1
75
+ astunparse==1.6.3
76
+ async-lru==2.0.4
77
+ async-timeout==4.0.3
78
+ attrs==23.2.0
79
+ audioread==3.0.1
80
+ autopep8==2.0.4
81
+ backoff==2.2.1
82
+ bayesian-optimization==1.5.0
83
+ beatrix_jupyterlab==2023.128.151533
84
+ beautifulsoup4==4.12.2
85
+ blake3==0.2.1
86
+ bleach==6.1.0
87
+ blessed==1.20.0
88
+ blinker==1.8.2
89
+ blis==0.7.10
90
+ blosc2==2.7.0
91
+ bokeh==3.4.2
92
+ boltons==23.1.1
93
+ boto3==1.26.100
94
+ botocore==1.34.131
95
+ bq_helper==0.4.1
96
+ bqplot==0.12.43
97
+ branca==0.7.2
98
+ brewer2mpl==1.4.1
99
+ brotlipy==0.7.0
100
+ cached-property==1.5.2
101
+ cachetools==4.2.4
102
+ cachetools==5.3.2
103
+ catalogue==2.0.10
104
+ catalyst==22.4
105
+ catboost==1.2.5
106
+ category-encoders==2.6.3
107
+ certifi==2024.7.4
108
+ cesium==0.12.1
109
+ cffi==1.16.0
110
+ charset-normalizer==3.3.2
111
+ chex==0.1.86
112
+ cleverhans==4.0.0
113
+ click-plugins==1.1.1
114
+ click==8.1.7
115
+ cligj==0.7.2
116
+ cloud-tpu-client==0.10
117
+ cloud-tpu-profiler==2.4.0
118
+ cloudpathlib==0.18.1
119
+ cloudpickle==2.2.1
120
+ cloudpickle==3.0.0
121
+ cmdstanpy==1.2.4
122
+ colorama==0.4.6
123
+ colorcet==3.1.0
124
+ colorful==0.5.6
125
+ colorlog==6.8.2
126
+ colorlover==0.3.0
127
+ comm==0.2.1
128
+ conda-libmamba-solver==23.12.0
129
+ conda-package-handling==2.2.0
130
+ conda==24.5.0
131
+ conda_package_streaming==0.9.0
132
+ confection==0.1.4
133
+ contextily==1.6.0
134
+ contourpy==1.2.0
135
+ contourpy==1.2.1
136
+ convertdate==2.4.0
137
+ crcmod==1.7
138
+ cryptography==41.0.7
139
+ cuda-python==12.5.0
140
+ cudf==24.6.1
141
+ cufflinks==0.17.3
142
+ cuml==24.6.1
143
+ cupy==13.2.0
144
+ cycler==0.12.1
145
+ cymem==2.0.8
146
+ cytoolz==0.12.3
147
+ daal4py==2024.5.0
148
+ daal==2024.5.0
149
+ dacite==1.8.1
150
+ dask-cuda==24.6.0
151
+ dask-cudf==24.6.1
152
+ dask-expr==1.1.7
153
+ dask==2024.7.0
154
+ dataclasses-json==0.6.7
155
+ dataproc_jupyter_plugin==0.1.66
156
+ datasets==2.20.0
157
+ datashader==0.16.3
158
+ datatile==1.0.3
159
+ db-dtypes==1.2.0
160
+ deap==1.4.1
161
+ debugpy==1.8.0
162
+ decorator==5.1.1
163
+ deepdiff==7.0.1
164
+ defusedxml==0.7.1
165
+ deprecation==2.1.0
166
+ descartes==1.1.0
167
+ dill==0.3.8
168
+ dipy==1.9.0
169
+ distlib==0.3.8
170
+ distributed-ucxx==0.38.0
171
+ distributed==2024.5.1
172
+ distro==1.9.0
173
+ dm-tree==0.1.8
174
+ docker-pycreds==0.4.0
175
+ docker==7.0.0
176
+ docopt==0.6.2
177
+ docstring-parser==0.15
178
+ docstring-to-markdown==0.15
179
+ docutils==0.21.2
180
+ earthengine-api==0.1.410
181
+ easydict==1.13
182
+ easyocr==1.7.1
183
+ ecos==2.0.14
184
+ eli5==0.13.0
185
+ emoji==2.12.1
186
+ en-core-web-lg==3.7.1
187
+ en-core-web-sm==3.7.1
188
+ entrypoints==0.4
189
+ ephem==4.1.5
190
+ esda==2.5.1
191
+ essentia==2.1b6.dev1110
192
+ et-xmlfile==1.1.0
193
+ etils==1.6.0
194
+ evaluate==0.4.2
195
+ exceptiongroup==1.2.0
196
+ executing==2.0.1
197
+ explainable-ai-sdk==1.3.3
198
+ fastai==2.7.15
199
+ fastapi==0.108.0
200
+ fastavro==1.9.3
201
+ fastcore==1.5.49
202
+ fastdownload==0.0.7
203
+ fasteners==0.19
204
+ fastjsonschema==2.19.1
205
+ fastprogress==1.0.3
206
+ fastrlock==0.8.2
207
+ fasttext==0.9.3
208
+ feather-format==0.4.1
209
+ featuretools==1.31.0
210
+ filelock==3.13.1
211
+ fiona==1.9.6
212
+ fitter==1.7.1
213
+ flake8==7.0.0
214
+ flashtext==2.7
215
+ flatbuffers==23.5.26
216
+ flax==0.8.4
217
+ folium==0.17.0
218
+ fonttools==4.47.0
219
+ fonttools==4.53.1
220
+ fqdn==1.5.1
221
+ frozendict==2.4.4
222
+ frozenlist==1.4.1
223
+ fsspec==2024.5.0
224
+ fsspec==2024.6.1
225
+ funcy==2.0
226
+ fury==0.10.0
227
+ future==1.0.0
228
+ fuzzywuzzy==0.18.0
229
+ gast==0.5.4
230
+ gatspy==0.3
231
+ gcsfs==2024.5.0
232
+ gensim==4.3.2
233
+ geographiclib==2.0
234
+ geojson==3.1.0
235
+ geopandas==0.14.4
236
+ geoplot==0.5.1
237
+ geopy==2.4.1
238
+ geoviews==1.12.0
239
+ ggplot==0.11.5
240
+ giddy==2.3.5
241
+ gitdb==4.0.11
242
+ google-ai-generativelanguage==0.6.6
243
+ google-api-core==2.11.1
244
+ google-api-core==2.19.1
245
+ google-api-python-client==2.136.0
246
+ google-apitools==0.5.31
247
+ google-auth-httplib2==0.2.0
248
+ google-auth-oauthlib==1.2.0
249
+ google-auth==2.26.1
250
+ google-cloud-aiplatform==0.6.0a1
251
+ google-cloud-artifact-registry==1.10.0
252
+ google-cloud-automl==1.0.1
253
+ google-cloud-bigquery==2.34.4
254
+ google-cloud-bigtable==1.7.3
255
+ google-cloud-core==2.4.1
256
+ google-cloud-datastore==2.19.0
257
+ google-cloud-dlp==3.14.0
258
+ google-cloud-jupyter-config==0.0.5
259
+ google-cloud-language==2.13.4
260
+ google-cloud-monitoring==2.18.0
261
+ google-cloud-pubsub==2.19.0
262
+ google-cloud-pubsublite==1.9.0
263
+ google-cloud-recommendations-ai==0.7.1
264
+ google-cloud-resource-manager==1.11.0
265
+ google-cloud-spanner==3.40.1
266
+ google-cloud-storage==1.44.0
267
+ google-cloud-translate==3.12.1
268
+ google-cloud-videointelligence==2.13.4
269
+ google-cloud-vision==2.8.0
270
+ google-crc32c==1.5.0
271
+ google-generativeai==0.7.2
272
+ google-pasta==0.2.0
273
+ google-resumable-media==2.7.0
274
+ googleapis-common-protos==1.62.0
275
+ gplearn==0.4.2
276
+ gpustat==1.0.0
277
+ gpxpy==1.6.2
278
+ graphviz==0.20.3
279
+ greenlet==3.0.3
280
+ grpc-google-iam-v1==0.12.7
281
+ grpcio-status==1.48.0
282
+ grpcio-status==1.48.2
283
+ grpcio==1.60.0
284
+ grpcio==1.62.2
285
+ gviz-api==1.10.0
286
+ gym-notices==0.0.8
287
+ gym==0.26.2
288
+ gymnasium==0.29.0
289
+ h11==0.14.0
290
+ h2o==3.46.0.3
291
+ h5netcdf==1.3.0
292
+ h5py==3.10.0
293
+ haversine==2.8.1
294
+ hdfs==2.7.3
295
+ hep-ml==0.7.2
296
+ hijri-converter==2.3.1
297
+ hmmlearn==0.3.2
298
+ holidays==0.24
299
+ holoviews==1.19.1
300
+ hpsklearn==0.1.0
301
+ html5lib==1.1
302
+ htmlmin==0.1.12
303
+ httpcore==1.0.5
304
+ httplib2==0.21.0
305
+ httptools==0.6.1
306
+ httpx==0.27.0
307
+ huggingface-hub==0.23.4
308
+ hunspell==0.5.5
309
+ hydra-slayer==0.5.0
310
+ hyperopt==0.2.7
311
+ hypertools==0.8.0
312
+ idna==3.6
313
+ igraph==0.11.6
314
+ imagecodecs==2024.6.1
315
+ imageio==2.33.1
316
+ imbalanced-learn==0.12.3
317
+ imgaug==0.4.0
318
+ importlib-metadata==6.11.0
319
+ importlib-metadata==7.0.1
320
+ importlib-resources==6.1.1
321
+ inequality==1.0.1
322
+ iniconfig==2.0.0
323
+ ipydatawidgets==4.3.5
324
+ ipykernel==6.28.0
325
+ ipyleaflet==0.19.1
326
+ ipympl==0.7.0
327
+ ipython-genutils==0.2.0
328
+ ipython-genutils==0.2.0
329
+ ipython-sql==0.5.0
330
+ ipython==8.20.0
331
+ ipyvolume==0.6.3
332
+ ipyvue==1.11.1
333
+ ipyvuetify==1.9.4
334
+ ipywebrtc==0.6.0
335
+ ipywidgets==7.7.1
336
+ isoduration==20.11.0
337
+ isort==5.13.2
338
+ isoweek==1.3.3
339
+ itsdangerous==2.2.0
340
+ jaraco.classes==3.3.0
341
+ jax-jumpy==1.0.0
342
+ jax==0.4.26
343
+ jaxlib==0.4.26.dev20240504
344
+ jedi==0.19.1
345
+ jeepney==0.8.0
346
+ jieba==0.42.1
347
+ jmespath==1.0.1
348
+ joblib==1.4.2
349
+ json5==0.9.14
350
+ jsonpatch==1.33
351
+ jsonpointer==2.4
352
+ jsonschema-specifications==2023.12.1
353
+ jsonschema==4.20.0
354
+ jupyter-console==6.6.3
355
+ jupyter-events==0.9.0
356
+ jupyter-http-over-ws==0.0.8
357
+ jupyter-leaflet==0.19.1
358
+ jupyter-lsp==1.5.1
359
+ jupyter-server-mathjax==0.2.6
360
+ jupyter-ydoc==0.2.5
361
+ jupyter_client==7.4.9
362
+ jupyter_client==8.6.0
363
+ jupyter_core==5.7.1
364
+ jupyter_server==2.12.5
365
+ jupyter_server_fileid==0.9.1
366
+ jupyter_server_proxy==4.1.0
367
+ jupyter_server_terminals==0.5.1
368
+ jupyter_server_ydoc==0.8.0
369
+ jupyterlab-lsp==5.1.0
370
+ jupyterlab-widgets==3.0.9
371
+ jupyterlab==4.2.3
372
+ jupyterlab_git==0.44.0
373
+ jupyterlab_pygments==0.3.0
374
+ jupyterlab_server==2.27.2
375
+ jupytext==1.16.0
376
+ kaggle-environments==1.14.15
377
+ kaggle==1.6.14
378
+ kagglehub==0.2.7
379
+ keras-cv==0.9.0
380
+ keras-nlp==0.14.0
381
+ keras-tuner==1.4.6
382
+ keras==3.4.1
383
+ kernels-mixer==0.0.7
384
+ keyring==24.3.0
385
+ keyrings.google-artifactregistry-auth==1.1.2
386
+ kfp-pipeline-spec==0.2.2
387
+ kfp-server-api==2.0.5
388
+ kfp==2.5.0
389
+ kiwisolver==1.4.5
390
+ kmapper==2.1.0
391
+ kmodes==0.12.2
392
+ korean-lunar-calendar==0.3.1
393
+ kornia==0.7.3
394
+ kornia_rs==0.1.4
395
+ kt-legacy==1.0.5
396
+ kubernetes==26.1.0
397
+ langcodes==3.4.0
398
+ langid==1.1.6
399
+ language_data==1.2.0
400
+ lazy_loader==0.3
401
+ learntools==0.3.4
402
+ leven==1.0.4
403
+ libclang==16.0.6
404
+ libmambapy==1.5.8
405
+ libpysal==4.9.2
406
+ librosa==0.10.2.post1
407
+ lightgbm==4.2.0
408
+ lightning-utilities==0.11.3.post0
409
+ lime==0.2.0.1
410
+ line_profiler==4.1.3
411
+ linkify-it-py==2.0.3
412
+ llvmlite==0.41.1
413
+ llvmlite==0.43.0
414
+ lml==0.1.0
415
+ locket==1.0.0
416
+ loguru==0.7.2
417
+ lxml==5.2.2
418
+ lz4==4.3.3
419
+ mamba==1.5.8
420
+ mapclassify==2.7.0
421
+ marisa-trie==1.1.0
422
+ markdown-it-py==3.0.0
423
+ marshmallow==3.21.3
424
+ matplotlib-inline==0.1.6
425
+ matplotlib-venn==0.11.10
426
+ matplotlib==3.7.5
427
+ matplotlib==3.8.4
428
+ mccabe==0.7.0
429
+ mdit-py-plugins==0.4.0
430
+ mdurl==0.1.2
431
+ memory-profiler==0.61.0
432
+ menuinst==2.0.1
433
+ mercantile==1.2.1
434
+ mgwr==2.2.1
435
+ missingno==0.5.2
436
+ mistune==0.8.4
437
+ mizani==0.11.4
438
+ ml-dtypes==0.2.0
439
+ mlcrate==0.2.0
440
+ mlens==0.2.3
441
+ mlxtend==0.23.1
442
+ mne==1.7.1
443
+ mnist==0.2.2
444
+ momepy==0.7.2
445
+ more-itertools==10.2.0
446
+ mpld3==0.5.10
447
+ mpmath==1.3.0
448
+ msgpack==1.0.7
449
+ msgpack==1.0.8
450
+ multidict==6.0.4
451
+ multimethod==1.10
452
+ multipledispatch==1.0.0
453
+ multiprocess==0.70.16
454
+ munkres==1.1.4
455
+ murmurhash==1.0.10
456
+ mypy-extensions==1.0.0
457
+ namex==0.0.8
458
+ nb-conda-kernels==2.3.1
459
+ nb_conda==2.2.1
460
+ nbclassic==1.0.0
461
+ nbclient==0.5.13
462
+ nbclient==0.9.0
463
+ nbconvert==6.4.5
464
+ nbdime==3.2.0
465
+ nbformat==5.9.2
466
+ ndindex==1.8
467
+ nest-asyncio==1.5.8
468
+ networkx==3.2.1
469
+ nibabel==5.2.1
470
+ nilearn==0.10.4
471
+ ninja==1.11.1.1
472
+ nltk==3.2.4
473
+ nose==1.3.7
474
+ notebook==6.5.4
475
+ notebook==6.5.6
476
+ notebook_executor==0.2
477
+ notebook_shim==0.2.3
478
+ numba==0.58.1
479
+ numba==0.60.0
480
+ numexpr==2.10.1
481
+ numpy==1.26.4
482
+ nvidia-ml-py==11.495.46
483
+ nvtx==0.2.10
484
+ oauth2client==4.1.3
485
+ oauthlib==3.2.2
486
+ objsize==0.6.1
487
+ odfpy==1.4.1
488
+ olefile==0.47
489
+ onnx==1.16.1
490
+ opencensus-context==0.1.3
491
+ opencensus==0.11.4
492
+ opencv-contrib-python==4.10.0.84
493
+ opencv-python-headless==4.10.0.84
494
+ opencv-python==4.10.0.84
495
+ openpyxl==3.1.5
496
+ openslide-python==1.3.1
497
+ opentelemetry-api==1.22.0
498
+ opentelemetry-exporter-otlp-proto-common==1.22.0
499
+ opentelemetry-exporter-otlp-proto-grpc==1.22.0
500
+ opentelemetry-exporter-otlp-proto-http==1.22.0
501
+ opentelemetry-exporter-otlp==1.22.0
502
+ opentelemetry-proto==1.22.0
503
+ opentelemetry-sdk==1.22.0
504
+ opentelemetry-semantic-conventions==0.43b0
505
+ opt-einsum==3.3.0
506
+ optax==0.2.2
507
+ optree==0.12.1
508
+ optuna==3.6.1
509
+ orbax-checkpoint==0.5.20
510
+ ordered-set==4.1.0
511
+ orjson==3.9.10
512
+ ortools==9.4.1874
513
+ osmnx==1.9.3
514
+ overrides==7.4.0
515
+ packaging==21.3
516
+ pandas-datareader==0.10.0
517
+ pandas-profiling==3.6.6
518
+ pandas-summary==0.2.0
519
+ pandas==2.2.2
520
+ pandasql==0.7.3
521
+ pandocfilters==1.5.0
522
+ panel==1.4.4
523
+ papermill==2.5.0
524
+ param==2.1.1
525
+ parso==0.8.3
526
+ partd==1.4.2
527
+ path.py==12.5.0
528
+ path==16.14.0
529
+ pathos==0.3.2
530
+ patsy==0.5.6
531
+ pdf2image==1.17.0
532
+ pettingzoo==1.24.0
533
+ pexpect==4.8.0
534
+ pexpect==4.9.0
535
+ phik==0.12.4
536
+ pickleshare==0.7.5
537
+ pillow==10.4.0
538
+ pip==23.3.2
539
+ pkgutil_resolve_name==1.3.10
540
+ platformdirs==3.11.0
541
+ platformdirs==4.1.0
542
+ plotly-express==0.4.1
543
+ plotly==5.18.0
544
+ plotnine==0.13.6
545
+ pluggy==1.5.0
546
+ pointpats==2.5.0
547
+ polars==1.1.0
548
+ polyglot==16.7.4
549
+ pooch==1.8.2
550
+ portalocker==2.10.1
551
+ pox==0.3.4
552
+ ppca==0.0.4
553
+ ppft==1.7.6.8
554
+ preprocessing==0.1.13
555
+ preshed==3.0.9
556
+ prettytable==3.9.0
557
+ progressbar2==4.4.2
558
+ prometheus-client==0.19.0
559
+ promise==2.3
560
+ prompt-toolkit==3.0.42
561
+ prompt-toolkit==3.0.43
562
+ prophet==1.1.1
563
+ proto-plus==1.23.0
564
+ protobuf==3.20.3
565
+ protobuf==4.25.3
566
+ psutil==5.9.3
567
+ psutil==5.9.7
568
+ ptyprocess==0.7.0
569
+ pudb==2024.1
570
+ pure-eval==0.2.2
571
+ py-cpuinfo==9.0.0
572
+ py-spy==0.3.14
573
+ py4j==0.10.9.7
574
+ pyLDAvis==3.4.1
575
+ pyOpenSSL==23.3.0
576
+ pyaml==24.4.0
577
+ pyarrow-hotfix==0.6
578
+ pyarrow==16.1.0
579
+ pyasn1-modules==0.3.0
580
+ pyasn1==0.5.1
581
+ pybind11==2.13.1
582
+ pyclipper==1.3.0.post5
583
+ pycodestyle==2.11.1
584
+ pycosat==0.6.6
585
+ pycparser==2.21
586
+ pycryptodome==3.20.0
587
+ pyct==0.5.0
588
+ pycuda==2024.1
589
+ pydantic==2.5.3
590
+ pydantic==2.8.2
591
+ pydantic_core==2.14.6
592
+ pydantic_core==2.20.1
593
+ pydegensac==0.1.2
594
+ pydicom==2.4.4
595
+ pydocstyle==6.3.0
596
+ pydot==1.4.2
597
+ pydub==0.25.1
598
+ pyemd==1.0.0
599
+ pyerfa==2.0.1.4
600
+ pyexcel-io==0.6.6
601
+ pyexcel-ods==0.6.0
602
+ pyflakes==3.2.0
603
+ pygltflib==1.16.2
604
+ pykalman==0.9.7
605
+ pylibraft==24.6.0
606
+ pylint==3.2.5
607
+ pymc3==3.11.4
608
+ pymongo==3.13.0
609
+ pynndescent==0.5.13
610
+ pynvjitlink-cu12==0.2.4
611
+ pynvml==11.4.1
612
+ pynvrtc==9.2
613
+ pyogrio==0.9.0
614
+ pyparsing==3.1.1
615
+ pyparsing==3.1.2
616
+ pypdf==4.2.0
617
+ pyproj==3.6.1
618
+ pysal==24.1
619
+ pyshp==2.3.1
620
+ pytesseract==0.3.10
621
+ pytest==8.2.2
622
+ python-bidi==0.4.2
623
+ python-dateutil==2.9.0.post0
624
+ python-dotenv==1.0.0
625
+ python-json-logger==2.0.7
626
+ python-louvain==0.16
627
+ python-lsp-jsonrpc==1.1.2
628
+ python-lsp-server==1.11.0
629
+ python-slugify==8.0.4
630
+ python-utils==3.8.2
631
+ pythreejs==2.4.2
632
+ pytoolconfig==1.3.1
633
+ pytools==2024.1.6
634
+ pytorch-ignite==0.5.0.post2
635
+ pytorch-lightning==2.3.3
636
+ pytz==2023.3.post1
637
+ pytz==2024.1
638
+ pyu2f==0.1.5
639
+ pyviz_comms==3.0.2
640
+ pyzmq==24.0.1
641
+ pyzmq==25.1.2
642
+ qgrid==1.3.1
643
+ qtconsole==5.5.2
644
+ quantecon==0.7.2
645
+ qudida==0.0.4
646
+ raft-dask==24.6.0
647
+ rapids-dask-dependency==24.6.0a0
648
+ rasterio==1.3.10
649
+ rasterstats==0.19.0
650
+ ray-cpp==2.9.0
651
+ ray==2.9.0
652
+ referencing==0.32.1
653
+ regex==2023.12.25
654
+ requests-oauthlib==1.3.1
655
+ requests-toolbelt==0.10.1
656
+ requests==2.32.3
657
+ retrying==1.3.3
658
+ retrying==1.3.4
659
+ rfc3339-validator==0.1.4
660
+ rfc3986-validator==0.1.1
661
+ rgf-python==3.12.0
662
+ rich-click==1.8.3
663
+ rich==13.7.0
664
+ rich==13.7.1
665
+ rmm==24.6.0
666
+ rope==1.13.0
667
+ rpds-py==0.16.2
668
+ rsa==4.9
669
+ ruamel-yaml-conda==0.15.100
670
+ ruamel.yaml.clib==0.2.7
671
+ ruamel.yaml==0.18.5
672
+ s2sphere==0.2.5
673
+ s3fs==2024.5.0
674
+ s3transfer==0.6.2
675
+ sacrebleu==2.4.2
676
+ safetensors==0.4.3
677
+ scattertext==0.1.19
678
+ scikit-image==0.22.0
679
+ scikit-learn-intelex==2024.5.0
680
+ scikit-learn==1.2.2
681
+ scikit-multilearn==0.2.0
682
+ scikit-optimize==0.10.2
683
+ scikit-plot==0.3.7
684
+ scikit-surprise==1.1.4
685
+ scipy==1.11.4
686
+ scipy==1.14.0
687
+ seaborn==0.12.2
688
+ segment_anything==1.0
689
+ segregation==2.5
690
+ semver==3.0.2
691
+ sentencepiece==0.2.0
692
+ sentry-sdk==2.8.0
693
+ setproctitle==1.3.3
694
+ setuptools-git==1.2
695
+ setuptools-scm==8.1.0
696
+ setuptools==69.0.3
697
+ shap==0.44.1
698
+ shapely==2.0.4
699
+ shellingham==1.5.4
700
+ simpervisor==1.0.0
701
+ simplejson==3.19.2
702
+ six==1.16.0
703
+ sklearn-pandas==2.2.0
704
+ slicer==0.0.7
705
+ smart-open==6.4.0
706
+ smart_open==7.0.4
707
+ smmap==5.0.1
708
+ sniffio==1.3.0
709
+ snowballstemmer==2.2.0
710
+ snuggs==1.4.7
711
+ sortedcontainers==2.4.0
712
+ soundfile==0.12.1
713
+ soupsieve==2.5
714
+ soxr==0.3.7
715
+ spacy-legacy==3.0.12
716
+ spacy-loggers==1.0.5
717
+ spacy==3.7.5
718
+ spaghetti==1.7.6
719
+ spectral==0.23.1
720
+ spglm==1.1.0
721
+ sphinx-rtd-theme==0.2.4
722
+ spint==1.0.7
723
+ splot==1.1.5.post1
724
+ spopt==0.6.1
725
+ spreg==1.5.0
726
+ spvcm==0.3.0
727
+ sqlparse==0.4.4
728
+ squarify==0.4.3
729
+ srsly==2.4.8
730
+ stable-baselines3==2.1.0
731
+ stack-data==0.6.2
732
+ stack-data==0.6.3
733
+ stanio==0.5.1
734
+ starlette==0.32.0.post1
735
+ statsmodels==0.14.1
736
+ stemming==1.0.1
737
+ stop-words==2018.7.23
738
+ stopit==1.1.2
739
+ stumpy==1.13.0
740
+ sympy==1.13.0
741
+ tables==3.9.2
742
+ tabulate==0.9.0
743
+ tangled-up-in-unicode==0.2.0
744
+ tbb==2021.13.0
745
+ tblib==3.0.0
746
+ tenacity==8.2.3
747
+ tensorboard-data-server==0.7.2
748
+ tensorboard-plugin-profile==2.15.0
749
+ tensorboard==2.15.1
750
+ tensorboardX==2.6.2.2
751
+ tensorflow-cloud==0.1.16
752
+ tensorflow-datasets==4.9.4
753
+ tensorflow-decision-forests==1.8.1
754
+ tensorflow-estimator==2.15.0
755
+ tensorflow-hub==0.16.1
756
+ tensorflow-io-gcs-filesystem==0.35.0
757
+ tensorflow-io==0.35.0
758
+ tensorflow-metadata==0.14.0
759
+ tensorflow-probability==0.23.0
760
+ tensorflow-serving-api==2.14.1
761
+ tensorflow-text==2.15.0
762
+ tensorflow-transform==0.14.0
763
+ tensorflow==2.15.0
764
+ tensorstore==0.1.63
765
+ termcolor==2.4.0
766
+ terminado==0.18.0
767
+ testpath==0.6.0
768
+ text-unidecode==1.3
769
+ textblob==0.18.0.post0
770
+ texttable==1.7.0
771
+ tf_keras==2.15.1
772
+ tfp-nightly==0.24.0.dev0
773
+ thinc==8.2.3
774
+ threadpoolctl==3.2.0
775
+ tifffile==2023.12.9
776
+ timm==1.0.7
777
+ tinycss2==1.2.1
778
+ tobler==0.11.2
779
+ tokenizers==0.19.1
780
+ toml==0.10.2
781
+ tomli==2.0.1
782
+ tomlkit==0.12.5
783
+ toolz==0.12.1
784
+ torch==2.1.2
785
+ torchaudio==2.1.2
786
+ torchdata==0.7.1
787
+ torchinfo==1.8.0
788
+ torchmetrics==1.4.0.post0
789
+ torchtext==0.16.2
790
+ torchvision==0.16.2
791
+ tornado==6.3.3
792
+ tqdm==4.66.4
793
+ traceml==1.0.8
794
+ traitlets==5.9.0
795
+ traittypes==0.2.1
796
+ transformers==4.42.3
797
+ treelite==4.1.2
798
+ truststore==0.8.0
799
+ trx-python==0.3
800
+ tsfresh==0.20.2
801
+ typeguard==4.1.5
802
+ typer-slim==0.12.3
803
+ typer==0.12.3
804
+ typer==0.9.0
805
+ types-python-dateutil==2.8.19.20240106
806
+ typing-inspect==0.9.0
807
+ typing-utils==0.1.0
808
+ typing_extensions==4.9.0
809
+ tzdata==2023.4
810
+ tzdata==2024.1
811
+ uc-micro-py==1.0.3
812
+ ucx-py==0.38.0
813
+ ucxx==0.38.0
814
+ ujson==5.10.0
815
+ umap-learn==0.5.6
816
+ unicodedata2==15.1.0
817
+ update-checker==0.18.0
818
+ uri-template==1.3.0
819
+ uritemplate==3.0.1
820
+ urllib3==1.26.18
821
+ urllib3==2.1.0
822
+ urwid==2.6.15
823
+ urwid_readline==0.14
824
+ uvicorn==0.25.0
825
+ uvloop==0.19.0
826
+ vaex-astro==0.9.3
827
+ vaex-core==4.17.1
828
+ vaex-hdf5==0.14.1
829
+ vaex-jupyter==0.8.2
830
+ vaex-ml==0.18.3
831
+ vaex-server==0.9.0
832
+ vaex-viz==0.5.4
833
+ vaex==4.17.0
834
+ vec_noise==1.1.4
835
+ vecstack==0.4.0
836
+ virtualenv==20.21.0
837
+ visions==0.7.5
838
+ vowpalwabbit==9.9.0
839
+ vtk==9.3.1
840
+ wandb==0.17.4
841
+ wasabi==1.1.2
842
+ watchfiles==0.21.0
843
+ wavio==0.0.9
844
+ wcwidth==0.2.13
845
+ weasel==0.4.1
846
+ webcolors==1.13
847
+ webencodings==0.5.1
848
+ websocket-client==1.7.0
849
+ websockets==12.0
850
+ wfdb==4.1.2
851
+ whatthepatch==1.0.5
852
+ wheel==0.42.0
853
+ widgetsnbextension==3.6.7
854
+ witwidget==1.8.1
855
+ woodwork==0.31.0
856
+ wordcloud==1.9.3
857
+ wordsegment==1.3.1
858
+ wrapt==1.14.1
859
+ wrapt==1.16.0
860
+ xarray-einstats==0.7.0
861
+ xarray==2024.6.0
862
+ xgboost==2.0.3
863
+ xvfbwrapper==0.2.9
864
+ xxhash==3.4.1
865
+ xyzservices==2024.6.0
866
+ y-py==0.6.2
867
+ yapf==0.40.2
868
+ yarl==1.9.3
869
+ yarl==1.9.4
870
+ ydata-profiling==4.6.4
871
+ yellowbrick==1.5
872
+ ypy-websocket==0.8.4
873
+ zict==3.0.0
874
+ zipp==3.17.0
875
+ zstandard==0.22.0
wandb/run-20240719_150308-wq61zns9/files/wandb-metadata.json ADDED
@@ -0,0 +1,62 @@
1
+ {
2
+ "os": "Linux-5.15.154+-x86_64-with-glibc2.31",
3
+ "python": "3.10.13",
4
+ "heartbeatAt": "2024-07-19T15:03:09.512854",
5
+ "startedAt": "2024-07-19T15:03:08.531392",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [],
9
+ "state": "running",
10
+ "program": "kaggle.ipynb",
11
+ "codePathLocal": null,
12
+ "root": "/kaggle/working",
13
+ "host": "8df4908137f9",
14
+ "username": "root",
15
+ "executable": "/opt/conda/bin/python3.10",
16
+ "cpu_count": 2,
17
+ "cpu_count_logical": 4,
18
+ "cpu_freq": {
19
+ "current": 2000.188,
20
+ "min": 0.0,
21
+ "max": 0.0
22
+ },
23
+ "cpu_freq_per_core": [
24
+ {
25
+ "current": 2000.188,
26
+ "min": 0.0,
27
+ "max": 0.0
28
+ },
29
+ {
30
+ "current": 2000.188,
31
+ "min": 0.0,
32
+ "max": 0.0
33
+ },
34
+ {
35
+ "current": 2000.188,
36
+ "min": 0.0,
37
+ "max": 0.0
38
+ },
39
+ {
40
+ "current": 2000.188,
41
+ "min": 0.0,
42
+ "max": 0.0
43
+ }
44
+ ],
45
+ "disk": {
46
+ "/": {
47
+ "total": 8062.387607574463,
48
+ "used": 5772.083881378174
49
+ }
50
+ },
51
+ "gpu": "Tesla P100-PCIE-16GB",
52
+ "gpu_count": 1,
53
+ "gpu_devices": [
54
+ {
55
+ "name": "Tesla P100-PCIE-16GB",
56
+ "memory_total": 17179869184
57
+ }
58
+ ],
59
+ "memory": {
60
+ "total": 31.357574462890625
61
+ }
62
+ }
wandb/run-20240719_150308-wq61zns9/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"eval/loss": 0.1450444906949997, "eval/bleu": 13.6092, "eval/gen_len": 66.5229, "eval/runtime": 278.1908, "eval/samples_per_second": 4.483, "eval/steps_per_second": 0.28, "train/epoch": 1.6025641025641026, "train/global_step": 500, "_timestamp": 1721402643.8727162, "_runtime": 1255.3338971138, "_step": 1, "train/loss": 0.1446, "train/grad_norm": 0.4473249912261963, "train/learning_rate": 1.6794871794871796e-05}
wandb/run-20240719_150308-wq61zns9/logs/debug-internal.log ADDED
The diff for this file is too large to render.
 
wandb/run-20240719_150308-wq61zns9/logs/debug.log ADDED
@@ -0,0 +1,51 @@
1
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Current SDK version is 0.17.4
2
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Configure stats pid to 35
3
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from /kaggle/working/wandb/settings
5
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program': '<python with no main file>'}
8
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {}
9
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_setup.py:_flush():76] Applying login settings: {'api_key': '***REDACTED***'}
10
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_log_setup():529] Logging user logs to /kaggle/working/wandb/run-20240719_150308-wq61zns9/logs/debug.log
11
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_log_setup():530] Logging internal logs to /kaggle/working/wandb/run-20240719_150308-wq61zns9/logs/debug-internal.log
12
+ 2024-07-19 15:03:08,533 INFO MainThread:35 [wandb_init.py:_jupyter_setup():475] configuring jupyter hooks <wandb.sdk.wandb_init._WandbInit object at 0x7ac420802680>
13
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():569] calling init triggers
14
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():576] wandb.init called with sweep_config: {}
15
+ config: {}
16
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():619] starting backend
17
+ 2024-07-19 15:03:08,534 INFO MainThread:35 [wandb_init.py:init():623] setting up manager
18
+ 2024-07-19 15:03:08,535 INFO MainThread:35 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
19
+ 2024-07-19 15:03:08,538 INFO MainThread:35 [wandb_init.py:init():631] backend started and connected
20
+ 2024-07-19 15:03:08,550 INFO MainThread:35 [wandb_run.py:_label_probe_notebook():1334] probe notebook
21
+ 2024-07-19 15:03:09,072 INFO MainThread:35 [wandb_init.py:init():720] updated telemetry
22
+ 2024-07-19 15:03:09,075 INFO MainThread:35 [wandb_init.py:init():753] communicating run to backend with 90.0 second timeout
23
+ 2024-07-19 15:03:09,407 INFO MainThread:35 [wandb_run.py:_on_init():2402] communicating current version
24
+ 2024-07-19 15:03:09,445 INFO MainThread:35 [wandb_run.py:_on_init():2411] got version response
25
+ 2024-07-19 15:03:09,445 INFO MainThread:35 [wandb_init.py:init():804] starting run threads in backend
26
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_console_start():2380] atexit reg
27
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_redirect():2235] redirect: wrap_raw
28
+ 2024-07-19 15:03:25,491 INFO MainThread:35 [wandb_run.py:_redirect():2300] Wrapping output streams.
29
+ 2024-07-19 15:03:25,492 INFO MainThread:35 [wandb_run.py:_redirect():2325] Redirects installed.
30
+ 2024-07-19 15:03:25,496 INFO MainThread:35 [wandb_init.py:init():847] run started, returning control to user process
31
+ 2024-07-19 15:03:25,502 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb None None {'vocab_size': 62802, 'decoder_vocab_size': 62802, 'max_position_embeddings': 512, 'd_model': 512, 'encoder_ffn_dim': 2048, 'encoder_layers': 6, 'encoder_attention_heads': 8, 'decoder_ffn_dim': 2048, 'decoder_layers': 6, 'decoder_attention_heads': 8, 'dropout': 0.1, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'activation_function': 'swish', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'use_cache': True, 'num_hidden_layers': 6, 'scale_embedding': True, 'share_encoder_decoder_embeddings': True, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 512, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': [[62801]], 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': 0, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['MarianMTModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 62801, 'eos_token_id': 0, 'sep_token_id': None, 'decoder_start_token_id': 62801, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'Helsinki-NLP/opus-mt-en-ar', 'transformers_version': '4.42.3', 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.0, 'classifier_dropout': 0.0, 'extra_pos_embeddings': 62802, 'model_type': 'marian', 'normalize_before': False, 'normalize_embedding': False, 'static_position_embeddings': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Jul19_15-02-56_8df4908137f9', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': 
False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': None, 'generation_num_beams': None, 'generation_config': None}
32
+ 2024-07-19 15:03:25,508 INFO MainThread:35 [wandb_config.py:__setitem__():151] config set model/num_parameters = 76817408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7ac41ae98400>>
33
+ 2024-07-19 15:03:25,508 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb model/num_parameters 76817408 None
34
+ 2024-07-19 15:12:08,909 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
35
+ 2024-07-19 15:12:08,909 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
36
+ 2024-07-19 15:13:08,410 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
37
+ 2024-07-19 15:13:09,248 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
38
+ 2024-07-19 15:13:09,248 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
39
+ 2024-07-19 15:13:22,988 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
40
+ 2024-07-19 15:13:22,990 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
41
+ 2024-07-19 15:13:22,990 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
42
+ 2024-07-19 15:13:23,619 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
43
+ 2024-07-19 15:13:23,649 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
44
+ 2024-07-19 15:13:23,649 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
45
+ 2024-07-19 15:13:24,165 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
46
+ 2024-07-19 15:13:24,311 INFO MainThread:35 [jupyter.py:save_ipynb():372] not saving jupyter notebook
47
+ 2024-07-19 15:13:24,311 INFO MainThread:35 [wandb_init.py:_pause_backend():440] pausing backend
48
+ 2024-07-19 15:13:24,660 INFO MainThread:35 [wandb_init.py:_resume_backend():445] resuming backend
49
+ 2024-07-19 15:13:25,433 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb None None {'vocab_size': 62802, 'decoder_vocab_size': 62802, 'max_position_embeddings': 512, 'd_model': 512, 'encoder_ffn_dim': 2048, 'encoder_layers': 6, 'encoder_attention_heads': 8, 'decoder_ffn_dim': 2048, 'decoder_layers': 6, 'decoder_attention_heads': 8, 'dropout': 0.1, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'activation_function': 'swish', 'init_std': 0.02, 'encoder_layerdrop': 0.0, 'decoder_layerdrop': 0.0, 'use_cache': True, 'num_hidden_layers': 6, 'scale_embedding': True, 'share_encoder_decoder_embeddings': True, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 512, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 4, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': [[62801]], 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': 0, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['MarianMTModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 62801, 'eos_token_id': 0, 'sep_token_id': None, 'decoder_start_token_id': 62801, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'Helsinki-NLP/opus-mt-en-ar', 'transformers_version': '4.42.3', 'add_bias_logits': False, 'add_final_layer_norm': False, 'classif_dropout': 0.0, 'classifier_dropout': 0.0, 'extra_pos_embeddings': 62802, 'model_type': 'marian', 'normalize_before': False, 'normalize_embedding': False, 'static_position_embeddings': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Jul19_15-13-23_8df4908137f9', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': 
False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': None, 'generation_num_beams': None, 'generation_config': None}
50
+ 2024-07-19 15:13:25,439 INFO MainThread:35 [wandb_config.py:__setitem__():151] config set model/num_parameters = 76817408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7ac41ae98400>>
51
+ 2024-07-19 15:13:25,439 INFO MainThread:35 [wandb_run.py:_config_callback():1382] config_cb model/num_parameters 76817408 None
wandb/run-20240719_150308-wq61zns9/run-wq61zns9.wandb ADDED
Binary file (40.4 kB).