[INFO|configuration_utils.py:731] 2024-09-25 21:54:48,859 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[INFO|configuration_utils.py:731] 2024-09-25 21:54:48,876 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[INFO|configuration_utils.py:800] 2024-09-25 21:54:48,877 >> Model config Phi3Config {
  "_name_or_path": "/mnt/c/checkout/Phi-3.5-mini-instruct",
  "architectures": [
    "Phi3ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_phi3.Phi3Config",
    "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
  },
  "bos_token_id": 1,
  "embd_pdrop": 0.0,
  "eos_token_id": 32000,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "model_type": "phi3",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "original_max_position_embeddings": 4096,
  "pad_token_id": 32000,
  "resid_pdrop": 0.0,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "long_factor": [
      1.0800000429153442,
      1.1100000143051147,
      1.1399999856948853,
      1.340000033378601,
      1.5899999141693115,
      1.600000023841858,
      1.6200000047683716,
      2.620000123977661,
      3.2300000190734863,
      3.2300000190734863,
      4.789999961853027,
      7.400000095367432,
      7.700000286102295,
      9.09000015258789,
      12.199999809265137,
      17.670000076293945,
      24.46000099182129,
      28.57000160217285,
      30.420001983642578,
      30.840002059936523,
      32.590003967285156,
      32.93000411987305,
      42.320003509521484,
      44.96000289916992,
      50.340003967285156,
      50.45000457763672,
      57.55000305175781,
      57.93000411987305,
      58.21000289916992,
      60.1400032043457,
      62.61000442504883,
      62.62000274658203,
      62.71000289916992,
      63.1400032043457,
      63.1400032043457,
      63.77000427246094,
      63.93000411987305,
      63.96000289916992,
      63.970001220703125,
      64.02999877929688,
      64.06999969482422,
      64.08000183105469,
      64.12000274658203,
      64.41000366210938,
      64.4800033569336,
      64.51000213623047,
      64.52999877929688,
      64.83999633789062
    ],
    "short_factor": [
      1.0,
      1.0199999809265137,
      1.0299999713897705,
      1.0299999713897705,
      1.0499999523162842,
      1.0499999523162842,
      1.0499999523162842,
      1.0499999523162842,
      1.0499999523162842,
      1.0699999332427979,
      1.0999999046325684,
      1.1099998950958252,
      1.1599998474121094,
      1.1599998474121094,
      1.1699998378753662,
      1.2899998426437378,
      1.339999794960022,
      1.679999828338623,
      1.7899998426437378,
      1.8199998140335083,
      1.8499997854232788,
      1.8799997568130493,
      1.9099997282028198,
      1.9399996995925903,
      1.9899996519088745,
      2.0199997425079346,
      2.0199997425079346,
      2.0199997425079346,
      2.0199997425079346,
      2.0199997425079346,
      2.0199997425079346,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0299997329711914,
      2.0799996852874756,
      2.0899996757507324,
      2.189999580383301,
      2.2199995517730713,
      2.5899994373321533,
      2.729999542236328,
      2.749999523162842,
      2.8399994373321533
    ],
    "type": "longrope"
  },
  "rope_theta": 10000.0,
  "sliding_window": 262144,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.2",
  "use_cache": true,
  "vocab_size": 32064
}
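The dump above is worth a second look: original_max_position_embeddings (4096) is the window the base model was pre-trained on, while max_position_embeddings (131072) is the context reached through the "longrope" rope_scaling block, whose long_factor and short_factor lists each hold 48 entries, one per rotary frequency pair (head_dim = 3072 / 32 heads = 96, and 96 / 2 = 48). A minimal sketch for inspecting these fields, assuming only that transformers is installed and reusing the checkout path from the log:

from transformers import AutoConfig

# trust_remote_code is needed because "auto_map" points at custom classes.
config = AutoConfig.from_pretrained(
    "/mnt/c/checkout/Phi-3.5-mini-instruct",
    trust_remote_code=True,
)
print(config.original_max_position_embeddings)  # 4096, pre-training window
print(config.max_position_embeddings)           # 131072, longrope-extended
print(config.rope_scaling["type"])              # "longrope"
print(len(config.rope_scaling["long_factor"]))  # 48 = (3072 / 32) / 2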
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,919 >> loading file tokenizer.model
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,919 >> loading file tokenizer.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,919 >> loading file added_tokens.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,919 >> loading file special_tokens_map.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,919 >> loading file tokenizer_config.json
[INFO|tokenization_utils_base.py:2513] 2024-09-25 21:54:48,960 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
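That "Special tokens have been added" notice is emitted when the tokenizer carries tokens beyond the base vocabulary; it matters because a token whose embedding row was never trained will produce poor outputs until it is fine-tuned. If you add tokens yourself, the usual transformers idiom is to resize the embedding matrix to match. A sketch of that general pattern, not something this run does; the <|my_token|> name is hypothetical:

from transformers import AutoModelForCausalLM, AutoTokenizer

path = "/mnt/c/checkout/Phi-3.5-mini-instruct"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True)

# Hypothetical extra token, purely for illustration.
num_added = tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<|my_token|>"]}
)
if num_added > 0:
    # Grow the embedding rows so the new id exists and can be trained.
    model.resize_token_embeddings(len(tokenizer))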
[INFO|configuration_utils.py:731] 2024-09-25 21:54:48,969 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[INFO|configuration_utils.py:731] 2024-09-25 21:54:48,979 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,992 >> loading file tokenizer.model
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,992 >> loading file tokenizer.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,993 >> loading file added_tokens.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,993 >> loading file special_tokens_map.json
[INFO|tokenization_utils_base.py:2267] 2024-09-25 21:54:48,993 >> loading file tokenizer_config.json
[INFO|tokenization_utils_base.py:2513] 2024-09-25 21:54:49,030 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[INFO|configuration_utils.py:731] 2024-09-25 21:54:49,791 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[INFO|configuration_utils.py:731] 2024-09-25 21:54:49,799 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/config.json
[WARNING|modeling_phi3.py:62] 2024-09-25 21:54:51,178 >> `flash-attention` package not found, consider installing for better performance: No module named 'flash_attn'.
[WARNING|modeling_phi3.py:66] 2024-09-25 21:54:51,178 >> Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`.
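These two warnings mean the run proceeds on the slower eager attention path rather than flash-attention. Two usual remedies, sketched under the assumption that you are loading the model yourself with transformers (the second is exactly what the warning suggests):

import torch
from transformers import AutoModelForCausalLM

# Option 1: install the package the first warning is missing
# (needs a CUDA build toolchain):  pip install flash-attn --no-build-isolation
# Option 2: request eager attention explicitly to match the fallback:
model = AutoModelForCausalLM.from_pretrained(
    "/mnt/c/checkout/Phi-3.5-mini-instruct",
    torch_dtype=torch.bfloat16,    # matches "torch_dtype": "bfloat16" above
    attn_implementation="eager",   # or "flash_attention_2" once installed
    trust_remote_code=True,
)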
[INFO|modeling_utils.py:3675] 2024-09-25 21:54:51,183 >> loading weights file /mnt/c/checkout/Phi-3.5-mini-instruct/model.safetensors.index.json
[INFO|modeling_utils.py:1606] 2024-09-25 21:54:51,191 >> Instantiating Phi3ForCausalLM model under default dtype torch.bfloat16.
[INFO|configuration_utils.py:1038] 2024-09-25 21:54:51,191 >> Generate config GenerationConfig {
  "bos_token_id": 1,
  "eos_token_id": 32000,
  "pad_token_id": 32000
}
[INFO|modeling_utils.py:4507] 2024-09-25 21:56:43,132 >> All model checkpoint weights were used when initializing Phi3ForCausalLM.
[INFO|modeling_utils.py:4515] 2024-09-25 21:56:43,132 >> All the weights of Phi3ForCausalLM were initialized from the model checkpoint at /mnt/c/checkout/Phi-3.5-mini-instruct.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Phi3ForCausalLM for predictions without further training.
[INFO|configuration_utils.py:991] 2024-09-25 21:56:43,142 >> loading configuration file /mnt/c/checkout/Phi-3.5-mini-instruct/generation_config.json
[INFO|configuration_utils.py:1038] 2024-09-25 21:56:43,142 >> Generate config GenerationConfig {
  "bos_token_id": 1,
  "eos_token_id": [
    32007,
    32001,
    32000
  ],
  "pad_token_id": 32000
}
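Note that generation_config.json widens eos_token_id from the single 32000 in config.json to a list of three ids; generation halts at whichever of them is produced first. The same list can be passed per call. A fragment, assuming a model and tokenized inputs already exist as in the sketches above (mapping the ids to <|end|>, <|assistant|>, and <|endoftext|> follows Phi-3's added-token table; verify against your checkout's added_tokens.json):

# Stop at the first of several terminator tokens; ids taken from the log.
output_ids = model.generate(
    **inputs,
    max_new_tokens=256,
    eos_token_id=[32007, 32001, 32000],  # <|end|>, <|assistant|>, <|endoftext|>
    pad_token_id=32000,
)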
[INFO|trainer.py:648] 2024-09-25 21:56:43,208 >> Using auto half precision backend
[INFO|trainer.py:2134] 2024-09-25 21:57:01,081 >> ***** Running training *****
[INFO|trainer.py:2135] 2024-09-25 21:57:01,081 >> Num examples = 3,900
[INFO|trainer.py:2136] 2024-09-25 21:57:01,081 >> Num Epochs = 32
[INFO|trainer.py:2137] 2024-09-25 21:57:01,081 >> Instantaneous batch size per device = 4
[INFO|trainer.py:2140] 2024-09-25 21:57:01,081 >> Total train batch size (w. parallel, distributed & accumulation) = 16
[INFO|trainer.py:2141] 2024-09-25 21:57:01,081 >> Gradient Accumulation steps = 4
[INFO|trainer.py:2142] 2024-09-25 21:57:01,081 >> Total optimization steps = 7,776
[INFO|trainer.py:2143] 2024-09-25 21:57:01,082 >> Number of trainable parameters = 113,252,352
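These trainer numbers are mutually consistent for a single-device run: 4 per device x 4 accumulation steps already equals the reported total batch of 16, each epoch contributes floor(3,900 / 16) = 243 optimizer steps, and 243 x 32 epochs = 7,776 total optimization steps. A quick check:

# Consistency check of the numbers logged above (assumes 1 GPU, since
# 4 per device * 4 accumulation already gives the reported total of 16).
num_examples, per_device, grad_accum, devices, epochs = 3900, 4, 4, 1, 32
total_batch = per_device * grad_accum * devices   # 16
steps_per_epoch = num_examples // total_batch     # 243
print(steps_per_epoch * epochs)                   # 7776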
[WARNING|logging.py:328] 2024-09-25 21:57:01,353 >> You are not running the flash-attention implementation, expect numerical differences.
[INFO|trainer.py:3503] 2024-09-26 01:55:14,826 >> Saving model checkpoint to saves/Custom/full/SpecAI/checkpoint-2000
[INFO|configuration_utils.py:472] 2024-09-26 01:55:14,876 >> Configuration saved in saves/Custom/full/SpecAI/checkpoint-2000/config.json
[INFO|configuration_utils.py:807] 2024-09-26 01:55:14,882 >> Configuration saved in saves/Custom/full/SpecAI/checkpoint-2000/generation_config.json
[INFO|modeling_utils.py:2807] 2024-09-26 01:55:31,164 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Custom/full/SpecAI/checkpoint-2000/model.safetensors.index.json.
[INFO|tokenization_utils_base.py:2684] 2024-09-26 01:55:31,171 >> tokenizer config file saved in saves/Custom/full/SpecAI/checkpoint-2000/tokenizer_config.json
[INFO|tokenization_utils_base.py:2693] 2024-09-26 01:55:31,175 >> Special tokens file saved in saves/Custom/full/SpecAI/checkpoint-2000/special_tokens_map.json
[INFO|trainer.py:3503] 2024-09-26 06:01:21,589 >> Saving model checkpoint to saves/Custom/full/SpecAI/checkpoint-4000
[INFO|configuration_utils.py:472] 2024-09-26 06:01:21,634 >> Configuration saved in saves/Custom/full/SpecAI/checkpoint-4000/config.json
[INFO|configuration_utils.py:807] 2024-09-26 06:01:21,639 >> Configuration saved in saves/Custom/full/SpecAI/checkpoint-4000/generation_config.json
[INFO|modeling_utils.py:2807] 2024-09-26 06:01:33,186 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Custom/full/SpecAI/checkpoint-4000/model.safetensors.index.json.
[INFO|tokenization_utils_base.py:2684] 2024-09-26 06:01:33,193 >> tokenizer config file saved in saves/Custom/full/SpecAI/checkpoint-4000/tokenizer_config.json
[INFO|tokenization_utils_base.py:2693] 2024-09-26 06:01:33,196 >> Special tokens file saved in saves/Custom/full/SpecAI/checkpoint-4000/special_tokens_map.json
[INFO|trainer.py:2394] 2024-09-26 07:57:18,192 >>

Training completed. Do not forget to share your model on huggingface.co/models =)

[INFO|trainer.py:3503] 2024-09-26 07:57:18,202 >> Saving model checkpoint to saves/Custom/full/SpecAI
[INFO|configuration_utils.py:472] 2024-09-26 07:57:18,244 >> Configuration saved in saves/Custom/full/SpecAI/config.json
[INFO|configuration_utils.py:807] 2024-09-26 07:57:18,247 >> Configuration saved in saves/Custom/full/SpecAI/generation_config.json
[INFO|modeling_utils.py:2807] 2024-09-26 07:57:29,353 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Custom/full/SpecAI/model.safetensors.index.json.
[INFO|tokenization_utils_base.py:2684] 2024-09-26 07:57:29,357 >> tokenizer config file saved in saves/Custom/full/SpecAI/tokenizer_config.json
[INFO|tokenization_utils_base.py:2693] 2024-09-26 07:57:29,359 >> Special tokens file saved in saves/Custom/full/SpecAI/special_tokens_map.json
[INFO|modelcard.py:449] 2024-09-26 07:57:29,626 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
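At this point saves/Custom/full/SpecAI holds a complete, self-contained model: the two safetensors shards plus model.safetensors.index.json, the config files, and the tokenizer files saved above. from_pretrained resolves the index and loads both shards automatically. A sketch of loading the finished fine-tune for a smoke test (the test message is made up):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "saves/Custom/full/SpecAI"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
)

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello, who are you?"}],  # made-up test turn
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))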