jofaichow committed on
Commit
af71030
1 Parent(s): b4a7ced

Upload cfg.yaml

Files changed (1)
  1. cfg.yaml +108 -0
cfg.yaml ADDED
@@ -0,0 +1,108 @@
+architecture:
+  backbone_dtype: int4
+  gradient_checkpointing: true
+  intermediate_dropout: 0.0
+  pretrained: true
+  pretrained_weights: ''
+augmentation:
+  neftune_noise_alpha: 0.0
+  random_parent_probability: 0.0
+  skip_parent_probability: 0.0
+  token_mask_probability: 0.0
+dataset:
+  add_eos_token_to_answer: true
+  add_eos_token_to_prompt: true
+  add_eos_token_to_system: true
+  answer_column: Answer
+  chatbot_author: JC
+  chatbot_name: JCBOT
+  data_sample: 1.0
+  data_sample_choice:
+  - Train
+  - Validation
+  limit_chained_samples: false
+  mask_prompt_labels: true
+  parent_id_column: None
+  personalize: true
+  prompt_column:
+  - Question
+  system_column: "System\r"
+  text_answer_separator: ''
+  text_prompt_start: <|user|>
+  text_system_start: <|system|>
+  train_dataframe: /workspace/data/user/flock_task5_data_v3/flock_task5_data_v3.csv
+  validation_dataframe: None
+  validation_size: 0.01
+  validation_strategy: automatic
+environment:
+  compile_model: false
+  deepspeed_allgather_bucket_size: 1000000
+  deepspeed_method: ZeRO2
+  deepspeed_reduce_bucket_size: 1000000
+  deepspeed_stage3_param_persistence_threshold: 1000000
+  deepspeed_stage3_prefetch_bucket_size: 1000000
+  find_unused_parameters: false
+  gpus:
+  - '0'
+  huggingface_branch: main
+  mixed_precision: true
+  mixed_precision_dtype: bfloat16
+  number_of_workers: 8
+  seed: 1234
+  trust_remote_code: true
+  use_deepspeed: false
+experiment_name: z7b-beta-data-v3
+llm_backbone: HuggingFaceH4/zephyr-7b-beta
+logging:
+  logger: None
+  neptune_project: ''
+output_directory: /workspace/output/user/z7b-beta-data-v3/
+prediction:
+  batch_size_inference: 0
+  do_sample: false
+  max_length_inference: 1024
+  max_time: 0.0
+  metric: BLEU
+  metric_gpt_model: gpt-3.5-turbo-0301
+  metric_gpt_template: general
+  min_length_inference: 2
+  num_beams: 1
+  num_history: 4
+  repetition_penalty: 1.0
+  stop_tokens: ''
+  temperature: 0.0
+  top_k: 0
+  top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+  add_prompt_answer_tokens: false
+  max_length: 1024
+  padding_quantile: 1.0
+  tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+  batch_size: 2
+  differential_learning_rate: 1.0e-05
+  differential_learning_rate_layers: []
+  drop_last_batch: true
+  epochs: 4
+  evaluate_before_training: false
+  evaluation_epochs: 1.0
+  freeze_layers: []
+  grad_accumulation: 4
+  gradient_clip: 0.0
+  learning_rate: 0.0001
+  lora: true
+  lora_alpha: 512
+  lora_dropout: 0.1
+  lora_r: 256
+  lora_target_modules: ''
+  lora_unfreeze_layers: []
+  loss_function: TokenAveragedCrossEntropy
+  optimizer: AdamW
+  save_checkpoint: last
+  schedule: Cosine
+  train_validation_data: false
+  use_dora: false
+  use_flash_attention_2: false
+  warmup_epochs: 0.0
+  weight_decay: 0.0
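
The field names above look like an H2O LLM Studio experiment config (an inference from the key layout; the commit itself does not say which tool produced it). A minimal sketch of loading the file and sanity-checking a few of the committed values with PyYAML, assuming it is saved locally as cfg.yaml:

# Minimal sketch: load this cfg.yaml and check a few values.
# Assumes PyYAML is installed and cfg.yaml is in the working directory.
import yaml

with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["llm_backbone"])            # HuggingFaceH4/zephyr-7b-beta
print(cfg["training"]["lora_r"])      # 256
print(cfg["training"]["lora_alpha"])  # 512, i.e. LoRA scaling alpha/r = 2.0

# Effective optimizer batch = batch_size x grad_accumulation x number of GPUs.
n_gpus = len(cfg["environment"]["gpus"])  # ['0'] -> 1
effective_batch = (cfg["training"]["batch_size"]
                   * cfg["training"]["grad_accumulation"]
                   * n_gpus)
print(effective_batch)                # 2 * 4 * 1 = 8

Nothing here is specific to any training tool; the script only reads back the values committed above.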