# pythia410m-sft-tldr/code/configs/dpo_20konly_1b_bf16.yml
## model and evaluation settings (Costa Huang / vwxyzjn artifacts)
model_name: vwxyzjn/EleutherAI_pythia-1b-deduped__sft__tldr
model_revision: sft__55513__1706646024
dataset_name: vwxyzjn/summarize_from_feedback_oai_preprocessing_1706381144
tokenizer_name: EleutherAI/pythia-1b-deduped
eval_split: validation
prompt_field: query
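# "gold" settings below: a larger reward model used to score generated summaries
# during evaluation (inferred from the field names; not part of the DPO loss itself)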
gold_model_name: vwxyzjn/EleutherAI_pythia-6.9b-deduped__reward__tldr
gold_model_revision: reward__55513__1706651113
gold_dataset_name: vwxyzjn/summarize_from_feedback_tldr_3_filtered_oai_preprocessing_1706381144
gold_prompt_field: query
gold_target_field: reference_response
gold_eval_split: validation
strip_prompt: False
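# A minimal loading sketch (illustrative only, not this repo's training script;
# it assumes the fields above map directly onto `from_pretrained` arguments):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   policy = AutoModelForCausalLM.from_pretrained(model_name, revision=model_revision)
#   tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)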
## DPO training settings
eval_first_step: False
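# offline preference data for DPO; the dataset name suggests ~20k model-generated
# summary pairs relabeled for preferences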
pseudo_dataset_name: mnoukhov/summarize_from_feedback_tldr3_generated_20k_relabel_pythia1b_dpo
beta: 0.5
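# beta is the DPO temperature: the loss is
#   -log sigmoid( beta * [ (log pi(y_w|x) - log pi_ref(y_w|x))
#                        - (log pi(y_l|x) - log pi_ref(y_l|x)) ] )
# so a larger beta penalizes drifting from the SFT reference more strongly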
max_steps: 10000
eval_steps: 1000
load_in_8bit: False
bf16: True
fp16: False
learning_rate: 1.0e-5
use_peft: True
lora_all_linear: True
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
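# LoRA sketch (assumption: these fields feed a peft.LoraConfig and `lora_all_linear`
# expands target_modules to every linear layer; illustrative only):
#   from peft import LoraConfig
#   peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")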
gradient_accumulation_steps: 16
per_device_train_batch_size: 4
warmup_steps: 150
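# effective batch size = per_device_train_batch_size * gradient_accumulation_steps
#                      = 4 * 16 = 64 sequences per optimizer step (per GPU;
#                        multiply by world size when running data-parallel)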