|
|
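# Gradio app: fine-tune microsoft/Phi-3-mini-4k-instruct and prompt the fine-tuned model with a text-to-SQL task.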
|
import gradio as gr |
|
import os, torch |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import datasets, sys, logging, transformers
|
from datasets import load_dataset |
|
from peft import LoraConfig |
|
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig, pipeline
|
from trl import SFTTrainer, setup_chat_format

from huggingface_hub import HfApi, login
|
|
|
|
|
|
|
hf_profile = "bstraehle" |
|
|
|
action_1 = "Fine-tune pre-trained model" |
|
action_2 = "Prompt fine-tuned model" |
|
|
|
system_prompt = "You are a text to SQL query translator. Given a question in English, generate a SQL query based on the provided SCHEMA. Do not generate any additional text. SCHEMA: {schema}" |
|
user_prompt = "What is the total trade value and average price for each trader and stock in the trade_history table?" |
|
schema = "CREATE TABLE trade_history (id INT, trader_id INT, stock VARCHAR(255), price DECIMAL(5,2), quantity INT, trade_time TIMESTAMP);" |
|
|
|
base_model_id = "microsoft/Phi-3-mini-4k-instruct" |
|
dataset = "b-mc2/sql-create-context" |
|
|
|
def prompt_model(model_id, system_prompt, user_prompt, schema): |
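    # Run the model through a transformers chat text-generation pipeline and return the final (assistant) message.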
|
pipe = pipeline("text-generation", |
|
model=model_id, |
|
model_kwargs={"torch_dtype": torch.bfloat16}, |
|
device_map="auto", |
|
max_new_tokens=1000) |
|
messages = [ |
|
{"role": "system", "content": system_prompt.format(schema=schema)}, |
|
{"role": "user", "content": user_prompt}, |
|
{"role": "assistant", "content": ""} |
|
] |
|
output = pipe(messages) |
|
result = output[0]["generated_text"][-1]["content"] |
|
print(result) |
|
return result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def fine_tune_model(base_model_id, dataset): |
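    # Kick off the fine-tuning routine defined in test() and return a placeholder model id for display in the UI.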
|
test(base_model_id, dataset) |
|
|
|
|
|
|
|
|
|
return "fine_tuned_model_id" |
|
|
|
def create_conversation(sample): |
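    # Convert one sql-create-context sample (context, question, answer) into system/user/assistant messages for SFT.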
|
return { |
|
"messages": [ |
|
{"role": "system", "content": system_prompt.format(schema=sample["context"])}, |
|
{"role": "user", "content": sample["question"]}, |
|
{"role": "assistant", "content": sample["answer"]} |
|
] |
|
} |
|
|
|
|
|
def formatting_prompts_func(examples, tokenizer):
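    # Render ShareGPT-style "conversations" into plain text with role headers plus the eos token (not called by the current flow).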
|
convos = examples["conversations"] |
|
texts = [] |
|
mapper = {"system": "system\n", "human": "\nuser\n", "gpt": "\nassistant\n"} |
|
end_mapper = {"system": "", "human": "", "gpt": ""} |
|
for convo in convos: |
|
text = "".join(f"{mapper[(turn := x['from'])]} {x['value']}\n{end_mapper[turn]}" for x in convo) |
|
texts.append(f"{text}{tokenizer.eos_token}") |
|
return {"text": texts} |
|
|
|
def test(base_model_id, dataset): |
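    # LoRA supervised fine-tuning; the base_model_id and dataset arguments are unused here, training runs on the hardcoded Phi-3 checkpoint and HuggingFaceH4/ultrachat_200k.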
|
|
|
|
|
|
|
    print("Building training configuration")
|
training_config = { |
|
"bf16": True, |
|
"do_eval": False, |
|
"learning_rate": 5.0e-06, |
|
"log_level": "info", |
|
"logging_steps": 20, |
|
"logging_strategy": "steps", |
|
"lr_scheduler_type": "cosine", |
|
"num_train_epochs": 1, |
|
"max_steps": -1, |
|
"output_dir": "./checkpoint_dir", |
|
"overwrite_output_dir": True, |
|
"per_device_eval_batch_size": 4, |
|
"per_device_train_batch_size": 4, |
|
"remove_unused_columns": True, |
|
"save_steps": 100, |
|
"save_total_limit": 1, |
|
"seed": 0, |
|
"gradient_checkpointing": True, |
|
"gradient_checkpointing_kwargs":{"use_reentrant": False}, |
|
"gradient_accumulation_steps": 1, |
|
"warmup_ratio": 0.2, |
|
} |
|
|
|
    print("Building LoRA configuration")
|
peft_config = { |
|
"r": 16, |
|
"lora_alpha": 32, |
|
"lora_dropout": 0.05, |
|
"bias": "none", |
|
"task_type": "CAUSAL_LM", |
|
"target_modules": "all-linear", |
|
"modules_to_save": None, |
|
} |
|
train_conf = TrainingArguments(**training_config) |
|
peft_conf = LoraConfig(**peft_config) |
|
|
|
|
|
|
|
|
|
|
|
    print("Configuring logging")
|
logging.basicConfig( |
|
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
|
datefmt="%Y-%m-%d %H:%M:%S", |
|
handlers=[logging.StreamHandler(sys.stdout)], |
|
) |
|
log_level = train_conf.get_process_log_level() |
|
logger = logging.getLogger("FT") |
|
logger.setLevel(log_level) |
|
datasets.utils.logging.set_verbosity(log_level) |
|
transformers.utils.logging.set_verbosity(log_level) |
|
transformers.utils.logging.enable_default_handler() |
|
transformers.utils.logging.enable_explicit_format() |
|
|
|
|
|
    print("Logging run parameters")
|
logger.warning( |
|
f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}" |
|
+ f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}" |
|
) |
|
logger.info(f"Training/evaluation parameters {train_conf}") |
|
logger.info(f"PEFT parameters {peft_conf}") |
|
|
|
|
|
|
|
|
|
|
|
    print("Preparing model load settings")
|
checkpoint_path = "microsoft/Phi-3-mini-4k-instruct" |
|
|
|
model_kwargs = dict( |
|
use_cache=False, |
|
trust_remote_code=True, |
|
|
|
torch_dtype=torch.bfloat16, |
|
device_map=None |
|
) |
|
    print("Loading base model and tokenizer")
|
model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs) |
|
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path) |
|
tokenizer.model_max_length = 2048 |
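    # Pad with the unk token rather than eos so the model still learns to emit eos, and pad on the right for training.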
|
tokenizer.pad_token = tokenizer.unk_token |
|
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) |
|
tokenizer.padding_side = 'right' |
|
|
|
|
|
|
|
|
|
|
|
    print("Loading dataset and applying chat template")
|
def apply_chat_template(example, tokenizer): |
|
messages = example["messages"] |
|
example["text"] = tokenizer.apply_chat_template( |
|
messages, tokenize=False, add_generation_prompt=False) |
|
return example |
|
|
|
raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k") |
|
train_dataset = raw_dataset["train_sft"] |
|
test_dataset = raw_dataset["test_sft"] |
|
column_names = list(train_dataset.features) |
|
|
|
    print("Processing train split")
|
processed_train_dataset = train_dataset.map( |
|
apply_chat_template, |
|
fn_kwargs={"tokenizer": tokenizer}, |
|
num_proc=10, |
|
remove_columns=column_names, |
|
desc="Applying chat template to train_sft", |
|
) |
|
|
|
    print("Processing test split")
|
processed_test_dataset = test_dataset.map( |
|
apply_chat_template, |
|
fn_kwargs={"tokenizer": tokenizer}, |
|
num_proc=10, |
|
remove_columns=column_names, |
|
desc="Applying chat template to test_sft", |
|
) |
|
|
|
|
|
|
|
|
|
|
|
    print("Starting training")
|
trainer = SFTTrainer( |
|
model=model, |
|
args=train_conf, |
|
peft_config=peft_conf, |
|
train_dataset=processed_train_dataset, |
|
eval_dataset=processed_test_dataset, |
|
max_seq_length=2048, |
|
dataset_text_field="text", |
|
tokenizer=tokenizer, |
|
packing=True |
|
) |
|
train_result = trainer.train() |
|
metrics = train_result.metrics |
|
trainer.log_metrics("train", metrics) |
|
trainer.save_metrics("train", metrics) |
|
trainer.save_state() |
|
|
|
|
|
|
|
|
|
|
|
    print("Evaluating")
|
tokenizer.padding_side = 'left' |
|
metrics = trainer.evaluate() |
|
metrics["eval_samples"] = len(processed_test_dataset) |
|
trainer.log_metrics("eval", metrics) |
|
trainer.save_metrics("eval", metrics) |
|
|
|
|
|
|
|
|
|
|
|
    print("Saving model")
|
trainer.save_model(train_conf.output_dir) |
|
|
|
def download_model(base_model_id): |
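    # Download the base model, save it to a local folder named after the model id, and return its tokenizer.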
|
tokenizer = AutoTokenizer.from_pretrained(base_model_id) |
|
model = AutoModelForCausalLM.from_pretrained(base_model_id) |
|
model.save_pretrained(base_model_id) |
|
return tokenizer |
|
|
|
def prepare_dataset(dataset): |
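    # Load b-mc2/sql-create-context, sample 12,500 rows, convert them to chat messages, split 10,000/2,500, and write train/test JSON files.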
|
dataset = load_dataset(dataset, split="train") |
|
dataset = dataset.shuffle().select(range(12500)) |
|
|
|
|
|
    dataset = dataset.map(create_conversation, remove_columns=dataset.features, batched=False)
|
|
|
dataset = dataset.train_test_split(test_size=2500/12500) |
|
|
|
print(dataset["train"][345]["messages"]) |
|
|
|
|
|
dataset["train"].to_json("train_dataset.json", orient="records") |
|
dataset["test"].to_json("test_dataset.json", orient="records") |
|
|
|
|
|
def train_model(model_id): |
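    # QLoRA fine-tuning: load the base model with 4-bit NF4 quantization, attach a LoRA adapter, and train with SFTTrainer on the prepared JSON dataset.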
|
    print("Loading training data")
|
dataset = load_dataset("json", data_files="train_dataset.json", split="train") |
|
|
|
bnb_config = BitsAndBytesConfig( |
|
load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 |
|
) |
|
|
|
    print("Loading base model in 4-bit")
|
|
|
model = AutoModelForCausalLM.from_pretrained( |
|
model_id, |
|
device_map="auto", |
|
|
|
torch_dtype=torch.bfloat16, |
|
quantization_config=bnb_config |
|
) |
|
tokenizer = AutoTokenizer.from_pretrained(model_id) |
|
tokenizer.padding_side = 'right' |
|
|
|
    print("Setting up chat format and LoRA")
|
|
|
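    # setup_chat_format (from trl) applies the ChatML chat template, adds its special tokens, and resizes the embeddings.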
model, tokenizer = setup_chat_format(model, tokenizer) |
|
|
|
peft_config = LoraConfig( |
|
lora_alpha=128, |
|
lora_dropout=0.05, |
|
r=256, |
|
bias="none", |
|
target_modules="all-linear", |
|
task_type="CAUSAL_LM", |
|
) |
|
|
|
    print("Building training arguments")
|
args = TrainingArguments( |
|
output_dir="code-llama-7b-text-to-sql", |
|
num_train_epochs=3, |
|
per_device_train_batch_size=3, |
|
gradient_accumulation_steps=2, |
|
gradient_checkpointing=True, |
|
optim="adamw_torch_fused", |
|
logging_steps=10, |
|
save_strategy="epoch", |
|
learning_rate=2e-4, |
|
bf16=True, |
|
tf32=True, |
|
max_grad_norm=0.3, |
|
warmup_ratio=0.03, |
|
lr_scheduler_type="constant", |
|
push_to_hub=True, |
|
report_to="tensorboard", |
|
) |
|
|
|
max_seq_length = 3072 |
|
|
|
    print("Creating SFTTrainer")
|
trainer = SFTTrainer( |
|
model=model, |
|
args=args, |
|
train_dataset=dataset, |
|
peft_config=peft_config, |
|
max_seq_length=max_seq_length, |
|
tokenizer=tokenizer, |
|
packing=True, |
|
dataset_kwargs={ |
|
"add_special_tokens": False, |
|
"append_concat_token": False, |
|
} |
|
) |
|
|
|
    print("Starting training")
|
|
|
trainer.train() |
|
|
|
    print("Saving model")
|
|
|
trainer.save_model() |
|
|
|
del model |
|
del trainer |
|
torch.cuda.empty_cache() |
|
|
|
def upload_model(base_model_id, tokenizer): |
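    # Create a repo under the configured Hugging Face profile and upload the locally saved model folder and tokenizer.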
|
fine_tuned_model_id = replace_hf_profile(base_model_id) |
|
login(token=os.environ["HF_TOKEN"]) |
|
api = HfApi() |
|
|
|
api.create_repo(repo_id=fine_tuned_model_id) |
|
api.upload_folder( |
|
folder_path=base_model_id, |
|
repo_id=fine_tuned_model_id |
|
) |
|
tokenizer.push_to_hub(fine_tuned_model_id) |
|
return fine_tuned_model_id |
|
|
|
def replace_hf_profile(base_model_id): |
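    # Keep the model name but swap the namespace for the configured profile, e.g. "microsoft/Phi-3-mini-4k-instruct" -> "bstraehle/Phi-3-mini-4k-instruct".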
|
model_id = base_model_id[base_model_id.rfind('/')+1:] |
|
return f"{hf_profile}/{model_id}" |
|
|
|
def process(action, base_model_id, dataset, system_prompt, user_prompt, schema): |
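    # Dispatch the selected UI action: fine-tune the base model or prompt the fine-tuned model.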
|
|
|
if action == action_1: |
|
result = fine_tune_model(base_model_id, dataset) |
|
elif action == action_2: |
|
fine_tuned_model_id = replace_hf_profile(base_model_id) |
|
result = prompt_model(fine_tuned_model_id, system_prompt, user_prompt, schema) |
|
return result |
|
|
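# Gradio UI: pick an action plus the model, dataset, prompts, and schema; the result appears in the Completion box.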
|
demo = gr.Interface(fn=process, |
|
inputs=[gr.Radio([action_1, action_2], label = "Action", value = action_1), |
|
gr.Textbox(label = "Base Model ID", value = base_model_id, lines = 1), |
|
gr.Textbox(label = "Dataset", value = dataset, lines = 1), |
|
gr.Textbox(label = "System Prompt", value = system_prompt, lines = 2), |
|
gr.Textbox(label = "User Prompt", value = user_prompt, lines = 2), |
|
gr.Textbox(label = "Schema", value = schema, lines = 2)], |
|
                    outputs=[gr.Textbox(label = "Completion", value = os.environ.get("OUTPUT", ""))])
|
demo.launch() |