import gradio as gr
import os
import torch
from datasets import load_dataset
from huggingface_hub import HfApi, login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Run on NVIDIA A10G Large (sleep after 1 hour)
#
# Model IDs:
#   google/gemma-2-9b-it
#   meta-llama/Meta-Llama-3-8B-Instruct
#
# Datasets:
#   gretelai/synthetic_text_to_sql

profile = "bstraehle"
action_1 = "Prompt base model"
action_2 = "Prompt fine-tuned model"
action_3 = "Fine-tune base model"
system_prompt = "You are a text-to-SQL query translator. Given a question in English, generate a SQL query based on the provided SCHEMA. Do not generate any additional text. SCHEMA: "
user_prompt = "What is the total trade value and average price for each trader and stock in the trade_history table?"
schema = "CREATE TABLE trade_history (id INT, trader_id INT, stock VARCHAR(255), price DECIMAL(5,2), quantity INT, trade_time TIMESTAMP);"
base_model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
fine_tuned_model_id = "bstraehle/Meta-Llama-3-8B-Instruct"
dataset = "gretelai/synthetic_text_to_sql"

def prompt_model(model_id, system_prompt, user_prompt):
    # dtype and device placement belong on the pipeline constructor,
    # not on the generation call.
    pipe = pipeline(
        "text-generation",
        model=model_id,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device="cuda",
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    output = pipe(messages)
    # The pipeline returns the whole conversation; the last message holds
    # the assistant's completion.
    return output[0]["generated_text"][-1]["content"]
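
# Example usage (mirrors what process() below passes in): the schema is
# appended to the system prompt so the model sees the table definition.
#
#   completion = prompt_model(base_model_id, system_prompt + schema, user_prompt)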

def fine_tune_model(model_id):
    # NOTE: No training happens here yet; this downloads the base model and
    # re-uploads it under this profile.
    tokenizer = download_model(model_id)
    model_repo_name = upload_model(model_id, tokenizer)
    return model_repo_name
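
# Sketch only, not wired into the app: what a real SFT step over the gretelai
# dataset could look like with the plain transformers Trainer API. Assumptions
# (not in the original code): the dataset exposes sql_prompt / sql_context /
# sql columns, and "sft_output" is a hypothetical output directory.
def sft_train_sketch(model_id, dataset_id):
    from transformers import (DataCollatorForLanguageModeling, Trainer,
                              TrainingArguments)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token  # Llama 3 has no pad token
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    ds = load_dataset(dataset_id, split="train")

    def tokenize(example):
        # Fold schema, question, and gold SQL into one training string.
        text = (f"{system_prompt}{example['sql_context']}\n"
                f"Question: {example['sql_prompt']}\nSQL: {example['sql']}")
        return tokenizer(text, truncation=True, max_length=512)

    tokenized = ds.map(tokenize, remove_columns=ds.column_names)
    trainer = Trainer(
        model=model,
        args=TrainingArguments(output_dir="sft_output", num_train_epochs=1,
                               per_device_train_batch_size=1, bf16=True),
        train_dataset=tokenized,
        # Causal-LM collator (mlm=False) copies input_ids into labels.
        data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
    )
    trainer.train()
    return model, tokenizer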

def download_model(model_id):
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    # Save the weights to a local folder named after the model ID so that
    # upload_model can push that folder to the Hub.
    model.save_pretrained(model_id)
    return tokenizer

#def download_dataset(dataset):
#    ds = load_dataset(dataset)
#    return ""
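
# Sketch: peeking at one training row. The column names are assumptions about
# gretelai/synthetic_text_to_sql (see sft_train_sketch above).
#
#   row = load_dataset(dataset, split="train")[0]
#   print(row["sql_prompt"], row["sql_context"], row["sql"])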

def upload_model(model_id, tokenizer):
    model_name = model_id[model_id.rfind("/") + 1:]
    model_repo_name = f"{profile}/{model_name}"
    login(token=os.environ["HF_TOKEN"])
    api = HfApi()
    # exist_ok avoids an error if the target repo already exists.
    api.create_repo(repo_id=model_repo_name, exist_ok=True)
    # Upload the locally saved weights (see download_model), then the tokenizer.
    api.upload_folder(
        folder_path=model_id,
        repo_id=model_repo_name
    )
    tokenizer.push_to_hub(model_repo_name)
    return model_repo_name
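
# Alternative sketch: pushing the loaded objects directly skips the local
# save / upload_folder round-trip (both are standard transformers methods).
#
#   model.push_to_hub(model_repo_name)
#   tokenizer.push_to_hub(model_repo_name)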

def process(action, system_prompt, user_prompt, schema, base_model_id, fine_tuned_model_id, dataset):
    # The system prompt ends with "SCHEMA: ", so the schema is appended to it.
    if action == action_1:
        result = prompt_model(base_model_id, system_prompt + schema, user_prompt)
    elif action == action_2:
        result = prompt_model(fine_tuned_model_id, system_prompt + schema, user_prompt)
    elif action == action_3:
        result = fine_tune_model(base_model_id)
    else:
        result = ""
    return result

demo = gr.Interface(
    fn=process,
    inputs=[
        gr.Radio([action_1, action_2, action_3], label="Action", value=action_2),
        gr.Textbox(label="System Prompt", value=system_prompt, lines=2),
        gr.Textbox(label="User Prompt", value=user_prompt, lines=2),
        gr.Textbox(label="Schema", value=schema, lines=2),
        gr.Textbox(label="Base Model ID", value=base_model_id, lines=1),
        gr.Textbox(label="Fine-Tuned Model ID", value=fine_tuned_model_id, lines=1),
        gr.Textbox(label="Dataset", value=dataset, lines=1),
    ],
    outputs=[gr.Textbox(label="Completion")],
)
demo.launch()