Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import os, torch
 from datasets import load_dataset
-from huggingface_hub import
+from huggingface_hub import push_to_hub
 from transformers import AutoModelForCausalLM, AutoTokenizer, Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline

 ACTION_1 = "Prompt base model"

@@ -106,10 +106,15 @@ def fine_tune_model(base_model_name, dataset_name):
     )

     # Train model
-    trainer.train()
+    #trainer.train()

     # Save model to HF
-
+    push_to_hub(
+        local_dir="./output",
+        repo_id=FT_MODEL_NAME,
+        repo_type="model",
+        use_auth_token=True,
+    )

 def prompt_model(model_name, system_prompt, user_prompt, sql_context):
     pipe = pipeline("text-generation",
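As a point of reference, below is a minimal sketch of the save-to-Hub step under the same assumptions the diff makes (the fine-tuned model saved locally to ./output, FT_MODEL_NAME holding the target repo id). It uses huggingface_hub's documented create_repo and upload_folder functions rather than a top-level push_to_hub import, which huggingface_hub does not export; this is an illustrative alternative, not the code in the commit.

# Sketch only: assumes FT_MODEL_NAME (from the diff) names the target Hub repo
# and that the trainer wrote the fine-tuned model files to ./output.
from huggingface_hub import create_repo, upload_folder

# Create the model repo if it does not exist yet (no-op when it already does).
create_repo(FT_MODEL_NAME, repo_type="model", exist_ok=True)

# Upload the locally saved model directory; authentication falls back to the
# cached login or the HF_TOKEN environment variable if no token is passed.
upload_folder(
    repo_id=FT_MODEL_NAME,
    folder_path="./output",
    repo_type="model",
    commit_message="Upload fine-tuned model",
)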