bstraehle committed on
Commit
065dd39
1 Parent(s): 38576e5

Update app.py

Files changed (1):
  app.py +5 -7
app.py CHANGED

@@ -18,8 +18,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 profile = "bstraehle"
 
 action_1 = "Prompt base model"
-action_2 = "Prompt fine-tuned model"
-action_3 = "Fine-tune base model"
+action_2 = "Fine-tune base model"
+action_3 = "Prompt fine-tuned model"
 
 system_prompt = "You are a text to SQL query translator. Given a question in English, generate a SQL query based on the provided SCHEMA. Do not generate any additional text. SCHEMA: {schema}"
 user_prompt = "What is the total trade value and average price for each trader and stock in the trade_history table?"
@@ -37,9 +37,7 @@ def prompt_model(model_id, system_prompt, user_prompt, schema):
         {"role": "user", "content": user_prompt},
         {"role": "assistant", "content": ""}
     ]
-
-    print(messages)
-
+
     output = pipe(messages)
 
     return output[0]["generated_text"][-1]["content"]
@@ -82,9 +80,9 @@ def process(action, system_prompt, user_prompt, schema, base_model_id, fine_tune
     if action == action_1:
         result = prompt_model(base_model_id, system_prompt, user_prompt, schema)
     elif action == action_2:
-        result = prompt_model(fine_tuned_model_id, system_prompt, user_prompt, schema)
-    elif action == action_3:
         result = fine_tune_model(base_model_id)
+    elif action == action_3:
+        result = prompt_model(fine_tuned_model_id, system_prompt, user_prompt, schema)
 
     return result
 
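For context, a minimal, self-contained sketch of the prompt path touched by the second hunk. The diff does not show how pipe is constructed or how the {schema} placeholder is filled, so the pipeline setup and the str.format call below are assumptions; the message list and the output indexing are taken directly from the hunk.

from transformers import pipeline

def prompt_model(model_id, system_prompt, user_prompt, schema):
    # Assumption: pipe is a standard transformers text-generation pipeline for
    # model_id; its construction is not visible in this diff.
    pipe = pipeline("text-generation", model=model_id)

    # Assumption: the {schema} placeholder in system_prompt is filled with str.format.
    messages = [
        {"role": "system", "content": system_prompt.format(schema=schema)},
        {"role": "user", "content": user_prompt},
        {"role": "assistant", "content": ""}
    ]

    output = pipe(messages)

    # With chat-style input, the pipeline returns the whole conversation;
    # the last message holds the generated SQL.
    return output[0]["generated_text"][-1]["content"]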