bstraehle committed
Commit 6f711d2
1 Parent(s): 93508c3

Update app.py

Files changed (1): app.py (+1 -3)
app.py CHANGED
@@ -37,7 +37,7 @@ def fine_tune_model(base_model_id, dataset):
 
     # Load pre-trained model and tokenizer
     model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
-    model = AutoModelForCausalLM.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     # Preprocess the dataset
@@ -51,8 +51,6 @@ def fine_tune_model(base_model_id, dataset):
     train_dataset = dataset["train"].shuffle(seed=42).select(range(1000)) # Adjust the range as needed
     val_dataset = dataset["test"].shuffle(seed=42).select(range(100)) # Adjust the range as needed
 
-    torch.cuda.empty_cache()
-
     # Set training arguments
     training_args = Seq2SeqTrainingArguments(
        output_dir="./results",
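For reference, a minimal sketch of the model-loading step after this change, assuming the transformers and accelerate packages are installed (device_map="auto" relies on accelerate to place the weights across available devices) and that access to the gated Meta-Llama checkpoint has been granted on the Hub:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"

# device_map="auto" dispatches the model weights across available GPUs
# (with CPU/disk offload if needed) instead of loading the full
# 8B-parameter model onto a single default device.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)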