Update app.py
app.py CHANGED

@@ -5,9 +5,6 @@ from datasets import load_dataset
 from huggingface_hub import HfApi, login
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
-
 hf_profile = "bstraehle"
 
 action_1 = "Fine-tune pre-trained model"
@@ -43,9 +40,9 @@ def fine_tune_model(base_model_id, dataset):
 
 # Load pre-trained model and tokenizer
 model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
-model =
-tokenizer =
-
+model = LlamaForCausalLM.from_pretrained(model_name)
+tokenizer = LlamaTokenizer.from_pretrained(model_name)
+
 dataset = dataset.map(preprocess, batched=True)
 
 # Split dataset to training and validation sets
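One caveat with the added lines: `LlamaForCausalLM` and `LlamaTokenizer` are not imported in the hunks shown, and `LlamaTokenizer` is the SentencePiece-based tokenizer from the Llama 1/2 era, so it may not load a Llama 3.1 checkpoint. A minimal sketch of the same step using the `AutoTokenizer` / `AutoModelForCausalLM` classes the file already imports at the top; the dtype and device settings are assumptions for illustration, not part of this commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"

# AutoTokenizer resolves to the fast tokenizer that Llama 3.1
# checkpoints ship with, avoiding the LlamaTokenizer mismatch.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Assumed settings: bfloat16 and device_map="auto" (requires the
# accelerate package) are common choices for an 8B model; adjust
# to the hardware actually available.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
```

Note that the Meta-Llama-3.1 repository is gated, so `huggingface_hub.login` (already imported in app.py) must run with a token that has access before either `from_pretrained` call will succeed.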