Update app.py
app.py
CHANGED
@@ -1,13 +1,14 @@
 import gradio as gr
 import torch
 import pandas as pd
-from datasets import Dataset, DatasetDict, load_dataset
-from peft import LoraConfig, PeftModel, prepare_model_for_kbit_training, get_peft_model
-from transformers import (AutoTokenizer, BitsAndBytesConfig, TrainingArguments, AutoModelForSequenceClassification, Trainer, EarlyStoppingCallback, DataCollatorWithPadding)
 import bitsandbytes as bnb
 import evaluate
 import numpy as np
 import random
+import huggingface_hub
+from datasets import Dataset, DatasetDict, load_dataset
+from peft import LoraConfig, PeftModel, prepare_model_for_kbit_training, get_peft_model
+from transformers import (AutoTokenizer, BitsAndBytesConfig, TrainingArguments, AutoModelForSequenceClassification, Trainer, EarlyStoppingCallback, DataCollatorWithPadding)
 
 def process(model_id, dataset):
     # Step 1: Load dataset
@@ -45,6 +46,8 @@ def process(model_id, dataset):
 
     #Step 5: Fine-tune the model
 
+    huggingface_hub.login()
+
     model_id = model_id
     model = AutoModelForSequenceClassification.from_pretrained(model_id)
 
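The new huggingface_hub.login() call authenticates against the Hugging Face Hub before the model is loaded in Step 5, which is what allows gated checkpoints to be downloaded and the fine-tuned weights to be pushed later. Below is a minimal sketch of how the added import and login fit together with the peft/transformers imports from this diff; the model id, num_labels, and LoRA settings are illustrative placeholders, not values taken from app.py.

    import huggingface_hub
    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    # Authenticate first; prompts for a Hub token if one is not already cached.
    huggingface_hub.login()

    model_id = "distilbert-base-uncased"  # placeholder checkpoint, not the one used in app.py
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2)

    # Illustrative LoRA config; q_lin/v_lin are DistilBERT's attention projections.
    lora_config = LoraConfig(task_type="SEQ_CLS", r=8, lora_alpha=16,
                             lora_dropout=0.1, target_modules=["q_lin", "v_lin"])
    model = get_peft_model(model, lora_config)  # wrap the base model with trainable LoRA adapters
    model.print_trainable_parameters()

In a Space, the token would more likely come from a repository secret and be passed as login(token=...) rather than entered interactively.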