Update app.py
app.py
CHANGED
@@ -1,12 +1,18 @@
 # https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/sample_finetune.py
 import gradio as gr
 import os, torch
+#from datasets import load_dataset
+#from huggingface_hub import HfApi, login
+#from peft import AutoPeftModelForCausalLM, LoraConfig
+#from random import randint
+#from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments, pipeline
+#from trl import SFTTrainer, setup_chat_format
+
+import datasets, sys, logging, torch, transformers
 from datasets import load_dataset
-from huggingface_hub import HfApi, login
-from peft import AutoPeftModelForCausalLM, LoraConfig
-from random import randint
-from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments, pipeline
-from trl import SFTTrainer, setup_chat_format
+from peft import LoraConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
+from trl import SFTTrainer
 
 # Fine-tune on NVidia 4xL4 (sleep after 10 hours)
 
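For context (not part of this commit), the imports added here are the usual ingredients of a QLoRA-style supervised fine-tuning run: BitsAndBytesConfig for 4-bit loading, LoraConfig for the adapters, TrainingArguments for the run settings, and trl's SFTTrainer to drive training. The sketch below shows one common way to wire them together; the model id, dataset, and hyperparameters are placeholders, and the exact SFTTrainer keyword arguments depend on the installed trl version (newer releases move dataset_text_field and max_seq_length into SFTConfig).

# Minimal QLoRA-style SFT sketch using the imports added in this commit.
# All names and hyperparameters below are illustrative assumptions, not taken from app.py.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
from trl import SFTTrainer

model_id = "microsoft/Phi-3-mini-4k-instruct"  # assumption: same base model as the linked sample_finetune.py

# Load the base model in 4-bit so it fits comfortably in GPU memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

# LoRA adapter configuration (ranks and dropout are placeholder values).
peft_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")

# Placeholder dataset with a plain "text" column.
dataset = load_dataset("imdb", split="train[:1%]")

# SFTTrainer applies the LoRA config and trains only the adapter weights.
# Keyword arguments follow the older trl API (tokenizer/dataset_text_field on the trainer).
trainer = SFTTrainer(
    model=model,
    args=TrainingArguments(output_dir="./sft-out", per_device_train_batch_size=1, num_train_epochs=1),
    train_dataset=dataset,
    peft_config=peft_config,
    tokenizer=tokenizer,
    dataset_text_field="text",
    max_seq_length=512,
)
trainer.train()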