import gradio as gr
import os
from huggingface_hub import HfApi, login
from transformers import AutoTokenizer, AutoModelForCausalLM

def process(model_id, dataset):
    # Download the model and tokenizer from Hugging Face so they can be republished.
    # Note: the `dataset` input is currently unused by this function.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)

    # Save the model weights to a local folder named after the model ID.
    model_path = model_id
    model.save_pretrained(model_path)

    # Authenticate with the Hub using the HF_TOKEN environment variable.
    login(token=os.environ["HF_TOKEN"])
    api = HfApi()
    model_repo_name = "bstraehle/Meta-Llama-3-8B"
    
    # Create the target repo on the Hub; exist_ok avoids an error if it already exists.
    api.create_repo(repo_id=model_repo_name, exist_ok=True)
    
    # Upload the saved model folder from local disk to the Hub repo.
    api.upload_folder(
        folder_path=model_path,
        repo_id=model_repo_name,
    )
    
    # Push the tokenizer files to the same Hub repo.
    tokenizer.push_to_hub(model_repo_name)

    return "Done"

demo = gr.Interface(
    fn=process,
    inputs=[
        gr.Textbox(label="Model ID", value="meta-llama/Meta-Llama-3-8B", lines=1),  # e.g. google/gemma-2b
        gr.Textbox(label="Dataset", value="imdb", lines=1),
    ],
    outputs=[gr.Textbox(label="Completion")],
)
demo.launch()