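# Gradio app that downloads a model from the Hugging Face Hub, saves it locally,
# and republishes the weights and tokenizer under a new Hub repository.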
import os

import gradio as gr
from huggingface_hub import HfApi, login
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

def process(model_id, dataset):
    # Download the source model from the Hugging Face Hub so it can be republished.
    # AutoModelForSeq2SeqLM expects an encoder-decoder checkpoint such as T5/FLAN-T5.
    # The dataset input is not used in this step.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

    # Save the model weights to a local folder before uploading.
    model_path = 't5-fine-tune-save-example'
    model.save_pretrained(model_path)

    # Authenticate with the Hub using the HF_TOKEN environment variable (write access required).
    login(token=os.environ["HF_TOKEN"])
    api = HfApi()

    # Build the target repo id; keep only the model name so the result remains a valid
    # "namespace/name" pair even when model_id already contains an organization prefix.
    model_repo_name = f"bstraehle/{model_id.split('/')[-1]}"

    # Create the repository on the Hub (do not fail if it already exists).
    api.create_repo(repo_id=model_repo_name, exist_ok=True)
    
    # Upload the locally saved model folder to the new repository.
    api.upload_folder(
        folder_path=model_path,
        repo_id=model_repo_name
    )
    
    # Push the tokenizer to the same repository on the Hub.
    tokenizer.push_to_hub(model_repo_name)

    return "Done"

# Simple Gradio UI: model id and dataset text inputs, single status text output.
# The default model is an encoder-decoder checkpoint so it matches AutoModelForSeq2SeqLM.
demo = gr.Interface(fn=process,
                    inputs=[gr.Textbox(label="Model ID", value="google/flan-t5-base", lines=1),
                            gr.Textbox(label="Dataset", value="imdb", lines=1)],
                    outputs=[gr.Textbox(label="Completion")])
demo.launch()
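# Note: HF_TOKEN must be available in the environment (e.g., as a Space secret) and must
# be a token with write permission; otherwise create_repo and upload_folder will be rejected.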