bstraehle committed on
Commit
c8534fb
1 Parent(s): 6f58fe9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -18
app.py CHANGED
@@ -3,34 +3,36 @@ import os
3
  from huggingface_hub import HfApi, login
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
- def process(model_id, dataset):
7
- print("111")
8
- # Download Sample Model from Hugging Face to Publish Again
 
 
 
 
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_id)
10
  model = AutoModelForCausalLM.from_pretrained(model_id)
11
- # Local Path of Model
12
- print("222")
13
- model_path = model_id
14
- model.save_pretrained(model_path)
 
 
 
15
  login(token=os.environ["HF_TOKEN"])
16
  api = HfApi()
17
- model_repo_name = "bstraehle/Meta-Llama-3-8B-Instruct"
18
-
19
- #Create Repo in Hugging Face
20
- print("333")
21
  api.create_repo(repo_id=model_repo_name)
22
-
23
- #Upload Model folder from Local to HuggingFace
24
- print("444")
25
  api.upload_folder(
26
  folder_path=model_path,
27
  repo_id=model_repo_name
28
  )
 
 
 
 
 
29
 
30
- # Publish Model Tokenizer on Hugging Face
31
- print("555")
32
- tokenizer.push_to_hub(model_repo_name)
33
-
34
  return "Done"
35
 
36
  demo = gr.Interface(fn=process,
 
3
  from huggingface_hub import HfApi, login
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
+ # NVidia A10G Large
7
+
8
+ # google/gemma-2-9b-it
9
+ # meta-llama/Meta-Llama-3-8B-Instruct
10
+
11
+ profile = "bstraehle"
12
+
13
def download_tokenizer_and_model(model_id):
    """Download the tokenizer and model for `model_id` from the Hugging Face Hub.

    The model weights are also saved to a local folder named after `model_id`,
    so that `upload_model` can later push that folder to a new repo.

    Args:
        model_id: Hub identifier, e.g. "meta-llama/Meta-Llama-3-8B-Instruct".

    Returns:
        (tokenizer, model) tuple of the downloaded artifacts.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    # Persist locally under the path `model_id`; upload_model uploads this folder.
    model.save_pretrained(model_id)
    return tokenizer, model
18
+
19
def upload_model(tokenizer, model, model_id=None):
    """Create a repo under `profile` and upload the locally saved model plus tokenizer.

    Args:
        tokenizer: tokenizer to push to the new repo.
        model: model whose locally saved folder is uploaded.
        model_id: original Hub id; the part after the last "/" becomes the new
            repo name, and it is also the local folder path that
            `download_tokenizer_and_model` saved the weights to. Defaults to
            the id the model was loaded from when not given.

    Raises:
        KeyError: if the HF_TOKEN environment variable is not set.
    """
    if model_id is None:
        # NOTE(review): relies on transformers exposing the source id as
        # `name_or_path` — confirm against the installed transformers version.
        model_id = model.name_or_path
    # Strip the owner prefix, e.g. "meta-llama/Meta-Llama-3-8B-Instruct" -> "Meta-Llama-3-8B-Instruct".
    model_name = model_id[model_id.rfind('/') + 1:]
    print(model_name)
    model_repo_name = f"{profile}/{model_name}"
    login(token=os.environ["HF_TOKEN"])
    api = HfApi()
    api.create_repo(repo_id=model_repo_name)
    # download_tokenizer_and_model saved the weights under the path `model_id`,
    # so that is the folder to upload (the original referenced an undefined
    # `model_path` here, which raised NameError).
    api.upload_folder(
        folder_path=model_id,
        repo_id=model_repo_name
    )
    tokenizer.push_to_hub(model_repo_name)
31
+
32
def process(model_id, dataset):
    """Gradio handler: download `model_id` and republish it under `profile`.

    Args:
        model_id: Hub identifier of the model to mirror.
        dataset: currently unused; kept for the Gradio interface signature.

    Returns:
        The string "Done" on completion.
    """
    # The original called the nonexistent `download_model` (NameError);
    # the helper defined above is `download_tokenizer_and_model`.
    tokenizer, model = download_tokenizer_and_model(model_id)
    # Pass model_id through so upload_model knows the local folder and repo name.
    upload_model(tokenizer, model, model_id)

    return "Done"
37
 
38
  demo = gr.Interface(fn=process,