Commit d9267f6
reach-vb and Wauplin (HF staff) committed
1 parent: 69d19e7

Use OAuth ("Sign in with Hugging Face") (#49)


- Use OAuth ("Sign in with Hugging Face") (07dffe1045ff91065b02a69ca126db7ca759842f)
- Update README.md (036051e16897f822b459d660af57712c5b5ee18b)
- Update Dockerfile (34f7cd9aff645bcce09f2eb977b82a360c59e79e)


Co-authored-by: Lucain Pouget <[email protected]>

Files changed (3)
  1. Dockerfile +1 -1
  2. README.md +5 -0
  3. app.py +13 -15
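
The commit swaps the manual "HF Write Token" textbox for Gradio's built-in "Sign in with Hugging Face" flow: a gr.LoginButton starts the OAuth handshake, and Gradio injects the resulting token into any handler whose signature has a parameter annotated gr.OAuthToken. A minimal standalone sketch of that pattern (not code from this commit; greet and the component names are illustrative):

    import gradio as gr

    def greet(oauth_token: gr.OAuthToken | None):
        # Gradio fills this parameter from the user's session because of the
        # gr.OAuthToken annotation; it is None when nobody is signed in.
        if oauth_token is None:
            return "Please sign in with Hugging Face."
        return "Signed in; a scoped access token is available."

    with gr.Blocks() as demo:
        gr.LoginButton()
        status = gr.Textbox(label="status")
        check = gr.Button("Check login")
        check.click(greet, inputs=None, outputs=status)

    demo.launch()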
Dockerfile CHANGED
@@ -37,7 +37,7 @@ RUN pyenv install ${PYTHON_VERSION} && \
     pyenv global ${PYTHON_VERSION} && \
     pyenv rehash && \
     pip install --no-cache-dir -U pip setuptools wheel && \
-    pip install "huggingface-hub" "hf-transfer" "gradio>=4.26.0" "gradio_huggingfacehub_search==0.0.7"
+    pip install "huggingface-hub" "hf-transfer" "gradio>=4.28.0" "gradio_huggingfacehub_search==0.0.7"
 
 COPY --chown=1000 . ${HOME}/app
 RUN git clone https://github.com/ggerganov/llama.cpp
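
(The only change here is the Gradio floor, 4.26.0 to 4.28.0; presumably that is the minimum release with the OAuth behavior, gr.LoginButton plus gr.OAuthToken injection, that the updated app.py relies on.)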
README.md CHANGED
@@ -4,6 +4,11 @@ emoji: 🦙
 colorFrom: gray
 colorTo: pink
 sdk: docker
+hf_oauth: true
+hf_oauth_scopes:
+  - read-repos
+  - write-repos
+  - manage-repos
 pinned: false
 ---
 
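Setting hf_oauth: true makes the Hub register an OAuth app for the Space and inject its credentials into the running container as environment variables, which gr.LoginButton picks up automatically; the three scopes cover reading the source model repo and creating and writing the quantized repo under the user's namespace. A rough sketch of what the container sees (variable names as given in the Spaces OAuth docs; treat the exact set as an assumption):

    import os

    # Injected by the Spaces runtime when `hf_oauth: true` is set:
    print(os.environ.get("OAUTH_CLIENT_ID"))  # OAuth client id of this Space
    print(os.environ.get("OAUTH_SCOPES"))     # e.g. "openid profile read-repos write-repos manage-repos"
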
app.py CHANGED
@@ -25,12 +25,14 @@ def script_to_use(model_id, api):
     arch = arch[0]
     return "convert.py" if arch in LLAMA_LIKE_ARCHS else "convert-hf-to-gguf.py"
 
-def process_model(model_id, q_method, hf_token, private_repo):
+def process_model(model_id, q_method, private_repo, oauth_token: gr.OAuthToken | None):
+    if oauth_token is None:
+        raise ValueError("You must be logged in to use GGUF-my-repo")
     model_name = model_id.split('/')[-1]
     fp16 = f"{model_name}/{model_name.lower()}.fp16.bin"
 
     try:
-        api = HfApi(token=hf_token)
+        api = HfApi(token=oauth_token.token)
 
         dl_pattern = ["*.md", "*.json", "*.model"]
 
@@ -48,7 +50,7 @@ def process_model(model_id, q_method, hf_token, private_repo):
 
         dl_pattern += pattern
 
-        snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, token=hf_token, allow_patterns=dl_pattern)
+        api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
         print("Model downloaded successfully!")
 
         conversion_script = script_to_use(model_id, api)
@@ -72,11 +74,13 @@ def process_model(model_id, q_method, hf_token, private_repo):
         print("Repo created successfully!", new_repo_url)
 
         try:
-            card = ModelCard.load(model_id,)
+            card = ModelCard.load(model_id, token=oauth_token.token)
         except:
             card = ModelCard("")
-        card.data.tags = ["llama-cpp"] if card.data.tags is None else card.data.tags + ["llama-cpp"]
-        card.data.tags += ["gguf-my-repo"]
+        if card.data.tags is None:
+            card.data.tags = []
+        card.data.tags.append("llama-cpp")
+        card.data.tags.append("gguf-my-repo")
         card.text = dedent(
             f"""
             # {new_repo_id}
@@ -155,25 +159,19 @@ iface = gr.Interface(
             value="Q4_K_M",
             filterable=False
         ),
-        gr.Textbox(
-            lines=1,
-            label="HF Write Token",
-            info="https://hf.co/settings/token",
-            type="password",
-        ),
         gr.Checkbox(
             value=False,
             label="Private Repo",
             info="Create a private repo under your username."
-        )
+        ),
+        gr.LoginButton(min_width=250),
     ],
     outputs=[
         gr.Markdown(label="output"),
         gr.Image(show_label=False),
     ],
     title="Create your own GGUF Quants, blazingly fast ⚡!",
-    description="The space takes an HF repo as an input, quantises it and creates a Public repo containing the selected quant under your HF user namespace. You need to specify a write token obtained in https://hf.co/settings/tokens.",
-    article="<p>Find your write token at <a href='https://huggingface.co/settings/tokens' target='_blank'>token settings</a></p>",
+    description="The space takes an HF repo as an input, quantises it and creates a Public repo containing the selected quant under your HF user namespace.",
 )
 
 # Launch the interface
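
Design note: gr.LoginButton slots into the inputs list where the token textbox used to sit, so the positional mapping to process_model's parameters is unchanged. Because the matching parameter is annotated gr.OAuthToken | None, Gradio (per its documented OAuth support) passes the session's token, or None when logged out, instead of a component value, and every Hub call (HfApi, api.snapshot_download, ModelCard.load) authenticates with oauth_token.token rather than a pasted write token.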