eduagarcia committed on
Commit
1841941
1 Parent(s): a6f1b1f

Transfer main configs to ENV variables

Browse files
Files changed (2) hide show
  1. README.md +2 -1
  2. src/envs.py +11 -9
README.md CHANGED
@@ -14,7 +14,8 @@ space_ci: # See https://huggingface.co/spaces/Wauplin/gradio-space-ci
14
  private: true
15
  secrets:
16
  - HF_TOKEN
17
- - H4_TOKEN
 
18
  ---
19
 
20
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
14
  private: true
15
  secrets:
16
  - HF_TOKEN
17
+ - IS_PUBLIC
18
+ - HAS_HIGHER_RATE_LIMIT
19
  ---
20
 
21
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
src/envs.py CHANGED
@@ -5,13 +5,15 @@ from huggingface_hub import HfApi
5
  # clone / pull the lmeh eval data
6
  H4_TOKEN = os.environ.get("H4_TOKEN", None)
7
 
8
- REPO_ID = "HuggingFaceH4/open_llm_leaderboard"
9
- QUEUE_REPO = "open-llm-leaderboard/requests"
10
  DYNAMIC_INFO_REPO = "open-llm-leaderboard/dynamic_model_information"
11
- RESULTS_REPO = "open-llm-leaderboard/results"
12
 
13
- PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
14
- PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
 
 
15
 
16
  IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
17
 
@@ -25,11 +27,11 @@ DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")
25
  EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
26
  EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
27
 
28
- PATH_TO_COLLECTION = "open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03"
29
 
30
  # Rate limit variables
31
- RATE_LIMIT_PERIOD = 7
32
- RATE_LIMIT_QUOTA = 5
33
- HAS_HIGHER_RATE_LIMIT = ["TheBloke"]
34
 
35
  API = HfApi(token=H4_TOKEN)
 
5
  # clone / pull the lmeh eval data
6
  H4_TOKEN = os.environ.get("H4_TOKEN", None)
7
 
8
+ REPO_ID = os.getenv("REPO_ID", "HuggingFaceH4/open_llm_leaderboard")
9
+ QUEUE_REPO = os.getenv("QUEUE_REPO", "open-llm-leaderboard/requests")
10
  DYNAMIC_INFO_REPO = "open-llm-leaderboard/dynamic_model_information"
11
+ RESULTS_REPO = os.getenv("RESULTS_REPO", "open-llm-leaderboard/results")
12
 
13
+ PRIVATE_QUEUE_REPO = QUEUE_REPO
14
+ PRIVATE_RESULTS_REPO = RESULTS_REPO
15
+ #PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
16
+ #PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
17
 
18
  IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
19
 
 
27
  EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
28
  EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
29
 
30
+ PATH_TO_COLLECTION = os.getenv("PATH_TO_COLLECTION", "open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03")
31
 
32
  # Rate limit variables
33
+ RATE_LIMIT_PERIOD = int(os.getenv("RATE_LIMIT_PERIOD", 7))
34
+ RATE_LIMIT_QUOTA = int(os.getenv("RATE_LIMIT_QUOTA", 5))
35
+ HAS_HIGHER_RATE_LIMIT = os.environ.get("HAS_HIGHER_RATE_LIMIT", "TheBloke").split(',')
36
 
37
  API = HfApi(token=H4_TOKEN)