eduagarcia committed
Commit b81a33b
1 Parent(s): 0cb9327

fix bool env variables

Files changed (1)
  1. src/envs.py +6 -3
src/envs.py CHANGED
@@ -20,6 +20,9 @@ def get_config(name, default):
         return default
     return res
 
+def str2bool(v):
+    return str(v).lower() in ("yes", "true", "t", "1")
+
 # clone / pull the lmeh eval data
 H4_TOKEN = get_config("H4_TOKEN", None)
 
@@ -36,7 +39,7 @@ PRIVATE_RESULTS_REPO = RESULTS_REPO
 #PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
 #PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
 
-IS_PUBLIC = bool(get_config("IS_PUBLIC", True))
+IS_PUBLIC = str2bool(get_config("IS_PUBLIC", True))
 
 CACHE_PATH=get_config("HF_HOME", ".")
 
@@ -55,10 +58,10 @@ RATE_LIMIT_PERIOD = int(get_config("RATE_LIMIT_PERIOD", 7))
 RATE_LIMIT_QUOTA = int(get_config("RATE_LIMIT_QUOTA", 5))
 HAS_HIGHER_RATE_LIMIT = get_config("HAS_HIGHER_RATE_LIMIT", "TheBloke").split(',')
 
-TRUST_REMOTE_CODE = bool(get_config("TRUST_REMOTE_CODE", False))
+TRUST_REMOTE_CODE = str2bool(get_config("TRUST_REMOTE_CODE", False))
 
 #Set if you want to get an extra field with the average eval results from the HF leaderboard
-GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS = bool(get_config("GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS", False))
+GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS = str2bool(get_config("GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS", False))
 ORIGINAL_HF_LEADERBOARD_RESULTS_REPO = get_config("ORIGINAL_HF_LEADERBOARD_RESULTS_REPO", "open-llm-leaderboard/results")
 ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, 'original_results')
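Why the fix is needed: environment variables always arrive as strings, and in Python bool() on any non-empty string is True, so bool(get_config("TRUST_REMOTE_CODE", False)) evaluated to True whenever the variable was set, even to "False". The snippet below is a minimal repro sketch, assuming the variable is read from os.environ the same way get_config does; only str2bool comes from this commit.

import os

def str2bool(v):
    # Same helper as added in this commit.
    return str(v).lower() in ("yes", "true", "t", "1")

os.environ["TRUST_REMOTE_CODE"] = "False"
raw = os.environ.get("TRUST_REMOTE_CODE")

print(bool(raw))      # True  -- any non-empty string is truthy
print(str2bool(raw))  # False -- the string's meaning is parsed

# Non-string defaults still behave, since str2bool coerces first:
print(str2bool(True))   # True  -> str(True).lower() == "true"
print(str2bool(False))  # False -> "false" is not in the accepted set

One consequence of this design is that any value outside ("yes", "true", "t", "1") falls through to False, so a misspelled value like IS_PUBLIC=ture silently disables the flag rather than raising an error.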