hi-melnikov committed on
Commit
46398df
·
1 Parent(s): 1b14058
Files changed (1) hide show
  1. app.py +4 -0
app.py CHANGED
@@ -20,6 +20,7 @@ from src.envs import (
20
  API,
21
  H4_TOKEN,
22
  HF_HOME,
 
23
  METAINFO_DATASET,
24
  PERSISTENT_FILE_CHECK,
25
  PERSISTENT_FILE_CHECK_PATH,
@@ -84,6 +85,7 @@ def build_demo():
84
  path_in_repo="model_answers/external/" + file_path,
85
  repo_id="Vikhrmodels/openbench-eval",
86
  repo_type="dataset",
 
87
  )
88
  with open(PERSISTENT_FILE_CHECK_PATH, "w", encoding="utf-8") as file:
89
  file.write("1")
@@ -92,6 +94,7 @@ def build_demo():
92
  path_in_repo="",
93
  repo_id=METAINFO_DATASET,
94
  repo_type="dataset",
 
95
  )
96
  os.environ[RESET_JUDGEMENT_ENV] = "1"
97
  return file.name
@@ -141,6 +144,7 @@ if __name__ == "__main__":
141
  path_in_repo="",
142
  repo_id=METAINFO_DATASET,
143
  repo_type="dataset",
 
144
  )
145
  # gen_judgement_file = os.path.join(HF_HOME, "src/gen/gen_judgement.py")
146
  # subprocess.run(["python3", gen_judgement_file], check=True)
 
20
  API,
21
  H4_TOKEN,
22
  HF_HOME,
23
+ HF_TOKEN_PRIVATE,
24
  METAINFO_DATASET,
25
  PERSISTENT_FILE_CHECK,
26
  PERSISTENT_FILE_CHECK_PATH,
 
85
  path_in_repo="model_answers/external/" + file_path,
86
  repo_id="Vikhrmodels/openbench-eval",
87
  repo_type="dataset",
88
+ token=HF_TOKEN_PRIVATE,
89
  )
90
  with open(PERSISTENT_FILE_CHECK_PATH, "w", encoding="utf-8") as file:
91
  file.write("1")
 
94
  path_in_repo="",
95
  repo_id=METAINFO_DATASET,
96
  repo_type="dataset",
97
+ token=HF_TOKEN_PRIVATE,
98
  )
99
  os.environ[RESET_JUDGEMENT_ENV] = "1"
100
  return file.name
 
144
  path_in_repo="",
145
  repo_id=METAINFO_DATASET,
146
  repo_type="dataset",
147
+ token=HF_TOKEN_PRIVATE,
148
  )
149
  # gen_judgement_file = os.path.join(HF_HOME, "src/gen/gen_judgement.py")
150
  # subprocess.run(["python3", gen_judgement_file], check=True)