mridzuan committed
Commit fd761b2 · 1 Parent(s): 6a29f7a

replace st.secrets
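The change swaps Streamlit's `st.secrets` lookups for plain `os.environ` reads of `HF_REPO_ID` and `HF_TOKEN`. As an illustrative sketch only (not part of this commit): secrets configured in a Hugging Face Space's settings are exposed to the running app as environment variables, so a small helper like the hypothetical `get_secret` below can read the same values and fail loudly if one is missing.

import os

# Illustrative sketch, not part of the commit: on a Hugging Face Space the
# values added under "Settings -> Variables and secrets" are exposed to the
# app as environment variables, so os.environ works where st.secrets did.
def get_secret(name: str) -> str:
    """Return a required secret from the environment, failing loudly if unset."""
    value = os.environ.get(name)
    if value is None:
        raise RuntimeError(f"Missing required environment variable: {name}")
    return value

HF_REPO_ID = get_secret("HF_REPO_ID")  # e.g. a dataset repo id (assumed to be set)
HF_TOKEN = get_secret("HF_TOKEN")      # Hugging Face access token (assumed to be set)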

Files changed (1)
  1. src/model_utils.py +12 -12
src/model_utils.py CHANGED
@@ -121,15 +121,15 @@ def save_model(model, user_model_name, metrics_result_single=None):
 
     # Upload to HF dataset
     scheduler = CommitScheduler(
-        repo_id=st.secrets["HF_REPO_ID"],
+        repo_id=os.environ["HF_REPO_ID"],
         repo_type="dataset",
         path_in_repo="models",
-        token=st.secrets["HF_TOKEN"],
+        token=os.environ["HF_TOKEN"],
         private=True,
         folder_path="dummy"
     )
     scheduler.api.upload_file(
-        repo_id=st.secrets["HF_REPO_ID"],
+        repo_id=os.environ["HF_REPO_ID"],
         repo_type="dataset",
         path_in_repo=f"models/{uuid.uuid4()}.parquet",
         path_or_fileobj=buf
@@ -209,15 +209,15 @@ def save_model_ensemble(models, user_model_name, best_iterations=None, fold_scor
     buf.seek(0)
 
     scheduler = CommitScheduler(
-        repo_id=st.secrets["HF_REPO_ID"],
+        repo_id=os.environ["HF_REPO_ID"],
         repo_type="dataset",
         path_in_repo="models",
-        token=st.secrets["HF_TOKEN"],
+        token=os.environ["HF_TOKEN"],
         private=True,
         folder_path="dummy"
     )
     scheduler.api.upload_file(
-        repo_id=st.secrets["HF_REPO_ID"],
+        repo_id=os.environ["HF_REPO_ID"],
         repo_type="dataset",
         path_in_repo=f"models/{uuid.uuid4()}.parquet",
         path_or_fileobj=buf
@@ -246,9 +246,9 @@ def load_model(model_name):
     login(token=os.environ["HF_TOKEN"])
 
     files = hf_hub_download(
-        repo_id=st.secrets["HF_REPO_ID"],
+        repo_id=os.environ["HF_REPO_ID"],
         repo_type="dataset",
-        token=st.secrets["HF_TOKEN"],
+        token=os.environ["HF_TOKEN"],
         filename=None, # Get whole repo listing
         cache_dir=None,
         local_dir=None,
@@ -258,18 +258,18 @@ def load_model(model_name):
     )
 
     from huggingface_hub import HfApi
-    api = HfApi(token=st.secrets["HF_TOKEN"])
-    all_files = api.list_repo_files(repo_id=st.secrets["HF_REPO_ID"], repo_type="dataset")
+    api = HfApi(token=os.environ["HF_TOKEN"])
+    all_files = api.list_repo_files(repo_id=os.environ["HF_REPO_ID"], repo_type="dataset")
     model_files = [f for f in all_files if f.startswith("models/") and f.endswith(".parquet")]
 
     # Find matching filename
     target_file = None
     for f in model_files:
         downloaded = hf_hub_download(
-            repo_id=st.secrets["HF_REPO_ID"],
+            repo_id=os.environ["HF_REPO_ID"],
             repo_type="dataset",
             filename=f,
-            token=st.secrets["HF_TOKEN"]
+            token=os.environ["HF_TOKEN"]
         )
         table = pq.read_table(downloaded)
         row = table.to_pylist()[0]
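For reference, a self-contained sketch of the listing-and-download pattern that load_model relies on after this change; it assumes HF_REPO_ID and HF_TOKEN are set in the environment and simply mirrors the models/*.parquet convention from the diff.

import os

import pyarrow.parquet as pq
from huggingface_hub import HfApi, hf_hub_download

api = HfApi(token=os.environ["HF_TOKEN"])

# List every file in the private dataset repo and keep the serialized models.
all_files = api.list_repo_files(repo_id=os.environ["HF_REPO_ID"], repo_type="dataset")
model_files = [f for f in all_files if f.startswith("models/") and f.endswith(".parquet")]

# Download each candidate and read its first row, as load_model does when
# searching for a matching model name.
for f in model_files:
    downloaded = hf_hub_download(
        repo_id=os.environ["HF_REPO_ID"],
        repo_type="dataset",
        filename=f,
        token=os.environ["HF_TOKEN"],
    )
    row = pq.read_table(downloaded).to_pylist()[0]
    print(f, list(row.keys()))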