Spaces:
Running
Running
Update mtdna_backend.py
Browse files — mtdna_backend.py (+4 −4)
mtdna_backend.py
CHANGED
|
@@ -23,10 +23,10 @@ import threading
|
|
| 23 |
# return classify_sample_location(accession)
|
| 24 |
|
| 25 |
#@lru_cache(maxsize=3600)
|
| 26 |
-
def pipeline_classify_sample_location_cached(accession,stop_flag=None, save_df=None):
|
| 27 |
print("inside pipeline_classify_sample_location_cached, and [accession] is ", [accession])
|
| 28 |
print("len of save df: ", len(save_df))
|
| 29 |
-
return pipeline.pipeline_with_gemini([accession],stop_flag=stop_flag, save_df=save_df)
|
| 30 |
|
| 31 |
# Count and suggest final location
|
| 32 |
# def compute_final_suggested_location(rows):
|
|
@@ -152,7 +152,7 @@ def get_incomplete_accessions(file_path):
|
|
| 152 |
# GOOGLE_SHEET_NAME = "known_samples"
|
| 153 |
# USAGE_DRIVE_FILENAME = "user_usage_log.json"
|
| 154 |
|
| 155 |
-
def summarize_results(accession, stop_flag=None):
|
| 156 |
# Early bail
|
| 157 |
if stop_flag is not None and stop_flag.value:
|
| 158 |
print(f"🛑 Skipping {accession} before starting.")
|
|
@@ -189,7 +189,7 @@ def summarize_results(accession, stop_flag=None):
|
|
| 189 |
|
| 190 |
save_df = pd.DataFrame(data[1:], columns=data[0])
|
| 191 |
print("before pipeline, len of save df: ", len(save_df))
|
| 192 |
-
outputs = pipeline_classify_sample_location_cached(accession, stop_flag, save_df)
|
| 193 |
if stop_flag is not None and stop_flag.value:
|
| 194 |
print(f"🛑 Skipped {accession} mid-pipeline.")
|
| 195 |
return []
|
|
|
|
| 23 |
# return classify_sample_location(accession)
|
| 24 |
|
| 25 |
#@lru_cache(maxsize=3600)
|
| 26 |
+
async def pipeline_classify_sample_location_cached(accession,stop_flag=None, save_df=None):
|
| 27 |
print("inside pipeline_classify_sample_location_cached, and [accession] is ", [accession])
|
| 28 |
print("len of save df: ", len(save_df))
|
| 29 |
+
return await pipeline.pipeline_with_gemini([accession],stop_flag=stop_flag, save_df=save_df)
|
| 30 |
|
| 31 |
# Count and suggest final location
|
| 32 |
# def compute_final_suggested_location(rows):
|
|
|
|
| 152 |
# GOOGLE_SHEET_NAME = "known_samples"
|
| 153 |
# USAGE_DRIVE_FILENAME = "user_usage_log.json"
|
| 154 |
|
| 155 |
+
async def summarize_results(accession, stop_flag=None):
|
| 156 |
# Early bail
|
| 157 |
if stop_flag is not None and stop_flag.value:
|
| 158 |
print(f"🛑 Skipping {accession} before starting.")
|
|
|
|
| 189 |
|
| 190 |
save_df = pd.DataFrame(data[1:], columns=data[0])
|
| 191 |
print("before pipeline, len of save df: ", len(save_df))
|
| 192 |
+
outputs = await pipeline_classify_sample_location_cached(accession, stop_flag, save_df)
|
| 193 |
if stop_flag is not None and stop_flag.value:
|
| 194 |
print(f"🛑 Skipped {accession} mid-pipeline.")
|
| 195 |
return []
|