| | import os |
| | from Bio import SeqIO |
| | from tqdm.auto import tqdm |
| | import pandas as pd |
| | from huggingface_hub import HfApi |
| |
|
| |
|
# FLIP AAV (adeno-associated virus capsid fitness) dataset split names.
aav_files = [
    "des_mut",
    "low_vs_high",
    "mut_des",
    "one_vs_many",
    "sampled",
    "seven_vs_many",
    "two_vs_many",
]
| |
|
| |
|
# FLIP Meltome (protein melting temperature) dataset split names.
meltome_files = [
    "human",
    "human_cell",
    "mixed_split",
]
| |
|
| |
|
# FLIP SAV (single amino-acid variant) dataset split names.
sav_files = [
    "human",
    "only_savs",
    "mixed",
]
| |
|
| |
|
# FLIP SCL (subcellular localization) dataset split names.
scl_files = [
    "balanced",
    "human_hard",
    "human_soft",
    "mixed_hard",
    "mixed_soft",
]
| |
|
| |
|
# FLIP GB1 (protein G B1 domain binding fitness) dataset split names.
gb1_files = [
    "low_vs_high",
    "one_vs_rest",
    "sampled",
    "three_vs_rest",
    "two_vs_rest",
]
| |
|
| |
|
def download_wget(filename, repo):
    """Download ``<filename>.fasta`` for the given FLIP task into the CWD.

    Skips the download when the file already exists locally. Shells out to
    the system ``wget`` binary.

    Args:
        filename: Split name without the ``.fasta`` suffix (e.g. "sampled").
        repo: FLIP task directory on the server (e.g. "aav", "gb1").
    """
    # Restored `{filename}` interpolation: the original text had a literal
    # "(unknown)" placeholder, so the existence check and URL never
    # referenced the actual split file.
    if os.path.exists(f"{filename}.fasta"):
        return
    url = f"http://data.bioembeddings.com/public/FLIP/fasta/{repo}/{filename}.fasta"
    # NOTE(review): shell interpolation is safe here only because filename
    # and repo come from the hard-coded lists above, never user input.
    os.system(f"wget {url}")
| |
|
| |
|
def upload_aav():
    """Convert every AAV FLIP split to CSV and upload it to the HF Hub.

    For each split: download the FASTA file, parse the per-record header
    (``<id> label=<x> split=<s> validation=<v>``), write a CSV, push it to
    the ``hazemessam/aav`` dataset repo, then delete the local artefacts.
    """
    repo = "aav"
    for filename in aav_files:
        download_wget(filename, repo)
        # Restored `{filename}` interpolation (was a literal "(unknown)"
        # placeholder, which would read/write the wrong files).
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []

        for _, record in tqdm(db.items()):
            fields = record.description.split()
            sequence = str(record.seq)
            seqid = fields[0]
            # Header fields after the id are "key=value" tokens.
            label = float(fields[1].split("=")[1])
            split = fields[2].split("=")[1]
            validation = fields[3].split("=")[1]

            output.append({
                "seqid": seqid,
                "label": label,
                "sequence": sequence,
                "split": split.lower(),
                "validation": validation.lower(),
            })
        # Release the index's open file handle before deleting the FASTA.
        db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # Plain file removal instead of shelling out to `rm -rf`.
        os.remove(fasta_file)
        os.remove(csv_file)
| |
|
| |
|
def upload_meltome():
    """Convert every Meltome FLIP split to CSV and upload it to the HF Hub.

    For each split: download the FASTA file, parse the per-record header
    (``<id> label=<x> split=<s> validation=<v>``), write a CSV, push it to
    the ``hazemessam/meltome`` dataset repo, then delete the local files.
    """
    repo = "meltome"
    for filename in meltome_files:
        download_wget(filename, repo)
        # Restored `{filename}` interpolation (was a literal "(unknown)"
        # placeholder, which would read/write the wrong files).
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        for _, record in tqdm(db.items()):
            fields = record.description.split()
            sequence = str(record.seq)
            seqid = fields[0]
            # Header fields after the id are "key=value" tokens; label is
            # a melting temperature, hence the float cast.
            label = float(fields[1].split("=")[1])
            split = fields[2].split("=")[1]
            validation = fields[3].split("=")[1]

            output.append({
                "seqid": seqid,
                "label": label,
                "sequence": sequence,
                "split": split.lower(),
                "validation": validation.lower(),
            })
        # Release the index's open file handle before deleting the FASTA.
        db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)

        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # Plain file removal instead of shelling out to `rm -rf`.
        os.remove(fasta_file)
        os.remove(csv_file)
| |
|
| |
|
def upload_sav():
    """Convert every SAV FLIP split to CSV and upload it to the HF Hub.

    For each split: download the FASTA file, parse the per-record header
    (``<id> label=<x> split=<s> validation=<v>``), write a CSV, push it to
    the ``hazemessam/sav`` dataset repo, then delete the local files.
    """
    repo = "sav"
    for filename in sav_files:
        download_wget(filename, repo)
        # Restored `{filename}` interpolation (was a literal "(unknown)"
        # placeholder, which would read/write the wrong files).
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        for _, record in tqdm(db.items()):
            fields = record.description.split()
            sequence = str(record.seq)
            seqid = fields[0]
            # Label stays a string here (classification task), unlike the
            # regression tasks (aav/meltome/gb1) which cast to float.
            label = fields[1].split("=")[1]
            split = fields[2].split("=")[1]
            validation = fields[3].split("=")[1]

            output.append({
                "seqid": seqid,
                "label": label,
                "sequence": sequence,
                "split": split.lower(),
                "validation": validation.lower(),
            })
        # Release the index's open file handle before deleting the FASTA.
        db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)
        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # Plain file removal instead of shelling out to `rm -rf`.
        os.remove(fasta_file)
        os.remove(csv_file)
| |
|
| |
|
def upload_scl():
    """Convert every SCL FLIP split to CSV and upload it to the HF Hub.

    For each split: download the FASTA file, parse the per-record header
    (``<id> label=<x> split=<s> validation=<v>``), write a CSV, push it to
    the ``hazemessam/scl`` dataset repo, then delete the local files.
    """
    repo = "scl"
    for filename in scl_files:
        download_wget(filename, repo)
        # Restored `{filename}` interpolation (was a literal "(unknown)"
        # placeholder, which would read/write the wrong files).
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        for _, record in tqdm(db.items()):
            fields = record.description.split()
            sequence = str(record.seq)
            seqid = fields[0]
            # Label stays a string (localization class), unlike the
            # regression tasks (aav/meltome/gb1) which cast to float.
            label = fields[1].split("=")[1]
            split = fields[2].split("=")[1]
            validation = fields[3].split("=")[1]

            output.append({
                "seqid": seqid,
                "label": label,
                "sequence": sequence,
                "split": split.lower(),
                "validation": validation.lower(),
            })
        # Release the index's open file handle before deleting the FASTA.
        db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)
        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # Plain file removal instead of shelling out to `rm -rf`.
        os.remove(fasta_file)
        os.remove(csv_file)
| |
|
| |
|
def upload_gb1():
    """Convert every GB1 FLIP split to CSV and upload it to the HF Hub.

    For each split: download the FASTA file, parse the per-record header
    (``<id> label=<x> split=<s> validation=<v>``), write a CSV, push it to
    the ``hazemessam/gb1`` dataset repo, then delete the local files.
    """
    repo = "gb1"
    for filename in gb1_files:
        download_wget(filename, repo)
        # Restored `{filename}` interpolation (was a literal "(unknown)"
        # placeholder, which would read/write the wrong files).
        fasta_file = f"{filename}.fasta"
        csv_file = f"{filename}.csv"
        db = SeqIO.index(fasta_file, "fasta")
        output = []
        for _, record in tqdm(db.items()):
            fields = record.description.split()
            sequence = str(record.seq)
            seqid = fields[0]
            # Header fields after the id are "key=value" tokens; label is
            # a binding-fitness score, hence the float cast.
            label = float(fields[1].split("=")[1])
            split = fields[2].split("=")[1]
            validation = fields[3].split("=")[1]

            output.append({
                "seqid": seqid,
                "label": label,
                "sequence": sequence,
                "split": split.lower(),
                "validation": validation.lower(),
            })
        # Release the index's open file handle before deleting the FASTA.
        db.close()
        pd.DataFrame(output).to_csv(csv_file, index=False)
        api = HfApi()
        api.create_repo(
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
            exist_ok=True,
        )
        api.upload_file(
            path_or_fileobj=csv_file,
            path_in_repo=csv_file,
            repo_id=f"hazemessam/{repo}",
            repo_type="dataset",
        )
        # Plain file removal instead of shelling out to `rm -rf`.
        os.remove(fasta_file)
        os.remove(csv_file)
| |
|
| |
|
if __name__ == "__main__":
    # Process and upload every FLIP task, one HF dataset repo each.
    upload_gb1()
    upload_meltome()
    upload_sav()
    upload_scl()
    upload_aav()
| |
|