# Provenance: uploaded by Liu-Hy via the upload-large-folder tool (commit d818561, verified).
# Path Configuration
from tools.preprocess import *

# Processing context: the trait under study and the GEO series (cohort) to preprocess.
trait = "Breast_Cancer"
cohort = "GSE236725"

# Input paths: trait-level directory and the cohort-specific subdirectory with raw GEO files.
in_trait_dir = "../DATA/GEO/Breast_Cancer"
in_cohort_dir = "../DATA/GEO/Breast_Cancer/GSE236725"

# Output paths: final linked dataset, gene-level expression, clinical features,
# and the JSON file where cohort usability metadata is accumulated.
out_data_file = "./output/z2/preprocess/Breast_Cancer/GSE236725.csv"
out_gene_data_file = "./output/z2/preprocess/Breast_Cancer/gene_data/GSE236725.csv"
out_clinical_data_file = "./output/z2/preprocess/Breast_Cancer/clinical_data/GSE236725.csv"
json_path = "./output/z2/preprocess/Breast_Cancer/cohort_info.json"
# Step 1: Initial Data Loading
from tools.preprocess import *  # NOTE(review): duplicate of the import at the top; harmless but redundant

# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# 2. Read the matrix file to obtain background information and sample characteristics data.
#    Background lines describe the series; clinical lines carry per-sample characteristics
#    (presumably rows = characteristic fields, columns = samples — verify against the helper).
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)

# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# 4. Explicitly print out all the background information and the sample characteristics dictionary
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 2: Dataset Analysis and Clinical Feature Extraction
import os
import re
import pandas as pd

# 1) Gene expression data availability
# Affymetrix RNA microarrays imply gene expression data is available (not miRNA-only or methylation-only).
is_gene_available = True

# 2) Variable availability and converters
# From the provided characteristics:
# - Trait (Breast_Cancer): only "disease state: breast cancer" present (constant) -> not usable
# - Age: not present
# - Gender: not present
# NOTE(review): Step 3 below re-derives these three row indices heuristically,
# shadowing the manual assignments made here.
trait_row = None
age_row = None
gender_row = None
def _after_colon(x):
if x is None:
return None
if isinstance(x, str):
parts = x.split(":", 1)
return parts[1].strip() if len(parts) > 1 else x.strip()
return x
def convert_trait(x):
    """Map a characteristics cell to binary Breast_Cancer status.

    Returns 1 for breast-cancer labels, 0 for control-like labels,
    and None when the value is missing or unrecognized.
    """
    if x is None:
        return None
    # Take the text after the first ':' when present (inlined helper logic).
    if isinstance(x, str):
        _, sep, tail = x.partition(":")
        value = tail.strip() if sep else x.strip()
    else:
        value = x
    label = str(value).strip().lower()
    # Positive label for breast cancer
    if "breast" in label and "cancer" in label:
        return 1
    # Control-like labels
    for term in ("control", "normal", "healthy", "benign", "adjacent normal",
                 "non-cancer", "non cancer", "no cancer"):
        if term in label:
            return 0
    return None
def convert_age(x):
    """Parse a numeric age in years from a characteristics cell.

    Returns a float strictly between 0 and 120, otherwise None.
    """
    if x is None:
        return None
    # Use the text after the first ':' when present (inlined helper logic).
    if isinstance(x, str):
        _, sep, tail = x.partition(":")
        value = tail.strip() if sep else x.strip()
    else:
        value = x
    match = re.search(r"(-?\d+\.?\d*)", str(value))
    if match is None:
        return None
    try:
        age = float(match.group(1))
    except Exception:
        return None
    # Plausibility bound on human age in years.
    return age if 0 < age < 120 else None
def convert_gender(x):
    """Map a characteristics cell to binary gender: female=0, male=1, else None."""
    if x is None:
        return None
    # Take the text after the first ':' when present (inlined helper logic).
    if isinstance(x, str):
        _, sep, tail = x.partition(":")
        value = tail.strip() if sep else x.strip()
    else:
        value = x
    label = str(value).strip().lower()
    if label in {"female", "f", "woman", "women", "girl"}:
        return 0
    if label in {"male", "m", "man", "men", "boy"}:
        return 1
    # Handle common numeric encodings
    if label == "0":
        return 0
    if label == "1":
        return 1
    return None
# 3) Save metadata (initial filtering)
# Trait data is unavailable for this cohort (trait_row is None above), so this
# records the cohort as having gene data but no usable trait labels.
is_trait_available = trait_row is not None
_ = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4) Clinical feature extraction (skip since trait_row is None)
if trait_row is not None:
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    # NOTE(review): clinical_preview is computed but never printed or used.
    clinical_preview = preview_df(selected_clinical_df, n=5)
    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
    selected_clinical_df.to_csv(out_clinical_data_file, index=True)
# Step 3: Dataset Analysis and Clinical Feature Extraction
import os
import re
import json
import gzip
from typing import Optional, Callable, List, Tuple
import numpy as np
import pandas as pd
# Helper: safe string
def _s(x):
if x is None or (isinstance(x, float) and np.isnan(x)):
return ""
try:
return str(x)
except Exception:
return ""
# Try to access previously prepared clinical_data; if not present, try to discover/load from disk.
# Step 1 above normally binds clinical_data already, so this lookup usually succeeds.
clinical_data: Optional[pd.DataFrame] = globals().get("clinical_data", None)
def _discover_and_load_clinical_df(base_dir: str) -> Optional[pd.DataFrame]:
# Heuristic search for a clinical data file
candidates: List[str] = []
for root, _, files in os.walk(base_dir):
for fn in files:
lfn = fn.lower()
if ("clinical" in lfn or "character" in lfn or "charac" in lfn) and lfn.endswith((".csv", ".tsv", ".txt")):
candidates.append(os.path.join(root, fn))
# Prefer csv, then tsv, then txt
def rank(p):
lp = p.lower()
if lp.endswith(".csv"):
return 0
if lp.endswith(".tsv"):
return 1
return 2
candidates = sorted(candidates, key=rank)
for path in candidates:
try:
if path.lower().endswith(".csv"):
df = pd.read_csv(path, index_col=0)
elif path.lower().endswith(".tsv"):
df = pd.read_csv(path, sep="\t", index_col=0)
else:
# Try tab first then comma
try:
df = pd.read_csv(path, sep="\t", index_col=0)
except Exception:
df = pd.read_csv(path, index_col=0)
# Expect a "characteristics-like" matrix: rows = attributes, cols = samples
if isinstance(df, pd.DataFrame) and df.shape[0] > 0 and df.shape[1] > 0:
return df
except Exception:
continue
return None
# Fall back to disk discovery only when Step 1 did not provide clinical_data.
if clinical_data is None:
    clinical_data = _discover_and_load_clinical_df(in_cohort_dir)
# Heuristic parsing helpers
# FIX: the pattern contains no letters, so the original re.IGNORECASE flag was
# a no-op and has been dropped.
colon_splitter = re.compile(r":\s*")
def extract_after_colon(val: str) -> str:
    """Return the text after the first ':' (and following whitespace), stripped.

    None/NaN and blank inputs yield ''; a colon-free string is returned
    stripped but otherwise unchanged.
    """
    # Inline safe string coercion: None / float NaN -> "".
    if val is None or (isinstance(val, float) and val != val):
        return ""
    s = str(val).strip()
    if not s:
        return ""
    parts = colon_splitter.split(s, maxsplit=1)
    return parts[1].strip() if len(parts) == 2 else s
def has_keyword_before_colon(val: str, keywords: List[str]) -> bool:
    """True when any keyword appears in the key part (text before ':') of val.

    Cells without a colon are treated as being all key.
    """
    # Inline safe string coercion: None / float NaN -> "".
    if val is None or (isinstance(val, float) and val != val):
        text = ""
    else:
        try:
            text = str(val).lower()
        except Exception:
            text = ""
    # partition() leaves the whole string in slot 0 when there is no colon,
    # matching the "treat whole string as key" behavior.
    key = text.partition(":")[0]
    return any(k in key for k in keywords)
def row_contains_keyword(clin_df: pd.DataFrame, i: int, keywords: List[str]) -> bool:
    """True when any cell in row *i* of clin_df has a keyword before its colon."""
    cells = clin_df.iloc[i].values.tolist()
    return any(has_keyword_before_colon(cell, keywords) for cell in cells)
def extract_row_values_after_colon(clin_df: pd.DataFrame, i: int) -> List[str]:
    """Lower-cased post-colon values for all non-blank cells in row *i*."""
    values: List[str] = []
    for cell in clin_df.iloc[i].values.tolist():
        if not _s(cell).strip():
            continue  # skip blank / missing cells
        values.append(extract_after_colon(cell).strip().lower())
    return values
# Converters required by the pipeline
def convert_age(x):
    """Parse an age in years from a characteristics cell.

    Accepts forms like 'age: 54', '54 yrs', '45 year-old'. The text after the
    first ':' is used when a colon is present; common unit suffixes are
    stripped before the first number is extracted. Returns a float in the
    open interval (0, 120), otherwise None.
    """
    # Inline safe string coercion: None / float NaN -> None.
    if x is None or (isinstance(x, float) and x != x):
        return None
    s = str(x).strip().lower()
    if not s or s in {"na", "nan", "none", "unknown", "null"}:
        return None
    # take value after colon if present
    if ":" in s:
        s = s.split(":", 1)[1].strip()
    # Remove units like 'years', 'yrs', 'y/o', 'year-old'
    s = re.sub(r"(years?|yrs?|y/o|yo|year[-\s]?old)", "", s)
    # extract first number
    m = re.search(r"[-+]?\d*\.?\d+", s)
    if not m:
        return None
    try:
        val = float(m.group())
    except Exception:
        return None
    # FIX: reject implausible ages (consistent with the Step-2 converter),
    # so stray numbers in a cell are not mistaken for ages.
    return val if 0 < val < 120 else None
def convert_gender(x):
    """Map a characteristics cell to binary gender: female=0, male=1, else None."""
    # Inline safe string coercion: None / float NaN -> None.
    if x is None or (isinstance(x, float) and x != x):
        return None
    s = str(x).strip().lower()
    if not s or s in {"na", "nan", "none", "unknown", "null"}:
        return None
    if ":" in s:
        s = s.split(":", 1)[1].strip().lower()
    # Normalize to alphabetic words when any letters are present.
    letters = re.findall(r"[a-zA-Z]+", s)
    normalized = " ".join(letters) if letters else s
    words = normalized.replace("-", " ").split()
    if any(w in {"female", "f", "woman", "women", "girl"} for w in words):
        return 0
    if any(w in {"male", "m", "man", "men", "boy"} for w in words):
        return 1
    # sometimes encoded as 'sex: 0/1' but we avoid guessing mapping unless explicit
    return None
# Trait converter for Breast_Cancer: binary, 1 = cancer present, 0 = non-cancer/normal
# Keyword lists used by convert_trait below; matching is substring-based on the
# cleaned (lower-cased, '_'/'-' replaced by spaces) value text.
bc_pos_kw = [
    "breast cancer", "cancer", "tumor", "tumour", "carcinoma", "malignan", "metast",
    "idc", "ilc", "ductal", "lobular", "tnbc", "her2", "er+", "pr+", "luminal", "basal"
]
# NOTE(review): "non-cancer" can never match after '-' is replaced with a space;
# the "non cancer" entry covers that case.
bc_neg_kw = [
    "normal", "adjacent normal", "benign", "healthy", "control", "non-cancer", "noncancer", "non cancer"
]
def convert_trait(x):
    """Binary Breast_Cancer status: 1 = cancer/tumor wording, 0 = control-like, else None."""
    # Keyword vocabularies (inlined from the module-level lists).
    positives = (
        "breast cancer", "cancer", "tumor", "tumour", "carcinoma", "malignan", "metast",
        "idc", "ilc", "ductal", "lobular", "tnbc", "her2", "er+", "pr+", "luminal", "basal",
    )
    negatives = (
        "normal", "adjacent normal", "benign", "healthy", "control",
        "non-cancer", "noncancer", "non cancer",
    )
    # Inline safe string coercion: None / float NaN -> None.
    if x is None or (isinstance(x, float) and x != x):
        return None
    s = str(x).strip().lower()
    if not s or s in {"na", "nan", "none", "unknown", "null"}:
        return None
    if ":" in s:
        s = s.split(":", 1)[1].strip().lower()
    # remove some punctuation
    cleaned = s.replace("_", " ").replace("-", " ").strip()
    # Decide only when the keyword evidence is unambiguous.
    negative_hit = any(k in cleaned for k in negatives)
    positive_hit = any(k in cleaned for k in positives)
    if positive_hit and not negative_hit:
        return 1
    if negative_hit and not positive_hit:
        return 0
    # ambiguous phrases that often imply tumor in GEO
    if "tumor" in cleaned or "tumour" in cleaned:
        return 1
    return None
# Identify availability rows
# Scan each characteristics row of clinical_data and pick, per variable, the row
# whose converted values have the best coverage (ties broken by lower row index).
trait_row: Optional[int] = None
age_row: Optional[int] = None
gender_row: Optional[int] = None
if isinstance(clinical_data, pd.DataFrame) and clinical_data.shape[0] > 0 and clinical_data.shape[1] > 0:
    n_rows = clinical_data.shape[0]
    # Age candidates: keyword 'age' before the colon, >=50% parseable, non-constant.
    age_candidates: List[Tuple[int, int]] = []  # (row_index, count_nonnull)
    for i in range(n_rows):
        if row_contains_keyword(clinical_data, i, ["age"]):
            conv_vals = [convert_age(v) for v in clinical_data.iloc[i].values.tolist()]
            nn = [v for v in conv_vals if v is not None]
            if len(nn) >= max(2, int(0.5 * len(conv_vals))):  # at least 50% coverage and >=2 values
                # ensure not constant
                if len(set(nn)) > 1:
                    age_candidates.append((i, len(nn)))
    if age_candidates:
        # Most non-null values first; lower row index wins ties.
        age_candidates.sort(key=lambda x: (-x[1], x[0]))
        age_row = age_candidates[0][0]
    # Gender candidates: keyword 'gender'/'sex' before the colon.
    gender_candidates: List[Tuple[int, int]] = []
    for i in range(n_rows):
        if row_contains_keyword(clinical_data, i, ["gender", "sex"]):
            conv_vals = [convert_gender(v) for v in clinical_data.iloc[i].values.tolist()]
            nn = [v for v in conv_vals if v is not None]
            # need both 0 and 1 present to avoid constant feature
            if set(nn) >= {0, 1}:
                gender_candidates.append((i, len(nn)))
    if gender_candidates:
        gender_candidates.sort(key=lambda x: (-x[1], x[0]))
        gender_row = gender_candidates[0][0]
    # Trait candidates: must yield both 0 and 1 after conversion
    # (no keyword gate here — every row is tried with convert_trait).
    trait_candidates: List[Tuple[int, int]] = []
    for i in range(n_rows):
        vals = clinical_data.iloc[i].values.tolist()
        conv_vals = [convert_trait(v) for v in vals]
        present = {v for v in conv_vals if v is not None}
        if present == {0, 1}:
            trait_candidates.append((i, sum(v is not None for v in conv_vals)))
    if trait_candidates:
        trait_candidates.sort(key=lambda x: (-x[1], x[0]))
        trait_row = trait_candidates[0][0]
# Determine gene expression availability by inspecting files/series matrix
def infer_gene_expression_available(base_dir: str) -> bool:
    """Decide whether *base_dir* likely holds gene-expression (mRNA) data.

    Scans series_matrix headers for platform keywords: miRNA/methylation
    markers veto availability, expression-platform markers confirm it.
    Falls back to filename heuristics on other candidate tables, then to a
    tentative True when series_matrix files exist with no veto evidence.
    """
    series_files = []
    other_candidates = []
    for root, _, files in os.walk(base_dir):
        for fn in files:
            lfn = fn.lower()
            fp = os.path.join(root, fn)
            if "series_matrix" in lfn and (lfn.endswith(".txt") or lfn.endswith(".gz")):
                series_files.append(fp)
            elif any(k in lfn for k in ["matrix", "expression", "count", "tpm", "fpkm", "rpkm"]) and lfn.endswith((".txt", ".tsv", ".csv", ".gz")):
                other_candidates.append(fp)

    def scan_text_file_head(path: str, max_lines: int = 2000) -> str:
        """Lower-cased first *max_lines* lines of a (possibly gzipped) text file; '' on error."""
        # FIX (dedup): pick the opener by extension instead of duplicating the
        # identical read loop for gz vs. plain files, as the original did.
        opener = gzip.open if path.lower().endswith(".gz") else open
        try:
            with opener(path, "rt", errors="ignore") as f:
                lines = []
                for i, line in enumerate(f):
                    if i >= max_lines:
                        break
                    lines.append(line.strip())
            return "\n".join(lines).lower()
        except Exception:
            return ""

    # 1) Decisive evidence from series_matrix headers.
    for sf in series_files:
        head = scan_text_file_head(sf)
        if not head:
            continue
        if any(k in head for k in ["mirna", "micro rna", "methylation", "450k", "850k", "wgbs", "bisulfite"]):
            return False
        if any(k in head for k in ["gene expression", "transcriptome", "rna-seq", "humanht-12", "affymetrix human", "agilent sureprint g3 gene expression"]):
            return True
    # 2) If no decisive header, fall back to filename heuristics on other tables.
    for fp in other_candidates:
        lfp = fp.lower()
        if any(k in lfp for k in ["mirna", "methylation", "450k", "850k", "wgbs", "bisulfite"]):
            continue
        # A generic expression/counts table is assumed to be gene expression.
        if any(k in lfp for k in ["expr", "expression", "count", "counts", "matrix", "tpm", "fpkm", "rpkm"]):
            return True
    # 3) Last resort: series_matrix files exist and nothing vetoed them -> tentatively True.
    return bool(series_files)
# Re-derive gene availability from the cohort directory contents
# (supersedes the hard-coded Step-2 value).
is_gene_available = infer_gene_expression_available(in_cohort_dir)
# Trait availability
is_trait_available = trait_row is not None
# Save metadata (initial filtering) — repeats the Step-2 save with the
# heuristically re-derived values.
_ = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# If clinical data is available, extract and save selected clinical features
if trait_row is not None and isinstance(clinical_data, pd.DataFrame) and clinical_data.shape[0] > 0:
    # NOTE(review): the result is bound to "selected_clinical_df"; later code
    # that wants the in-memory frame must look it up under this exact name.
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age if age_row is not None else None,
        gender_row=gender_row,
        convert_gender=convert_gender if gender_row is not None else None
    )
    # Preview and save
    preview = preview_df(selected_clinical_df, n=5)
    print({"preview_selected_clinical": preview})
    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
    selected_clinical_df.to_csv(out_clinical_data_file)
else:
    print({"preview_selected_clinical": None, "note": "No trait_row detected or no clinical_data available; skipping clinical feature extraction."})
# Step 4: Gene Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])

# Step 5: Gene Identifier Review
# The row IDs are probe identifiers, so probe->gene mapping is required.
print("requires_gene_mapping = True")

# Step 6: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))

# Step 7: Gene Identifier Mapping
# Determine the appropriate columns for mapping based on the preview:
# - Probe/identifier column in annotation: 'ID' (e.g., '1007_s_at')
# - Gene symbol column in annotation: 'Gene Symbol'
# 1-2) Get the gene mapping dataframe
mapping_df = get_gene_mapping(gene_annotation, prob_col='ID', gene_col='Gene Symbol')
# 3) Apply the mapping to convert probe-level data to gene-level expression
gene_data = apply_gene_mapping(gene_data, mapping_df)
# Step 8: Data Normalization and Linking
import os  # NOTE(review): re-imports of os/pandas are redundant but harmless
import pandas as pd
# 1) Normalize gene symbols and save gene-level data
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
normalized_gene_data.to_csv(out_gene_data_file)
# 2) Try to obtain clinical data if it exists (may not exist for this cohort)
# BUG FIX: Steps 2/3 bind the extracted clinical table to the name
# "selected_clinical_df"; the original lookup asked globals() for
# "selected_clinical_data" (never defined anywhere), so the in-memory copy was
# always missed and the frame was needlessly re-read from disk. Look up the
# name that is actually used, keeping the disk fallback.
selected_clinical_data = globals().get("selected_clinical_df", None)
if selected_clinical_data is None and os.path.exists(out_clinical_data_file):
    try:
        # First CSV column holds the feature names (e.g. the trait row label).
        selected_clinical_data = pd.read_csv(out_clinical_data_file, index_col=0)
    except Exception:
        selected_clinical_data = None
# Determine trait availability from clinical data presence: the frame must be
# non-empty and contain a row labeled with the trait name.
is_trait_available = (
    isinstance(selected_clinical_data, pd.DataFrame)
    and selected_clinical_data.shape[0] > 0
    and selected_clinical_data.shape[1] > 0
    and (trait in selected_clinical_data.index)
)
# 2-6) Link, handle missingness, assess bias, validate, and save if usable
if is_trait_available:
    # Link clinical and genetic data
    linked_data = geo_link_clinical_genetic_data(selected_clinical_data, normalized_gene_data)
    # Ensure the trait column exists after linking; otherwise treat as no-trait
    if trait in linked_data.columns:
        # 3) Handle missing values
        linked_data = handle_missing_values(linked_data, trait)
        # 4) Assess bias and drop biased demographic features
        is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data, trait)
        # 5) Final validation and save cohort info
        note = "INFO: Clinical features available; linked with gene expression and processed."
        is_usable = validate_and_save_cohort_info(
            is_final=True,
            cohort=cohort,
            info_path=json_path,
            is_gene_available=True,
            is_trait_available=True,
            is_biased=is_trait_biased,
            df=unbiased_linked_data,
            note=note
        )
        # 6) Save linked data only when the final validation deems the cohort usable
        if is_usable:
            os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
            unbiased_linked_data.to_csv(out_data_file)
    else:
        # Trait row existed before linking but trait column not found after linking; treat as unavailable
        note = "INFO: Trait column missing after linking; skipping linked data saving."
        _ = validate_and_save_cohort_info(
            is_final=True,
            cohort=cohort,
            info_path=json_path,
            is_gene_available=True,
            is_trait_available=False,
            is_biased=False,
            df=pd.DataFrame(),
            note=note
        )
else:
    # No clinical/trait data available; record metadata and skip linking
    note = "INFO: No clinical/trait data available for this cohort; linking skipped."
    _ = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=False,
        df=pd.DataFrame(),
        note=note
    )