# GenoTEX / output / preprocess / Epilepsy / code / GSE199759.py
# Provenance: uploaded by Liu-Hy via the upload-large-folder tool (commit 9efdaa1, verified).
# Path Configuration
from tools.preprocess import *
# Processing context: the trait and GEO series handled by this script.
trait = "Epilepsy"
cohort = "GSE199759"

# Input paths: raw GEO downloads for the trait, and this specific cohort.
in_trait_dir = "../DATA/GEO/Epilepsy"
in_cohort_dir = in_trait_dir + "/GSE199759"

# Output paths: linked dataset, gene matrix, clinical table, and cohort metadata.
out_data_file = "./output/z3/preprocess/Epilepsy/GSE199759.csv"
out_gene_data_file = "./output/z3/preprocess/Epilepsy/gene_data/GSE199759.csv"
out_clinical_data_file = "./output/z3/preprocess/Epilepsy/clinical_data/GSE199759.csv"
json_path = "./output/z3/preprocess/Epilepsy/cohort_info.json"
# Step 1: Initial Data Loading
from tools.preprocess import *

# Locate the SOFT and series-matrix files for this cohort.
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Pull series-level background text and the per-sample characteristics table.
bg_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
sample_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file, bg_prefixes, sample_prefixes
)

# Summarize the unique values of each characteristics row for manual inspection.
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# Print background information and the characteristics summary.
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 2: Dataset Analysis and Clinical Feature Extraction
import re

# 1) Gene expression availability: the series uses an Agilent LncRNA+mRNA
#    Human Gene Expression Microarray V3.0, so mRNA expression data is expected.
is_gene_available = True

# 2) Row indices of each variable in the sample-characteristics table.
#    The epilepsy grouping (GRE vs GNE) is not listed, so the trait is unavailable.
trait_row = None
age_row = 2     # e.g. 'age: 47y'
gender_row = 1  # e.g. 'gender: Male' / 'gender: Female'
def _after_colon(val):
if val is None:
return None
parts = str(val).split(":", 1)
return parts[1].strip() if len(parts) > 1 else str(val).strip()
def convert_trait(x):
    """
    Generic epilepsy status converter (not used here since trait_row is None).

    Maps epilepsy-related indications to binary: epilepsy=1, non-epilepsy=0.
    Returns None for missing, unrecognized, or ambiguous values.
    """
    if x is None:
        return None
    # Take the text after 'label: value' if present (colon-split inlined so the
    # function is self-contained).
    parts = str(x).split(":", 1)
    v = (parts[1] if len(parts) > 1 else parts[0]).strip()
    v_low = v.lower()
    # Evaluate negative and positive indications independently. Negative phrases
    # such as "non-epilepsy" / "without epilepsy" contain the substring
    # "epilepsy", so testing positives first (as the original did) mislabeled
    # unambiguous negatives as 1.
    negative = any(k in v_low for k in ["gne", "without epilepsy", "no epilepsy", "nonepilepsy", "non-epilepsy"])
    positive = any(k in v_low for k in ["gre", "glioma-related epilepsy", "with epilepsy"])
    if negative:
        return None if positive else 0  # both present -> genuinely ambiguous
    if positive or "epilepsy" in v_low:
        return 1
    return None
def convert_age(x):
    """Parse an age characteristic like 'age: 47y' into an int/float, or None."""
    if x is None:
        return None
    # Take the text after 'label: value' if present (colon-split inlined).
    pieces = str(x).split(":", 1)
    v = (pieces[1] if len(pieces) > 1 else pieces[0]).strip()
    # First numeric token in strings like "47y", "47", "47 years".
    match = re.search(r"\d+\.?\d*", v)
    if match is None:
        return None
    try:
        num = float(match.group(0))
    except Exception:
        return None
    # Report whole ages as ints, fractional ones as floats.
    return int(num) if num.is_integer() else num
def convert_gender(x):
    """Map a gender characteristic to binary: female=0, male=1; None if unknown."""
    if x is None:
        return None
    # Take the text after 'label: value' if present (colon-split inlined so the
    # function is self-contained).
    pieces = str(x).split(":", 1)
    v = (pieces[1] if len(pieces) > 1 else pieces[0]).strip()
    v_low = v.lower()
    # Check "female" BEFORE "male": "female" contains the substring "male", so
    # the original ordering mapped every female sample to 1 (male).
    if "female" in v_low:
        return 0
    if "male" in v_low:
        return 1
    return None
# 3) Record initial (pre-final) cohort metadata used for dataset filtering.
is_trait_available = trait_row is not None
validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available,
)

# 4) Clinical feature extraction is skipped for this cohort (trait_row is None).
#    For cohorts where the trait row exists, the extraction would be:
# selected_clinical_df = geo_select_clinical_features(
#     clinical_df=clinical_data,
#     trait=trait,
#     trait_row=trait_row,
#     convert_trait=convert_trait,
#     age_row=age_row,
#     convert_age=convert_age,
#     gender_row=gender_row,
#     convert_gender=convert_gender,
# )
# preview = preview_df(selected_clinical_df)
# selected_clinical_df.to_csv(out_clinical_data_file, index=True)
# Step 3: Gene Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])

# Step 4: Gene Identifier Review
# NOTE(review): this is a hard-coded conclusion (the probe IDs require mapping
# to gene symbols), recorded as output; the actual mapping happens in Step 6.
print("requires_gene_mapping = True")

# Step 5: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# Step 6: Gene Identifier Mapping
# Heuristically choose the SOFT/annotation file, the probe-ID column, and the
# gene-symbol column that best match the probes in gene_data, then collapse
# probe-level expression to gene-level expression.
import os
import pandas as pd

# 1) Candidate annotation files: anything in the cohort dir with 'soft' in its name.
soft_files = [os.path.join(in_cohort_dir, f) for f in os.listdir(in_cohort_dir) if 'soft' in f.lower()]

# Best (file, probe column, symbol column) found so far across candidates.
best_soft = None
best_probe_col = None
best_symbol_col = None
best_match_count = -1  # number of probes matched to gene_data.index

# Use a subset of probe IDs (at most 5000) for a quick overlap test.
probe_index = pd.Index(gene_data.index.astype(str))
subset_probe = probe_index[:min(5000, len(probe_index))]

# Column names commonly used for gene symbols, in priority order.
symbol_priority = [
    'Gene Symbol', 'GENE_SYMBOL', 'GeneSymbol', 'Symbol', 'SYMBOL', 'gene_symbol',
    'GENESYMBOL', 'Gene Name', 'GENE_NAME', 'gene_assignment', 'DESCRIPTION',
    'Description', 'Entrez Gene Symbol', 'ENTREZ_GENE_SYMBOL'
]

for sf in soft_files:
    try:
        ann = get_gene_annotation(sf)
        if ann is None or not isinstance(ann, pd.DataFrame) or ann.empty:
            continue
        # Count, for every column, how many of its values coincide with probe IDs.
        probe_col_candidate = None  # NOTE(review): assigned but never used below
        probe_match_counts = {}
        for col in ann.columns:
            try:
                match_count = ann[col].astype(str).isin(subset_probe).sum()
                probe_match_counts[col] = match_count
            except Exception:
                # Best-effort: skip columns that cannot be compared.
                continue
        if not probe_match_counts:
            continue
        # The column with the maximum overlap is this file's probe-ID candidate.
        candidate_col, candidate_count = max(probe_match_counts.items(), key=lambda x: x[1])
        # Keep this platform only if it beats the best so far and has any overlap.
        if candidate_count > best_match_count and candidate_count > 0:
            # Find a gene-symbol column: first by well-known column names...
            symbol_col = None
            for c in symbol_priority:
                if c in ann.columns:
                    symbol_col = c
                    break
            # ...otherwise the column yielding the most extractable human symbols
            # (extract_human_gene_symbols is a project helper; assumed to return
            # a sized container per value — TODO confirm its contract).
            if symbol_col is None:
                non_empty_counts = {}
                for col in ann.columns:
                    try:
                        symbols_extracted = ann[col].astype(str).map(extract_human_gene_symbols)
                        non_empty_counts[col] = symbols_extracted.map(lambda x: len(x) > 0).sum()
                    except Exception:
                        continue
                if non_empty_counts:
                    symbol_col = max(non_empty_counts.items(), key=lambda x: x[1])[0]
            if symbol_col is not None:
                best_soft = sf
                best_probe_col = candidate_col
                best_symbol_col = symbol_col
                best_match_count = candidate_count
    except Exception:
        # Best-effort scan: skip unreadable or mismatched annotation files.
        continue

# 2) Build the probe->gene mapping and 3) apply it to get gene-level data.
if best_soft is not None and best_probe_col is not None and best_symbol_col is not None:
    selected_annotation = get_gene_annotation(best_soft)
    mapping_df = get_gene_mapping(selected_annotation, prob_col=best_probe_col, gene_col=best_symbol_col)
    gene_data = apply_gene_mapping(expression_df=gene_data, mapping_df=mapping_df)
else:
    # If no suitable mapping was found, keep gene_data unchanged (probe-level) to avoid crashing downstream steps.
    # This branch should be rare; it indicates annotation mismatch (e.g., miRNA platform vs mRNA probes).
    gene_data = gene_data  # deliberate no-op: leaves probe-level matrix in place
# Step 7: Data Normalization and Linking
import os
import pandas as pd

# 1. Normalize gene symbols in the index and persist the gene expression matrix.
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
normalized_gene_data.to_csv(out_gene_data_file)

# Helper flags and note assembly for the final metadata record.
is_gene_available_fin = not normalized_gene_data.empty
notes = []
if not is_gene_available_fin:
    notes.append("WARNING: Normalized gene matrix is empty after symbol normalization; "
                 "probe->gene mapping likely failed due to platform annotation mismatch (e.g., miRNA vs mRNA).")
linked_data = None  # default

# 2-6. Proceed only if clinical features were extracted (i.e., selected_clinical_data exists and contains the trait)
# NOTE(review): this guard checks for 'selected_clinical_data', but the
# (commented-out) extraction in Step 2 would create 'selected_clinical_df'.
# If that extraction is ever enabled, the names must be reconciled or this
# branch will never run — confirm the intended variable name.
if 'selected_clinical_data' in globals() and isinstance(selected_clinical_data, pd.DataFrame) and not selected_clinical_data.empty:
    try:
        # 2. Link clinical and genetic data
        linked_data = geo_link_clinical_genetic_data(selected_clinical_data, normalized_gene_data)
        # 3. Handle missing values
        linked_data = handle_missing_values(linked_data, trait)
        # 4. Bias checking and removal of biased covariates
        is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data, trait)
        # 5. Final validation and metadata saving
        os.makedirs(os.path.dirname(json_path), exist_ok=True)
        is_usable = validate_and_save_cohort_info(
            is_final=True,
            cohort=cohort,
            info_path=json_path,
            is_gene_available=is_gene_available_fin,
            is_trait_available=True,
            is_biased=is_trait_biased,
            df=unbiased_linked_data,
            note=(" ".join(notes) if notes else "INFO: Clinical features linked and processed.")
        )
        # 6. Save linked data only if usable
        if is_usable:
            os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
            unbiased_linked_data.to_csv(out_data_file)
    except Exception as e:
        # If anything fails during linking due to unexpected shapes/availability, record as unavailable
        notes.append(f"ERROR: Linking/processing failed with error: {e}")
        os.makedirs(os.path.dirname(json_path), exist_ok=True)
        _ = validate_and_save_cohort_info(
            is_final=True,
            cohort=cohort,
            info_path=json_path,
            is_gene_available=is_gene_available_fin,
            is_trait_available=False,
            is_biased=False,
            df=pd.DataFrame(),
            note=" ".join(notes)
        )
else:
    # Trait not available (as in this cohort), skip linking and mark dataset unusable
    notes.append("INFO: Clinical trait labels (Epilepsy) not available in series matrix; "
                 "skipping linking and marking dataset as unusable.")
    os.makedirs(os.path.dirname(json_path), exist_ok=True)
    _ = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=is_gene_available_fin,
        is_trait_available=False,
        is_biased=False,
        df=pd.DataFrame(),
        note=" ".join(notes)
    )