# Source: uploaded by Liu-Hy via the upload-large-folder tool (commit 933cd71, verified)
# Path Configuration
from tools.preprocess import *
# Processing context: the trait under study and the GEO cohort providing the data
trait = "Arrhythmia"
cohort = "GSE235307"

# Input paths (raw GEO downloads), derived from the processing context
in_trait_dir = f"../DATA/GEO/{trait}"
in_cohort_dir = f"{in_trait_dir}/{cohort}"

# Output paths for the preprocessed artifacts
_out_root = f"./output/z1/preprocess/{trait}"
out_data_file = f"{_out_root}/{cohort}.csv"
out_gene_data_file = f"{_out_root}/gene_data/{cohort}.csv"
out_clinical_data_file = f"{_out_root}/clinical_data/{cohort}.csv"
json_path = f"{_out_root}/cohort_info.json"
# Step 1: Initial Data Loading
from tools.preprocess import *
# Locate the SOFT and series-matrix files for this cohort.
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)

# Pull series-level background text and the per-sample characteristics table.
bg_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clin_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(
    matrix_file, bg_prefixes, clin_prefixes
)

# Summarize the unique values in each characteristics row for inspection.
sample_characteristics_dict = get_unique_values_by_row(clinical_data)

# Print everything so the row indices for trait/age/gender can be chosen.
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 2: Dataset Analysis and Clinical Feature Extraction
import os
import re
import pandas as pd
# 1) Gene-expression availability: the series title indicates gene expression
# profiling rather than miRNA or methylation data.
is_gene_available = True

# 2) Row indices of the relevant entries in the sample characteristics table.
trait_row = 5   # 'cardiac rhythm after 1 year follow-up: ...'
age_row = 2     # 'age: ...'
gender_row = 1  # 'gender: ...'
# 2.2) Conversion utilities
def _after_colon(x):
if x is None or (isinstance(x, float) and pd.isna(x)):
return None
s = str(x)
if ':' in s:
s = s.split(':', 1)[1]
return s.strip()
def convert_trait(x):
"""
Binary:
- 1: Atrial fibrillation (AF)
- 0: Sinus rhythm
- None: unknown/other
"""
v = _after_colon(x)
if v is None:
return None
vl = v.strip().lower()
if 'atrial fibrillation' in vl or 'a-fib' in vl or (('atrial' in vl) and ('fibrillation' in vl)) or vl == 'af':
return 1
if 'sinus' in vl and 'rhythm' in vl:
return 0
if vl == 'sr':
return 0
return None
def convert_age(x):
"""
Continuous age in years. Extract first numeric token; return float if valid (0 < age <= 120), else None.
"""
v = _after_colon(x)
if v is None:
return None
m = re.search(r'(\d+(\.\d+)?)', v)
if not m:
return None
try:
age_val = float(m.group(1))
except Exception:
return None
if 0 < age_val <= 120:
return age_val
return None
def convert_gender(x):
"""
Binary gender:
- 1: Male
- 0: Female
- None: unknown/other
"""
v = _after_colon(x)
if v is None:
return None
vl = v.strip().lower()
if vl in ['male', 'm', 'man']:
return 1
if vl in ['female', 'f', 'woman', 'women']:
return 0
return None
# 3) Record initial (pre-QC) availability metadata for this cohort.
# Trait availability follows directly from having found a trait row above.
is_trait_available = trait_row is not None
_ = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available,
)
# 4) Clinical feature extraction, performed only when the trait is available.
if is_trait_available:
    if 'clinical_data' not in locals():
        print("WARNING: 'clinical_data' not found in environment. Skipping clinical feature extraction.")
    else:
        selected_clinical_df = geo_select_clinical_features(
            clinical_df=clinical_data,
            trait=trait,
            trait_row=trait_row,
            convert_trait=convert_trait,
            age_row=age_row,
            convert_age=convert_age,
            gender_row=gender_row,
            convert_gender=convert_gender,
        )
        print("Clinical preview:", preview_df(selected_clinical_df))
        # NOTE: saved with index=False, so the [trait, 'Age', 'Gender'] row
        # labels are dropped on disk and must be restored when reloading.
        os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
        selected_clinical_df.to_csv(out_clinical_data_file, index=False)
# Step 3: Gene Data Extraction
# Pull the probe-level expression matrix from the series-matrix file.
gene_data = get_genetic_data(matrix_file)

# Show the first 20 row identifiers so the ID type can be sanity-checked.
print(gene_data.index[:20])

# Step 4: Gene Identifier Review
# The row IDs are platform probe identifiers, so probe-to-symbol mapping is needed.
print("requires_gene_mapping = True")
# Step 5: Gene Annotation
# Parse the probe annotation records from the SOFT file and preview them so
# the probe-ID and gene-symbol columns can be identified.
gene_annotation = get_gene_annotation(soft_file)
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# Step 6: Gene Identifier Mapping
# Pick the annotation column whose values best overlap the expression-matrix
# index: that column holds the probe IDs used by this platform.
id_candidates = [col for col in ('ID', 'NAME', 'SPOT_ID') if col in gene_annotation.columns]
expr_ids = set(gene_data.index.astype(str))

def _overlap(col):
    # Number of annotation IDs that also occur as expression row IDs.
    return len(expr_ids & set(gene_annotation[col].astype(str)))

# max() keeps the first candidate on ties, matching the original scan order;
# fall back to 'ID' if no candidate column exists (shouldn't happen given preview).
best_id_col = max(id_candidates, key=_overlap) if id_candidates else 'ID'

# Gene-symbol column: tolerate common naming variants instead of assuming
# 'GENE_SYMBOL' exists; default to the original name so a truly missing
# column still fails loudly inside get_gene_mapping.
_symbol_candidates = ['GENE_SYMBOL', 'Gene Symbol', 'GENE SYMBOL', 'SYMBOL', 'gene_symbol']
gene_symbol_col = next(
    (col for col in _symbol_candidates if col in gene_annotation.columns),
    'GENE_SYMBOL',
)

# Build the probe -> symbol mapping and aggregate probe-level values to genes.
mapping_df = get_gene_mapping(gene_annotation, prob_col=best_id_col, gene_col=gene_symbol_col)
gene_data = apply_gene_mapping(expression_df=gene_data, mapping_df=mapping_df)
# Step 7: Data Normalization and Linking
import os
import pandas as pd

# 1. Normalize gene symbols and persist the gene-level expression matrix.
normalized_gene_data = normalize_gene_symbols_in_index(gene_data)
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
normalized_gene_data.to_csv(out_gene_data_file)

# 2. Link clinical and genetic data. Prefer the in-memory clinical dataframe;
# otherwise reload from disk and restore the row labels that were dropped
# because the file was saved with index=False.
if 'selected_clinical_df' not in locals():
    if not os.path.exists(out_clinical_data_file):
        raise RuntimeError("Clinical data not found in memory or on disk.")
    reloaded = pd.read_csv(out_clinical_data_file)
    # Expecting exactly three rows: trait, age, gender.
    if reloaded.shape[0] == 3:
        reloaded.index = [trait, 'Age', 'Gender']
    selected_clinical_df = reloaded

linked_data = geo_link_clinical_genetic_data(selected_clinical_df, normalized_gene_data)
# 3. Handle missing values in the linked trait + expression table.
linked_data = handle_missing_values(linked_data, trait)

# 4. Assess trait balance and drop biased demographic features.
is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data, trait)

# 5. Final validation; records cohort usability metadata to json_path.
note = (
    f"INFO: Gene symbols normalized using NCBI synonyms. "
    f"Linked {normalized_gene_data.shape[1]} samples and "
    f"{normalized_gene_data.shape[0]} genes before QC."
)
is_usable = validate_and_save_cohort_info(
    is_final=True,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=True,
    is_trait_available=True,
    is_biased=is_trait_biased,
    df=unbiased_linked_data,
    note=note,
)

# 6. Persist the linked dataset only when the cohort passed validation.
if is_usable:
    os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
    unbiased_linked_data.to_csv(out_data_file)