# Provenance: uploaded by Liu-Hy via the upload-large-folder tool (commit fcf7aea, verified).
# Path Configuration
from tools.preprocess import *
# Processing context: trait under study and the GEO series (cohort) being preprocessed.
trait = "Alopecia"
cohort = "GSE81071"
# Input paths: trait-level directory and this cohort's raw GEO files.
in_trait_dir = "../DATA/GEO/Alopecia"
in_cohort_dir = "../DATA/GEO/Alopecia/GSE81071"
# Output paths: linked dataset, gene-only matrix, clinical-only matrix, and cohort metadata JSON.
out_data_file = "./output/z1/preprocess/Alopecia/GSE81071.csv"
out_gene_data_file = "./output/z1/preprocess/Alopecia/gene_data/GSE81071.csv"
out_clinical_data_file = "./output/z1/preprocess/Alopecia/clinical_data/GSE81071.csv"
json_path = "./output/z1/preprocess/Alopecia/cohort_info.json"
# Step 1: Initial Data Loading
from tools.preprocess import *
# 1. Identify the paths to the SOFT file and the matrix file
soft_file, matrix_file = geo_get_relevant_filepaths(in_cohort_dir)
# 2. Read the matrix file to obtain background information and sample characteristics data
background_prefixes = ['!Series_title', '!Series_summary', '!Series_overall_design']
clinical_prefixes = ['!Sample_geo_accession', '!Sample_characteristics_ch1']
background_info, clinical_data = get_background_and_clinical_data(matrix_file, background_prefixes, clinical_prefixes)
# 3. Obtain the sample characteristics dictionary from the clinical dataframe
sample_characteristics_dict = get_unique_values_by_row(clinical_data)
# 4. Explicitly print out all the background information and the sample characteristics dictionary
# (this printout is what Step 2 relies on to decide which rows hold trait/age/gender).
print("Background Information:")
print(background_info)
print("Sample Characteristics Dictionary:")
print(sample_characteristics_dict)
# Step 2: Dataset Analysis and Clinical Feature Extraction
import re
import os
import pandas as pd
# 1) Gene expression availability
is_gene_available = True # Affymetrix mRNA microarrays from FFPE blocks
# 2) Variable availability from the provided sample characteristics
# Sample Characteristics Dictionary indicates only disease state and tissue, no explicit age or gender.
# Trait here is Alopecia, which is not explicitly recorded; inferring from DLE is unreliable (<90% certainty).
# All three rows are None, so no clinical feature can be extracted from this cohort.
trait_row = None
age_row = None
gender_row = None
# 2.2) Converters (defined unconditionally so the extraction call below stays uniform across cohorts,
# even though the row indices above are all None for this one).
def _after_colon(x: str) -> str:
if x is None:
return ""
parts = str(x).split(":", 1)
return parts[1].strip() if len(parts) == 2 else str(x).strip()
def convert_trait(x):
    """Map a sample-characteristics string to binary Alopecia status.

    Returns 1 for an explicit alopecia mention, 0 for an explicit
    negation ('no alopecia' / 'without alopecia'), and None when the
    value is missing or uninformative.
    """
    # Extract the value part after 'key: value', lowercased (helper inlined).
    if x is None:
        val = ""
    else:
        text = str(x)
        _key, sep, rest = text.partition(":")
        val = (rest if sep else text).strip().lower()
    if val in ("", "na", "n/a", "none", "unknown", "not available"):
        return None
    negated = "no alopecia" in val or "without alopecia" in val
    # Positive indication only when not explicitly negated.
    if "alopecia" in val and not negated:
        return 1
    if negated:
        return 0
    # Healthy/control samples without alopecia info stay undetermined.
    return None
def convert_age(x):
    """Extract the first number from the value part of 'key: value' as a float.

    Returns None when the value is missing, empty, or contains no digits.
    """
    # Extract the value part after 'key: value' (helper inlined for clarity).
    if x is None:
        return None
    text = str(x)
    _key, sep, rest = text.partition(":")
    val = (rest if sep else text).strip()
    if not val:
        return None
    # First integer or decimal number anywhere in the value.
    m = re.search(r"(\d+(\.\d+)?)", val)
    # FIX: the original wrapped float() in try/except, but the regex match
    # is always a valid float literal, so the handler was unreachable.
    return float(m.group(1)) if m else None
def convert_gender(x):
    """Encode a gender field as 0 (female) / 1 (male); None when unknown."""
    # Extract the value part after 'key: value', lowercased (helper inlined).
    if x is None:
        val = ""
    else:
        text = str(x)
        _key, sep, rest = text.partition(":")
        val = (rest if sep else text).strip().lower()
    missing_tokens = {"", "na", "n/a", "none", "unknown", "not available"}
    if val in missing_tokens:
        return None
    if val in {"female", "f", "woman", "women"}:
        return 0
    if val in {"male", "m", "man", "men"}:
        return 1
    return None
# 3) Initial filtering metadata save
is_trait_available = trait_row is not None
# Record gene/trait availability so downstream tooling can skip unusable cohorts early.
_ = validate_and_save_cohort_info(
    is_final=False,
    cohort=cohort,
    info_path=json_path,
    is_gene_available=is_gene_available,
    is_trait_available=is_trait_available
)
# 4) Clinical feature extraction (skip if trait not available)
# For this cohort trait_row is None, so this branch is not entered.
if trait_row is not None:
    selected_clinical_df = geo_select_clinical_features(
        clinical_df=clinical_data,
        trait=trait,
        trait_row=trait_row,
        convert_trait=convert_trait,
        age_row=age_row,
        convert_age=convert_age,
        gender_row=gender_row,
        convert_gender=convert_gender
    )
    # NOTE(review): `preview` is computed but never printed or used — consider printing it.
    preview = preview_df(selected_clinical_df, n=5)
    os.makedirs(os.path.dirname(out_clinical_data_file), exist_ok=True)
    selected_clinical_df.to_csv(out_clinical_data_file, index=True)
# Step 3: Gene Data Extraction
# 1. Use the get_genetic_data function from the library to get the gene_data from the matrix_file previously defined.
gene_data = get_genetic_data(matrix_file)
# 2. Print the first 20 row IDs (gene or probe identifiers) for future observation.
print(gene_data.index[:20])
# Step 4: Gene Identifier Review
# Flag set after reviewing the probe IDs printed above: they are platform probes, not gene symbols.
requires_gene_mapping = True
print(f"requires_gene_mapping = {requires_gene_mapping}")
# Step 5: Gene Annotation
# 1. Use the 'get_gene_annotation' function from the library to get gene annotation data from the SOFT file.
gene_annotation = get_gene_annotation(soft_file)
# 2. Use the 'preview_df' function from the library to preview the data and print out the results.
# The preview reveals which annotation columns hold probe IDs and gene symbols for Step 6.
print("Gene annotation preview:")
print(preview_df(gene_annotation))
# Step 6: Gene Identifier Mapping
import os
import re
# We will try to map probes to gene symbols using the best available annotation.
# 1) Try platform (GPL) annotation first for a SYMBOL-like column.
# 2) Fallback to series-level annotation for symbol columns.
# 3) If no symbol column works, map to Entrez IDs explicitly without using extract_human_gene_symbols.
# 4) If all fail, keep probe-level data and warn.
probe_col = 'ID' # Matches gene_data index name after get_genetic_data()
def find_symbol_column(df):
    """Pick the most plausible gene-symbol column from an annotation dataframe.

    Preference order: well-known exact column names, then case-insensitive
    regex variants of 'gene symbol', then columns that commonly embed
    symbol-like text. Returns the column name, or None when nothing matches.
    """
    columns = list(df.columns)
    exact_names = [
        'SYMBOL', 'Gene Symbol', 'GENE_SYMBOL', 'GENE SYMBOL', 'Symbol', 'gene_symbol',
        'Gene symbol', 'gene symbols', 'GENE_SYMBOLS', 'Gene Symbols'
    ]
    for name in exact_names:
        if name in columns:
            return name
    # Regex pass: tolerate spacing/case variants of 'gene symbol(s)'.
    symbol_patterns = [
        r'^\s*gene\s*symbol\s*$',
        r'^\s*symbol\s*$',
        r'^\s*gene\s*symbols?\s*$',
        r'associated\s*gene\s*symbol'
    ]
    for col in columns:
        if any(re.search(pat, col, flags=re.IGNORECASE) for pat in symbol_patterns):
            return col
    # Last resort: columns that often contain symbol-like strings.
    loose_names = [
        'GENE_ASSIGNMENT', 'Gene Assignment', 'Gene assignment',
        'GENE_TITLE', 'Gene Title', 'Gene title',
        'Representative Public ID', 'REPRESENTATIVE_PUBLIC_ID'
    ]
    for name in loose_names:
        if name in columns:
            return name
    return None
def map_by_entrez(expression_df, anno_df, prob_col='ID', entrez_col='ENTREZ_GENE_ID'):
    """Collapse probe-level expression into Entrez-gene-level expression.

    Each probe's signal is divided evenly across its Entrez IDs, then
    summed per gene. Returns a gene-by-sample dataframe, or None whenever
    mapping is impossible (missing columns, no probe overlap, no numeric
    Entrez IDs, or an empty result).
    """
    if prob_col not in anno_df.columns or entrez_col not in anno_df.columns:
        return None
    mapping = anno_df.loc[:, [prob_col, entrez_col]].dropna()
    if mapping.empty:
        return None
    mapping = mapping.rename(columns={prob_col: 'ID', entrez_col: 'Entrez'})
    mapping['ID'] = mapping['ID'].astype(str).str.strip()
    mapping = mapping[mapping['ID'] != '']
    # Restrict to probes actually measured in the expression matrix.
    mapping = mapping[mapping['ID'].isin(expression_df.index)]
    if mapping.empty:
        return None

    def _entrez_tokens(value):
        # Split multi-gene fields ('///', ';', ',', whitespace) and keep only
        # pure-numeric Entrez IDs.
        if value is None:
            return []
        tokens = re.split(r'[;,\s]+', str(value).replace('///', ';'))
        return [t for t in tokens if re.fullmatch(r'\d+', t)]

    mapping['Gene'] = mapping['Entrez'].map(_entrez_tokens)
    mapping['num_genes'] = mapping['Gene'].apply(len)
    mapping = mapping.explode('Gene').dropna(subset=['Gene'])
    if mapping.empty:
        return None
    merged = mapping.set_index('ID').join(expression_df)
    sample_cols = [c for c in merged.columns if c not in ['Gene', 'num_genes', 'Entrez']]
    if not sample_cols:
        return None
    # Distribute each probe's signal evenly across its genes, then aggregate.
    merged[sample_cols] = merged[sample_cols].div(merged['num_genes'].replace(0, 1), axis=0)
    per_gene = merged.groupby('Gene')[sample_cols].sum()
    if per_gene.empty:
        return None
    return per_gene
# Build annotation sources: platform first (if available), then series-level annotation
annotation_sources = []
# Search recursively for GPL files (gzipped) in the cohort directory
try:
    gpl_paths = []
    for root, _, files in os.walk(in_cohort_dir):
        for f in files:
            fl = f.lower()
            if ('gpl' in fl) and (f.endswith('.gz')): # prioritize gz which get_gene_annotation can read
                gpl_paths.append(os.path.join(root, f))
    # Add platform annotations first (sorted for a deterministic order).
    for p in sorted(gpl_paths):
        try:
            gpl_anno = get_gene_annotation(p)
            annotation_sources.append(('platform_soft', gpl_anno))
        except Exception:
            # Unparseable GPL file: skip it and try the next candidate.
            continue
except Exception:
    # Directory walk failed (e.g. missing directory); rely on the series annotation only.
    pass
# Add the series-level annotation as fallback
annotation_sources.append(('series_soft', gene_annotation))
mapped = False
# Try symbol-based mapping first
for src_name, anno_df in annotation_sources:
    try:
        gene_col = find_symbol_column(anno_df)
        if gene_col is None:
            continue
        mapping_df = get_gene_mapping(anno_df, prob_col=probe_col, gene_col=gene_col)
        if mapping_df.empty:
            continue
        # Map probes to symbols
        candidate_gene_data = apply_gene_mapping(gene_data, mapping_df)
        if candidate_gene_data is not None and candidate_gene_data.shape[0] > 0:
            # Only overwrite gene_data once a non-empty mapped matrix is obtained.
            gene_data = candidate_gene_data
            print(f"Gene mapping to SYMBOLs successful using source='{src_name}', "
                  f"probe_col='{probe_col}', gene_col='{gene_col}'. "
                  f"Mapped genes: {gene_data.shape[0]}")
            mapped = True
            break
    except Exception:
        # Any failure with this source: fall through to the next annotation source.
        continue
# If symbol mapping failed, try Entrez-based mapping explicitly
if not mapped:
    for src_name, anno_df in annotation_sources:
        try:
            if 'ENTREZ_GENE_ID' not in anno_df.columns:
                continue
            candidate_gene_data = map_by_entrez(gene_data, anno_df, prob_col=probe_col, entrez_col='ENTREZ_GENE_ID')
            if candidate_gene_data is not None and candidate_gene_data.shape[0] > 0:
                gene_data = candidate_gene_data
                print(f"Gene mapping to Entrez IDs successful using source='{src_name}', "
                      f"probe_col='{probe_col}', gene_col='ENTREZ_GENE_ID'. "
                      f"Mapped genes: {gene_data.shape[0]}")
                mapped = True
                break
        except Exception:
            continue
# If both strategies fail, retain probe-level expression
if not mapped:
    print("WARNING: Failed to map probes to gene symbols or Entrez IDs. "
          "Proceeding with probe-level expression data.")
# Step 7: Data Normalization and Linking
import os
import re
import pandas as pd
# Helper to find a plausible SYMBOL column
def _find_symbol_col(cols):
priority = [
'SYMBOL', 'Gene Symbol', 'GENE_SYMBOL', 'GENE SYMBOL', 'Symbol',
'gene_symbol', 'Gene symbol', 'GENE_SYMBOLS', 'Gene Symbols'
]
for c in priority:
if c in cols:
return c
# Regex fallback
for c in cols:
if re.search(r'\bsymbols?\b', c, flags=re.IGNORECASE):
return c
return None
# Build Entrez->Symbol mapping from any available annotation (GPL preferred, then series-level)
entrez_to_symbol = {}
annotation_sources = []
# Try to load GPL annotations (same recursive search as Step 6).
try:
    gpl_paths = []
    for root, _, files in os.walk(in_cohort_dir):
        for f in files:
            fl = f.lower()
            if ('gpl' in fl) and f.endswith('.gz'):
                gpl_paths.append(os.path.join(root, f))
    for p in sorted(gpl_paths):
        try:
            gpl_anno = get_gene_annotation(p)
            annotation_sources.append(('platform_soft', gpl_anno))
        except Exception:
            # Unparseable GPL file: skip and try the next one.
            continue
except Exception:
    # Directory walk failed; rely on the series-level annotation below.
    pass
# Add previously loaded series-level annotation as fallback (from Step 5)
if 'gene_annotation' in locals():
    annotation_sources.append(('series_soft', gene_annotation))
def _split_list_field(x):
if x is None:
return []
s = str(x)
# Common delimiters in GEO/GPL annotations
s = s.replace('///', ';')
parts = re.split(r'[;,/|\s]+', s)
parts = [p for p in parts if p] # non-empty
return parts
# Construct mapping: walk annotation sources until one yields any Entrez->Symbol pairs.
for src_name, anno_df in annotation_sources:
    try:
        if 'ENTREZ_GENE_ID' not in anno_df.columns:
            continue
        sym_col = _find_symbol_col(anno_df.columns)
        if sym_col is None:
            continue
        sub = anno_df[['ENTREZ_GENE_ID', sym_col]].dropna()
        if sub.empty:
            continue
        for _, row in sub.iterrows():
            # Keep only pure-numeric Entrez IDs from the (possibly multi-valued) field.
            entrez_list = [p for p in _split_list_field(row['ENTREZ_GENE_ID']) if re.fullmatch(r'\d+', p)]
            sym_list = _split_list_field(row[sym_col])
            # Choose the first plausible gene symbol token
            sym = None
            for token in sym_list:
                tok = token.strip()
                # Basic sanity: uppercase letters/digits/dash or C#orf#
                if re.fullmatch(r"(?:[A-Z][A-Z0-9-]{0,9}|C\d+orf\d+)", tok):
                    sym = tok
                    break
            if sym is None and sym_list:
                sym = sym_list[0].strip() # fallback to first token
            if sym:
                # First-wins policy: keep the symbol from the earliest row seen.
                for e in entrez_list:
                    if e not in entrez_to_symbol:
                        entrez_to_symbol[e] = sym
        # If we built a decent mapping, we can stop early
        if len(entrez_to_symbol) > 0:
            break
    except Exception:
        # Any failure with this source: try the next annotation source.
        continue
# 1) Normalize to gene symbols if possible, then apply synonym normalization; otherwise keep Entrez IDs
# NOTE(review): this assumes gene_data is Entrez-indexed at this point. If Step 6
# succeeded with SYMBOL mapping instead, every entrez_to_symbol lookup misses and
# the symbol-indexed matrix is saved WITHOUT synonym normalization — confirm whether
# normalize_gene_symbols_in_index should also run on an already-symbol index.
normalized_gene_data = None
note_parts = []
try:
    if len(entrez_to_symbol) > 0:
        # Map current Entrez-indexed expression to SYMBOLs
        mapped_index = gene_data.index.to_series().map(lambda x: entrez_to_symbol.get(str(x)))
        symbol_gene_data = gene_data.copy()
        symbol_gene_data.index = mapped_index
        # Drop probes whose Entrez ID had no symbol in the mapping.
        symbol_gene_data = symbol_gene_data[symbol_gene_data.index.notnull()]
        if len(symbol_gene_data) > 0:
            # Aggregate duplicates and normalize using synonym dictionary
            symbol_gene_data = symbol_gene_data.groupby(symbol_gene_data.index).sum()
            candidate = normalize_gene_symbols_in_index(symbol_gene_data)
            if candidate is not None and len(candidate) > 0:
                normalized_gene_data = candidate
                note_parts.append("Mapped Entrez->SYMBOL using available annotation and normalized symbols via NCBI synonym dictionary.")
            else:
                note_parts.append("SYMBOL normalization produced empty matrix; falling back to Entrez-indexed matrix.")
        else:
            note_parts.append("No SYMBOLs obtained from Entrez mapping; falling back to Entrez-indexed matrix.")
    else:
        note_parts.append("No SYMBOL column available in annotation; kept Entrez-indexed matrix.")
except Exception as e:
    note_parts.append(f"Symbol normalization failed with error: {e}; kept Entrez-indexed matrix.")
# Choose the gene matrix to save
gene_matrix_to_save = normalized_gene_data if normalized_gene_data is not None else gene_data
os.makedirs(os.path.dirname(out_gene_data_file), exist_ok=True)
gene_matrix_to_save.to_csv(out_gene_data_file)
# 2-6) Linking and downstream steps should proceed only if trait data is available
# FIX: Step 2 names the clinical dataframe `selected_clinical_df`; the original
# check looked for `selected_clinical_data`, which is never defined anywhere, so
# the linking branch could never run even when trait data existed.
if 'selected_clinical_df' in locals():
    # 2. Link clinical and genetic data
    linked_data = geo_link_clinical_genetic_data(selected_clinical_df, gene_matrix_to_save)
    # 3. Handle missing values
    linked_data = handle_missing_values(linked_data, trait)
    # 4. Check bias and remove biased demographics
    is_trait_biased, unbiased_linked_data = judge_and_remove_biased_features(linked_data, trait)
    # 5. Final validation and metadata save
    note = "INFO: " + " ".join(note_parts) if note_parts else "INFO: Standard preprocessing completed."
    is_usable = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=True,
        is_biased=is_trait_biased,
        df=unbiased_linked_data,
        note=note
    )
    # 6. Save linked data only when the cohort passed final validation.
    if is_usable:
        os.makedirs(os.path.dirname(out_data_file), exist_ok=True)
        unbiased_linked_data.to_csv(out_data_file)
else:
    # Trait not available; record final metadata with a note, no linking performed
    note = "INFO: Trait not available; skipped linking and QC. " + (" ".join(note_parts) if note_parts else "")
    _ = validate_and_save_cohort_info(
        is_final=True,
        cohort=cohort,
        info_path=json_path,
        is_gene_available=True,
        is_trait_available=False,
        is_biased=False, # placeholder; will be recorded as None since data is not available
        df=gene_matrix_to_save,
        note=note
    )