# mahendrawada_2025/scripts/mahendrawada_annotated_features.R
# (source: repository restructure with new dataset organization, commit 5b577ce)
library(tidyverse)
library(arrow)
library(here)
library(yaml)
#' Convert a BED-style data frame to a GRanges object
#'
#' BED coordinates are 0-indexed and half-open [start, end); GenomicRanges
#' uses 1-indexed closed intervals [start, end]. Shifting the start by +1
#' while keeping the end unchanged performs exactly that conversion.
#'
#' @param bed_df Data frame with chr, start, end columns in BED format
#'   (0-indexed, half-open)
#' @param zero_indexed Logical, whether input is 0-indexed (default: TRUE)
#' @return A strand-agnostic GRanges object; any columns other than
#'   chr/start/end/strand are carried along as metadata columns
bed_to_granges <- function(bed_df, zero_indexed = TRUE) {
  required <- c("chr", "start", "end")
  if (!all(required %in% names(bed_df))) {
    stop("bed_df must have columns: chr, start, end")
  }
  # Shift start by one only when converting from 0-based half-open intervals
  start_adj <- if (zero_indexed) bed_df$start + 1 else bed_df$start
  # Strand is deliberately "*": calling cards insertions are strand-agnostic
  gr <- GenomicRanges::GRanges(
    seqnames = bed_df$chr,
    ranges = IRanges::IRanges(start = start_adj, end = bed_df$end),
    strand = "*"
  )
  # Carry over any remaining columns as range metadata
  meta_cols <- setdiff(names(bed_df), c(required, "strand"))
  if (length(meta_cols) > 0) {
    GenomicRanges::mcols(gr) <- bed_df[, meta_cols, drop = FALSE]
  }
  gr
}
#' Sum scores of overlapping insertions per region
#'
#' @param insertions_gr GRanges of insertions with a 'score' metadata column
#' @param regions_gr GRanges of regions to quantify
#' @return Numeric vector with one summed score per region (0 where no
#'   insertion overlaps the region)
sum_overlap_scores <- function(insertions_gr, regions_gr) {
  # Start from zeros so regions without any overlap keep a 0 score
  result <- numeric(length(regions_gr))
  hits <- GenomicRanges::findOverlaps(regions_gr, insertions_gr)
  if (length(hits) > 0) {
    # Score of each overlapping insertion, aligned with the hit pairs
    hit_scores <-
      GenomicRanges::mcols(insertions_gr)$score[S4Vectors::subjectHits(hits)]
    # Aggregate scores by region index
    per_region <- tapply(hit_scores, S4Vectors::queryHits(hits), sum)
    result[as.integer(names(per_region))] <- per_region
  }
  result
}
#' Combine replicate insertion counts over annotated regions for one sample
#'
#' Reads from script-level globals: `chec_genomemap_meta` (maps sample_id to
#' SRA accessions), `mahendrawada_genome_map_ds` (arrow dataset of insertion
#' records) and `regions_gr` (the annotated regions). NOTE(review): those
#' globals are defined later in this file; call only after the data-loading
#' section has run.
#'
#' @param sampleid regulator_locus_tag used to get a set of replicates
#' @return list with:
#'   * library_total: tibble of total insertion rows per sra_accession
#'   * replicates: named list (by sra_accession) of `regions_gr` copies whose
#'     `score` holds that replicate's per-region summed insertion scores
#'   * combined: `regions_gr` with `score` = element-wise sum over replicates
combine_replicates_af = function(sampleid){
  message(sprintf("working on sample_id: %s", sampleid))
  # All SRA runs (replicates) belonging to this sample
  sra_accession_list = chec_genomemap_meta %>%
    filter(sample_id == sampleid) %>%
    pull(sra_accession)
  # Library size: total number of insertion records per replicate
  library_totals = mahendrawada_genome_map_ds %>%
    filter(sra_accession %in% sra_accession_list) %>%
    group_by(sra_accession) %>%
    tally() %>%
    collect()
  # Per replicate: materialize insertions, lift to GRanges, sum per region
  replicate_region_counts = map(sra_accession_list, ~{
    sra = .
    insertions_gr = mahendrawada_genome_map_ds %>%
      filter(sra_accession == sra) %>%
      collect() %>%
      dplyr::select(-sra_accession) %>%
      bed_to_granges()
    sum_overlap_scores(insertions_gr, regions_gr)
  })
  # Attach each replicate's per-region counts to a fresh copy of the regions
  replicates = map(replicate_region_counts, ~{
    replicate_regions = regions_gr
    replicate_regions$score = .
    replicate_regions
  })
  names(replicates) = sra_accession_list
  # Element-wise sum of the replicate count vectors
  combined = regions_gr
  combined$score = Reduce(`+`, replicate_region_counts)
  list(
    library_total = library_totals,
    replicates = replicates,
    combined = combined
  )
}
#' Sum control (background) insertion counts over annotated regions
#'
#' Reads from script-level globals: `freemnase_meta` (control SRA accessions),
#' `mahendrawada_control_ds` (arrow dataset of control insertions) and
#' `regions_gr`. NOTE(review): those globals are defined later in this file;
#' call only after the data-loading section has run.
#'
#' @return list with:
#'   * library_totals: tibble of total insertion rows per control
#'     sra_accession
#'   * af: `regions_gr` with `score` = per-region counts summed element-wise
#'     across all control replicates
combine_control_af = function(){
  # Library size (row count) per control replicate
  library_totals = mahendrawada_control_ds %>%
    group_by(sra_accession) %>%
    tally() %>%
    collect()
  # Per control replicate: materialize insertions, lift to GRanges,
  # sum scores per region
  replicate_region_counts = map(freemnase_meta$sra_accession, ~{
    sra = .
    insertions_gr = mahendrawada_control_ds %>%
      filter(sra_accession == sra) %>%
      collect() %>%
      dplyr::select(-sra_accession) %>%
      bed_to_granges()
    sum_overlap_scores(insertions_gr, regions_gr)
  })
  out = regions_gr
  # combine replicate counts
  out$score = Reduce(`+`, replicate_region_counts)
  list(
    library_totals = library_totals,
    af = out
  )
}
#' Calculate enrichment (calling cards effect)
#'
#' Enrichment is the experiment count fraction divided by the pseudocounted
#' background count fraction for each region.
#'
#' @param total_background_counts Total counts in background (scalar or vector)
#' @param total_experiment_counts Total counts in experiment (scalar or vector)
#' @param background_counts Counts in background per region (vector)
#' @param experiment_counts Counts in experiment per region (vector)
#' @param pseudocount Added to background counts to avoid division by zero
#'   (default: 0.1)
#' @return Numeric vector of enrichment values, one per region
calculate_enrichment <- function(total_background_counts,
                                 total_experiment_counts,
                                 background_counts,
                                 experiment_counts,
                                 pseudocount = 0.1) {
  if (!all(is.numeric(c(total_background_counts, total_experiment_counts,
                        background_counts, experiment_counts)))) {
    stop("All inputs must be numeric")
  }
  n_regions <- length(background_counts)
  if (length(experiment_counts) != n_regions) {
    stop("background_counts and experiment_counts must be the same length")
  }
  # Broadcast scalar library totals across all regions
  if (length(total_background_counts) == 1) {
    total_background_counts <- rep(total_background_counts, n_regions)
  }
  if (length(total_experiment_counts) == 1) {
    total_experiment_counts <- rep(total_experiment_counts, n_regions)
  }
  if (length(total_background_counts) != n_regions ||
      length(total_experiment_counts) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }
  # Experiment fraction over pseudocounted background fraction
  experiment_frac <- experiment_counts / total_experiment_counts
  background_frac <-
    (background_counts + pseudocount) / total_background_counts
  enrichment <- experiment_frac / background_frac
  # Sanity checks, preserved in the original order
  if (any(enrichment < 0, na.rm = TRUE)) {
    stop("Enrichment values must be non-negative")
  }
  if (any(is.na(enrichment))) {
    stop("Enrichment values must not be NA")
  }
  if (any(is.infinite(enrichment))) {
    stop("Enrichment values must not be infinite")
  }
  enrichment
}
#' Calculate Poisson p-values
#'
#' Models per-region experiment counts as Poisson with rate equal to the
#' pseudocounted background count scaled by the ratio of library totals, and
#' returns the upper-tail probability P(X >= experiment_counts).
#'
#' @param total_background_counts Total counts in background (scalar or vector)
#' @param total_experiment_counts Total counts in experiment (scalar or vector)
#' @param background_counts Counts in background per region (vector)
#' @param experiment_counts Counts in experiment per region (vector)
#' @param pseudocount Pseudocount for lambda calculation (default: 0.1)
#' @param ... additional arguments to `ppois`. note that lower tail is set to
#'   FALSE already
#' @return Numeric vector of Poisson p-values, one per region
calculate_poisson_pval <- function(total_background_counts,
                                   total_experiment_counts,
                                   background_counts,
                                   experiment_counts,
                                   pseudocount = 0.1,
                                   ...) {
  if (!all(is.numeric(c(total_background_counts, total_experiment_counts,
                        background_counts, experiment_counts)))) {
    stop("All inputs must be numeric")
  }
  n_regions <- length(background_counts)
  if (length(experiment_counts) != n_regions) {
    stop("background_counts and experiment_counts must be the same length")
  }
  # Broadcast scalar library totals across all regions
  if (length(total_background_counts) == 1) {
    total_background_counts <- rep(total_background_counts, n_regions)
  }
  if (length(total_experiment_counts) == 1) {
    total_experiment_counts <- rep(total_experiment_counts, n_regions)
  }
  if (length(total_background_counts) != n_regions ||
      length(total_experiment_counts) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }
  # Expected experiment counts under the null: background scaled by the
  # experiment/background library-size ratio; pseudocount keeps lambda > 0
  lambda <- (background_counts + pseudocount) *
    (total_experiment_counts / total_background_counts)
  # P(X >= x) via the upper tail: P(X > x - 1)
  ppois(experiment_counts - 1, lambda = lambda, lower.tail = FALSE, ...)
}
#' Calculate hypergeometric p-values
#'
#' Urn model: all counts (background + experiment) are balls, experiment
#' counts are white; each region draws its total counts and we ask how
#' surprising it is to see at least `experiment_counts` white balls.
#'
#' @param total_background_counts Total counts in background (scalar or vector)
#' @param total_experiment_counts Total counts in experiment (scalar or vector)
#' @param background_counts Counts in background per region (vector)
#' @param experiment_counts Counts in experiment per region (vector)
#' @param ... additional arguments to phyper. Note that lower tail is set to
#'   false already
#' @return Numeric vector of hypergeometric p-values, one per region
calculate_hypergeom_pval <- function(total_background_counts,
                                     total_experiment_counts,
                                     background_counts,
                                     experiment_counts,
                                     ...) {
  if (!all(is.numeric(c(total_background_counts, total_experiment_counts,
                        background_counts, experiment_counts)))) {
    stop("All inputs must be numeric")
  }
  n_regions <- length(background_counts)
  if (length(experiment_counts) != n_regions) {
    stop("background_counts and experiment_counts must be the same length")
  }
  # Broadcast scalar library totals across all regions
  if (length(total_background_counts) == 1) {
    total_background_counts <- rep(total_background_counts, n_regions)
  }
  if (length(total_experiment_counts) == 1) {
    total_experiment_counts <- rep(total_experiment_counts, n_regions)
  }
  if (length(total_background_counts) != n_regions ||
      length(total_experiment_counts) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }
  # Hypergeometric parameters:
  # M: total balls; n: white balls (experiment totals);
  # N: draws (counts in region); x: white draws - 1, so that
  # phyper(..., lower.tail = FALSE) gives P(X >= experiment_counts)
  M <- total_background_counts + total_experiment_counts
  n <- total_experiment_counts
  N <- background_counts + experiment_counts
  x <- experiment_counts - 1
  # Regions with no counts (or an empty urn) are uninformative: p-value 1.
  # BUG FIX: when log.p = TRUE is forwarded via `...`, the fill value must be
  # log(1) = 0 rather than 1 so invalid regions stay on the same scale as the
  # phyper() results.
  dots <- list(...)
  fill <- if (isTRUE(dots$log.p)) 0 else 1
  valid <- (M >= 1) & (N >= 1)
  pval <- rep(fill, length(M))
  # Calculate p-value for valid cases: P(X >= x) = 1 - P(X <= x-1)
  if (any(valid)) {
    pval[valid] <- phyper(x[valid], n[valid], M[valid] - n[valid], N[valid],
                          lower.tail = FALSE, ...)
  }
  return(pval)
}
#' Call peaks/quantify regions using calling cards approach
#'
#' For each replicate of `sampleid`, and for the replicate-summed "combined"
#' counts, annotates a copy of the script-level `regions_gr` with enrichment,
#' Poisson and hypergeometric p-values (raw, log, and FDR-adjusted). Reads
#' the script-level globals `annotated_feature_counts` and `regions_gr`.
#'
#' The statistic computation was previously duplicated verbatim between the
#' replicate loop and the combined section; it is factored into one local
#' helper so the two paths cannot drift apart.
#'
#' @param sampleid regulator_locus_tag used to extract from
#'   annotated_feature_counts
#' @param background_counts per-region background (control) counts, aligned
#'   with `regions_gr`
#' @param total_background_counts total counts in the background library
#' @param pseudocount Pseudocount for calculations (default: 0.1)
#' @return list with `replicates` (named list of annotated GRanges, one per
#'   sra_accession) and `combined` (annotated GRanges for summed replicates)
enrichment_analysis <- function(sampleid,
                                background_counts,
                                total_background_counts,
                                pseudocount = 0.1) {
  # Annotate one copy of regions_gr with every statistic for a single
  # (experiment_counts, total_experiment_counts) pair.
  annotate_regions <- function(experiment_counts, total_experiment_counts) {
    af <- regions_gr
    GenomicRanges::mcols(af)$enrichment <- calculate_enrichment(
      total_background_counts = total_background_counts,
      total_experiment_counts = total_experiment_counts,
      background_counts = background_counts,
      experiment_counts = experiment_counts,
      pseudocount = pseudocount
    )
    GenomicRanges::mcols(af)$poisson_pval <- calculate_poisson_pval(
      total_background_counts = total_background_counts,
      total_experiment_counts = total_experiment_counts,
      background_counts = background_counts,
      experiment_counts = experiment_counts,
      pseudocount = pseudocount
    )
    GenomicRanges::mcols(af)$log_poisson_pval <- calculate_poisson_pval(
      total_background_counts = total_background_counts,
      total_experiment_counts = total_experiment_counts,
      background_counts = background_counts,
      experiment_counts = experiment_counts,
      pseudocount = pseudocount,
      log.p = TRUE
    )
    GenomicRanges::mcols(af)$hypergeometric_pval <- calculate_hypergeom_pval(
      total_background_counts = total_background_counts,
      total_experiment_counts = total_experiment_counts,
      background_counts = background_counts,
      experiment_counts = experiment_counts
    )
    GenomicRanges::mcols(af)$log_hypergeometric_pval <- calculate_hypergeom_pval(
      total_background_counts = total_background_counts,
      total_experiment_counts = total_experiment_counts,
      background_counts = background_counts,
      experiment_counts = experiment_counts,
      log.p = TRUE
    )
    # FDR adjustment is across regions within this one replicate/sample
    GenomicRanges::mcols(af)$poisson_qval <-
      p.adjust(GenomicRanges::mcols(af)$poisson_pval, method = "fdr")
    GenomicRanges::mcols(af)$hypergeometric_qval <-
      p.adjust(GenomicRanges::mcols(af)$hypergeometric_pval, method = "fdr")
    af
  }
  message(sprintf("Working on replicates for %s", sampleid))
  counts_sampleid = annotated_feature_counts[[sampleid]]
  # One annotated GRanges per replicate
  replicate_quants = map(names(counts_sampleid$replicates), ~{
    message(sprintf("Working on replicate: %s", .x))
    gr = counts_sampleid$replicates[[.x]]
    # Library size for this replicate
    total_experiment_counts = counts_sampleid$library_total %>%
      filter(sra_accession == .x) %>%
      pull(n)
    annotate_regions(gr$score, total_experiment_counts)
  })
  names(replicate_quants) = names(counts_sampleid$replicates)
  # Combined: replicate-summed counts against the pooled library size
  message(sprintf("Working on the combined for sample_id %s", sampleid))
  combined_gr = annotate_regions(
    counts_sampleid$combined$score,
    sum(counts_sampleid$library_total$n)
  )
  message("Analysis complete!")
  list(
    replicates = replicate_quants,
    combined = combined_gr
  )
}
# ---- Data loading -----------------------------------------------------------
# Feature annotations (locus_tag -> symbol) used to label output rows
genomic_features = arrow::read_parquet("~/code/hf/yeast_genome_resources/brentlab_features.parquet")
# Metadata mapping sample_id (regulator) to its SRA accessions (replicates)
chec_genomemap_meta = arrow::read_parquet(
  "~/code/hf/mahendrawada_2025/chec_genome_map_meta.parquet")
# Metadata for the control (background) libraries
freemnase_meta = arrow::read_parquet(
  "~/code/hf/mahendrawada_2025/chec_genome_map_control_meta.parquet")
# Lazy arrow datasets of per-insertion records; filtered/collected per sample
mahendrawada_genome_map_ds = arrow::open_dataset(
  "~/code/hf/mahendrawada_2025/chec_genome_map")
mahendrawada_control_ds = arrow::open_dataset(
  "~/code/hf/mahendrawada_2025/chec_genome_map_control")
samplid_list = chec_genomemap_meta %>%
  pull(sample_id) %>%
  unique()
# Promoter regions to quantify over; bed_to_granges converts the 0-based
# half-open BED coordinates to 1-based closed GRanges coordinates
regions_gr <- read_tsv(
  "~/code/hf/yeast_genome_resources/yiming_promoters.bed",
  col_names = c('chr', 'start', 'end', 'locus_tag', 'score', 'strand')) %>%
  bed_to_granges()
# ---- Counting ---------------------------------------------------------------
# Background counts per region, summed over all control replicates
m2025_control = combine_control_af()
# Per-sample replicate-level and combined counts over the same regions
annotated_feature_counts = map(samplid_list, combine_replicates_af)
names(annotated_feature_counts) = samplid_list
# ---- Statistics -------------------------------------------------------------
# Enrichment / p-values for every sample against the pooled control background
annotated_feature_quants = map(
  samplid_list, ~{
    enrichment_analysis(
      .x,
      m2025_control$af$score,
      sum(m2025_control$library_totals$n)
    )
  }
)
names(annotated_feature_quants) = samplid_list
sra_accession_for_quants = map(annotated_feature_quants, ~names(.x$replicates))
# ---- Replicate-level output -------------------------------------------------
annotated_features_quants_replicates =
  map(annotated_feature_quants, ~{
    map(.x$replicates, as_tibble) %>%
      list_rbind(names_to = "sra_accession")}) %>%
  list_rbind(names_to = "sample_id") %>%
  # NOTE(review): list_rbind names are character; as.integer assumes the
  # sample_id values are numeric strings -- confirm, otherwise this yields NA
  mutate(sample_id = as.integer(sample_id)) %>%
  arrange(sample_id) %>%
  select(-sample_id) %>%
  left_join(select(genomic_features, locus_tag, symbol)) %>%
  dplyr::rename(target_locus_tag = locus_tag, target_symbol = symbol) %>%
  dplyr::relocate(sra_accession, target_locus_tag, target_symbol) %>%
  select(-score)
annotated_features_quants_replicates %>%
  write_parquet("~/code/hf/mahendrawada_2025/chec_mahendrawada_m2025_af_replicates.parquet",
                compression = "zstd",
                write_statistics = TRUE,
                chunk_size = 6708,
                use_dictionary = c(
                  sra_accession = TRUE,
                  seqnames = TRUE,
                  target_locus_tag = TRUE,
                  target_symbol = TRUE
                )
  )
# ---- Combined (replicate-summed) output -------------------------------------
annotated_feature_quants_combined =
  map(annotated_feature_quants, ~as_tibble(.x$combined)) %>%
  list_rbind(names_to = "sample_id") %>%
  mutate(sample_id = as.integer(sample_id)) %>%
  arrange(sample_id) %>%
  left_join(select(genomic_features, locus_tag, symbol)) %>%
  dplyr::rename(target_locus_tag = locus_tag, target_symbol = symbol) %>%
  dplyr::relocate(sample_id, target_locus_tag, target_symbol) %>%
  select(-score)
annotated_feature_quants_combined %>%
  write_parquet("~/code/hf/mahendrawada_2025/chec_mahendrawada_m2025_af_combined.parquet",
                compression = "zstd",
                write_statistics = TRUE,
                chunk_size = 6708,
                use_dictionary = c(
                  sample_id = TRUE,
                  seqnames = TRUE,
                  target_locus_tag = TRUE,
                  target_symbol = TRUE
                )
  )