# Quantifies enrichment of insertions/hops in genomic regions
#
# This script:
# 1. Counts insertions overlapping each genomic region (experiment and background)
# 2. Calculates enrichment scores
# 3. Computes Poisson and hypergeometric p-values
#    (no z-score is computed here, despite what an earlier header claimed)
#
# Works with any data in BED3+ format (chr, start, end, ...)
# For calling cards: each insertion is counted once regardless of depth
#
# COORDINATE SYSTEMS:
# - Input BED files are assumed to be 0-indexed, half-open [start, end)
# - GenomicRanges uses 1-indexed, closed [start, end]
# - Conversion: GR_start = BED_start + 1, GR_end = BED_end

library(tidyverse)
library(GenomicRanges)

# Statistical Functions ---------------------------------------------------

#' Validate hop-count inputs and recycle scalar totals
#'
#' Shared input handling for the statistical functions below: checks that all
#' inputs are numeric, that the two per-region vectors agree in length, and
#' recycles scalar totals out to the number of regions. Error messages are
#' identical to the checks previously duplicated in each function.
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @return A list with elements `total_background_hops` and
#'   `total_experiment_hops`, each of length `length(background_hops)`.
#' @keywords internal
validate_hop_inputs <- function(total_background_hops, total_experiment_hops,
                                background_hops, experiment_hops) {
  if (!all(is.numeric(c(total_background_hops, total_experiment_hops,
                        background_hops, experiment_hops)))) {
    stop("All inputs must be numeric")
  }

  # The per-region vectors define the expected length
  n_regions <- length(background_hops)
  if (length(experiment_hops) != n_regions) {
    stop("background_hops and experiment_hops must be the same length")
  }

  # Recycle scalar totals to match region length if needed
  if (length(total_background_hops) == 1) {
    total_background_hops <- rep(total_background_hops, n_regions)
  }
  if (length(total_experiment_hops) == 1) {
    total_experiment_hops <- rep(total_experiment_hops, n_regions)
  }

  if (length(total_background_hops) != n_regions ||
      length(total_experiment_hops) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }

  list(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops
  )
}

#' Calculate enrichment (calling cards effect)
#'
#' Enrichment = (experiment fraction) / (background fraction + pseudocount),
#' i.e. (experiment_hops / total_experiment_hops) divided by
#' ((background_hops + pseudocount) / total_background_hops).
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param pseudocount Pseudocount to avoid division by zero (default: 0.1)
#' @return Numeric vector of enrichment values, one per region
calculate_enrichment <- function(total_background_hops, total_experiment_hops,
                                 background_hops, experiment_hops,
                                 pseudocount = 0.1) {
  totals <- validate_hop_inputs(
    total_background_hops, total_experiment_hops,
    background_hops, experiment_hops
  )

  numerator <- experiment_hops / totals$total_experiment_hops
  # Pseudocount keeps the denominator strictly positive when a region has
  # zero background hops
  denominator <- (background_hops + pseudocount) / totals$total_background_hops
  enrichment <- numerator / denominator

  # Guard against degenerate inputs (e.g. zero totals producing NaN/Inf)
  if (any(enrichment < 0, na.rm = TRUE)) {
    stop("Enrichment values must be non-negative")
  }
  if (any(is.na(enrichment))) {
    stop("Enrichment values must not be NA")
  }
  if (any(is.infinite(enrichment))) {
    stop("Enrichment values must not be infinite")
  }

  enrichment
}

#' Calculate Poisson p-values
#'
#' Models the experiment hop count in each region as Poisson with rate
#' lambda = (background_hops + pseudocount) * (total_experiment / total_background)
#' and returns the upper-tail probability P(X >= experiment_hops).
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param pseudocount Pseudocount for lambda calculation (default: 0.1)
#' @param ... additional arguments to `ppois`. Note that lower.tail is already
#'   set to FALSE
#' @return Numeric vector of Poisson p-values, one per region
calculate_poisson_pval <- function(total_background_hops, total_experiment_hops,
                                   background_hops, experiment_hops,
                                   pseudocount = 0.1, ...) {
  totals <- validate_hop_inputs(
    total_background_hops, total_experiment_hops,
    background_hops, experiment_hops
  )

  # Scale background counts into expected experiment counts
  hop_ratio <- totals$total_experiment_hops / totals$total_background_hops
  # Pseudocount avoids mu = 0 (which would make any observation p = 0)
  mu <- (background_hops + pseudocount) * hop_ratio
  x <- experiment_hops

  # P(X >= x) = P(X > x - 1), obtained directly via the upper tail
  ppois(x - 1, lambda = mu, lower.tail = FALSE, ...)
}

#' Calculate hypergeometric p-values
#'
#' Urn model: all hops (experiment + background) are balls, experiment hops
#' are the white balls, and the hops falling in a region are the draws.
#' Returns the upper-tail probability P(X >= experiment_hops in region).
#'
#' @param total_background_hops Total number of hops in background (scalar or vector)
#' @param total_experiment_hops Total number of hops in experiment (scalar or vector)
#' @param background_hops Number of hops in background per region (vector)
#' @param experiment_hops Number of hops in experiment per region (vector)
#' @param ... additional arguments to `phyper`. Note that lower.tail is already
#'   set to FALSE
#' @return Numeric vector of hypergeometric p-values, one per region
calculate_hypergeom_pval <- function(total_background_hops, total_experiment_hops,
                                     background_hops, experiment_hops, ...) {
  totals <- validate_hop_inputs(
    total_background_hops, total_experiment_hops,
    background_hops, experiment_hops
  )
  total_background_hops <- totals$total_background_hops
  total_experiment_hops <- totals$total_experiment_hops

  # Hypergeometric parameters:
  # M: total number of balls (total hops)
  M <- total_background_hops + total_experiment_hops
  # n: number of white balls (experiment hops)
  n <- total_experiment_hops
  # N: number of draws (hops in region)
  N <- background_hops + experiment_hops
  # x: white balls drawn; subtract 1 so the upper tail includes the observed
  # count, i.e. P(X >= experiment_hops) = P(X > experiment_hops - 1)
  x <- experiment_hops - 1

  # Regions with no hops at all (or an empty urn) get p = 1
  valid <- (M >= 1) & (N >= 1)
  pval <- rep(1, length(M))
  if (any(valid)) {
    pval[valid] <- phyper(x[valid], n[valid], M[valid] - n[valid], N[valid],
                          lower.tail = FALSE, ...)
  }

  pval
}

# GRanges Conversion Functions --------------------------------------------

#' Convert BED format data frame to GRanges
#'
#' Handles coordinate system conversion from 0-indexed half-open BED format
#' to 1-indexed closed GenomicRanges format.
#'
#' @param bed_df Data frame with chr, start, end columns in BED format
#'   (0-indexed, half-open)
#' @param zero_indexed Logical, whether input is 0-indexed (default: TRUE)
#' @return GRanges object; any extra columns in `bed_df` (other than
#'   chr/start/end/strand) are carried over as metadata columns
bed_to_granges <- function(bed_df, zero_indexed = TRUE) {
  if (!all(c("chr", "start", "end") %in% names(bed_df))) {
    stop("bed_df must have columns: chr, start, end")
  }

  # Convert from 0-indexed half-open [start, end) to 1-indexed closed
  # [start, end]: shift start up by one, end stays the same
  if (zero_indexed) {
    gr_start <- bed_df$start + 1
    gr_end <- bed_df$end
  } else {
    gr_start <- bed_df$start
    gr_end <- bed_df$end
  }

  # Strand-agnostic ("*") because calling cards insertions are counted
  # regardless of orientation
  gr <- GRanges(
    seqnames = bed_df$chr,
    ranges = IRanges(start = gr_start, end = gr_end),
    strand = "*"
  )

  # Preserve any additional metadata columns from the input
  extra_cols <- setdiff(names(bed_df), c("chr", "start", "end", "strand"))
  if (length(extra_cols) > 0) {
    mcols(gr) <- bed_df[, extra_cols, drop = FALSE]
  }

  gr
}

#' Deduplicate insertions in GRanges object
#'
#' For calling cards, if an insertion is found at the same coordinate,
#' only one record is retained.
#'
#' @param gr GRanges object
#' @return Deduplicated GRanges object (first occurrence of each range kept)
deduplicate_granges <- function(gr) {
  # granges() strips metadata/strand so duplicates are judged on coordinates
  # alone; the first record at each coordinate is kept
  unique_ranges <- !duplicated(granges(gr))
  gr[unique_ranges]
}

#' Count overlaps between insertions and regions
#'
#' @param insertions_gr GRanges object with insertions
#' @param regions_gr GRanges object with regions
#' @param deduplicate Whether to deduplicate insertions (default: TRUE)
#' @return Integer vector of overlap counts, one element per region
count_overlaps <- function(insertions_gr, regions_gr, deduplicate = TRUE) {
  if (deduplicate) {
    n_before <- length(insertions_gr)
    insertions_gr <- deduplicate_granges(insertions_gr)
    n_after <- length(insertions_gr)
    if (n_before != n_after) {
      message(" Deduplicated: ", n_before, " -> ", n_after,
              " (removed ", n_before - n_after, " duplicates)")
    }
  }

  # countOverlaps(query, subject): one count per query, so regions go first
  countOverlaps(regions_gr, insertions_gr)
}

# Main Analysis Function --------------------------------------------------

#' Call peaks/quantify regions using calling cards approach
#'
#' Counts experiment and background insertions per region, then annotates
#' `regions_gr` with enrichment, Poisson (raw and log) p-values,
#' hypergeometric p-values, and FDR-adjusted q-values.
#'
#' @param experiment_gr GRanges object with experiment insertions
#' @param background_gr GRanges object with background insertions
#' @param regions_gr GRanges object with regions to quantify
#' @param deduplicate_experiment Whether to deduplicate experiment insertions
#'   (default: TRUE). Background is never deduplicated.
#' @param pseudocount Pseudocount for calculations (default: 0.1)
#' @return GRanges object with regions and statistics as metadata columns
enrichment_analysis <- function(experiment_gr, background_gr, regions_gr,
                                deduplicate_experiment = TRUE,
                                pseudocount = 0.1) {
  message("Starting enrichment analysis...")

  # Validate inputs
  if (!inherits(experiment_gr, "GRanges")) {
    stop("experiment_gr must be a GRanges object")
  }
  if (!inherits(background_gr, "GRanges")) {
    stop("background_gr must be a GRanges object")
  }
  if (!inherits(regions_gr, "GRanges")) {
    stop("regions_gr must be a GRanges object")
  }

  # Count overlaps for experiment (with deduplication if requested)
  message("Counting experiment overlaps...")
  if (deduplicate_experiment) {
    message(" Deduplication: ON")
  } else {
    message(" Deduplication: OFF")
  }
  experiment_counts <- count_overlaps(
    experiment_gr, regions_gr,
    deduplicate = deduplicate_experiment
  )

  # Count overlaps for background (never deduplicated)
  message("Counting background overlaps...")
  message(" Deduplication: OFF (background should not be deduplicated)")
  background_counts <- count_overlaps(
    background_gr, regions_gr,
    deduplicate = FALSE
  )

  # Totals must be computed AFTER any deduplication so that the per-region
  # counts and the totals refer to the same set of insertions
  if (deduplicate_experiment) {
    experiment_gr_dedup <- deduplicate_granges(experiment_gr)
    total_experiment_hops <- length(experiment_gr_dedup)
  } else {
    total_experiment_hops <- length(experiment_gr)
  }
  total_background_hops <- length(background_gr)

  message("Total experiment hops: ", total_experiment_hops)
  message("Total background hops: ", total_background_hops)

  if (total_experiment_hops == 0) {
    stop("Experiment data is empty")
  }
  if (total_background_hops == 0) {
    stop("Background data is empty")
  }

  # Add counts and totals as metadata columns
  mcols(regions_gr)$experiment_hops <- as.integer(experiment_counts)
  mcols(regions_gr)$background_hops <- as.integer(background_counts)
  mcols(regions_gr)$total_experiment_hops <- as.integer(total_experiment_hops)
  mcols(regions_gr)$total_background_hops <- as.integer(total_background_hops)

  message("Calculating enrichment scores...")
  mcols(regions_gr)$callingcards_enrichment <- calculate_enrichment(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount
  )

  message("Calculating Poisson p-values...")
  mcols(regions_gr)$poisson_pval <- calculate_poisson_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount
  )

  message("Calculating log Poisson p-values...")
  mcols(regions_gr)$log_poisson_pval <- calculate_poisson_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts,
    pseudocount = pseudocount,
    log.p = TRUE
  )

  message("Calculating hypergeometric p-values...")
  mcols(regions_gr)$hypergeometric_pval <- calculate_hypergeom_pval(
    total_background_hops = total_background_hops,
    total_experiment_hops = total_experiment_hops,
    background_hops = background_counts,
    experiment_hops = experiment_counts
  )

  # FDR (Benjamini-Hochberg) correction across regions
  message("Calculating adjusted p-values...")
  mcols(regions_gr)$poisson_qval <-
    p.adjust(mcols(regions_gr)$poisson_pval, method = "fdr")
  mcols(regions_gr)$hypergeometric_qval <-
    p.adjust(mcols(regions_gr)$hypergeometric_pval, method = "fdr")

  message("Analysis complete!")
  regions_gr
}

# Example Usage -----------------------------------------------------------
# This is a template for how to use these functions
# Uncomment and modify for your actual data

# Load your data (BED3+ format: chr, start, end, ...)
# NOTE(review): this is an arrow Dataset, not a GRanges — it must be
# collected and converted (e.g. via bed_to_granges()) before being passed to
# enrichment_analysis(); confirm intended conversion
experiment_gr <- arrow::open_dataset("~/code/hf/barkai_compendium/genome_map")

accessions <- experiment_gr |>
  dplyr::select(accession) |>
  dplyr::distinct() |>
  dplyr::collect() |>
  dplyr::pull(accession)

tmp_acc <- experiment_gr %>%
  filter(accession == accessions[1]) %>%
  collect()

mahendrawada_control_data_root <-
  "~/projects/parsing_yeast_database_data/data/mahendrawada_chec"
background_gr_h_m_paths <- list.files(mahendrawada_control_data_root)
background_gr_h_m <- map(
  file.path(mahendrawada_control_data_root, background_gr_h_m_paths),
  rtracklayer::import
)
# fixed() so the dots in the suffix are matched literally, not as regex
# wildcards
names(background_gr_h_m) <- str_remove(
  background_gr_h_m_paths,
  fixed("_REP1.mLb.mkD.sorted_5p.bed")
)

regions_gr <- read_tsv(
  "~/code/hf/yeast_genome_resources/yiming_promoters.bed",
  col_names = c("chr", "start", "end", "locus_tag", "score", "strand")
) %>%
  bed_to_granges()

# Run analysis with deduplication (default for calling cards)
# NOTE(review): `background_gr` is not defined above — only the list
# `background_gr_h_m` is. Select the intended element (and convert
# `experiment_gr` to GRanges) before running this call.
results <- enrichment_analysis(
  experiment_gr = experiment_gr,
  background_gr = background_gr,
  regions_gr = regions_gr,
  deduplicate_experiment = TRUE,
  pseudocount = 0.1
)

# id 9 corresponds to the binding sample -- can get from genome_map and
# annotated_feature metadata
#
# NOTE: there are some expected differences due to a change in how I am handling
# the promoter boundaries. The implementation here is correct -- please use
# this from now on. If you need to compare or doubt something, please let
# me know
#
# curr_db_annotated_feature = arrow::read_parquet("~/code/hf/callingcards/annotated_features/batch=run_5801/part-0.parquet") %>%
#   filter(id == 9)
#
# comp_df = curr_db_annotated_feature %>%
#   select(target_locus_tag, experiment_hops,
#          background_hops, background_total_hops,
#          experiment_total_hops) %>%
#   left_join(results %>%
#               as_tibble() %>%
#               select(locus_tag, total_background_hops,
#                      total_experiment_hops,
#                      experiment_hops, background_hops) %>%
#               dplyr::rename(target_locus_tag = locus_tag,
#                             new_exp_hops = experiment_hops,
#                             new_bg_hops = background_hops,
#                             new_bg_total = total_background_hops,
#                             new_expr_total = total_experiment_hops))