library(tidyverse)
library(arrow)
library(here)
library(yaml)

#' Convert BED format data frame to GRanges
#'
#' Handles coordinate system conversion from 0-indexed half-open BED format
#' to 1-indexed closed GenomicRanges format.
#'
#' @param bed_df Data frame with chr, start, end columns in BED format
#'   (0-indexed, half-open)
#' @param zero_indexed Logical, whether input is 0-indexed (default: TRUE)
#' @return GRanges object
bed_to_granges <- function(bed_df, zero_indexed = TRUE) {
  required_cols <- c("chr", "start", "end")
  if (!all(required_cols %in% names(bed_df))) {
    stop("bed_df must have columns: chr, start, end")
  }

  # BED [start, end) is 0-indexed half-open; GRanges [start, end] is
  # 1-indexed closed, so only the start coordinate needs to shift.
  start_offset <- if (zero_indexed) 1 else 0

  # Calling cards data is strand-agnostic, hence strand = "*"
  gr <- GenomicRanges::GRanges(
    seqnames = bed_df$chr,
    ranges = IRanges::IRanges(
      start = bed_df$start + start_offset,
      end = bed_df$end
    ),
    strand = "*"
  )

  # Carry over any metadata columns beyond the core BED coordinates
  metadata_cols <- setdiff(names(bed_df), c(required_cols, "strand"))
  if (length(metadata_cols) > 0) {
    GenomicRanges::mcols(gr) <- bed_df[, metadata_cols, drop = FALSE]
  }

  gr
}

#' Convert point-wise coverage to BED format
#'
#' @param coverage_df Data frame with chr, pos, pileup columns
#' @return Data frame in BED format with chr, start, end, score
coverage_to_bed <- function(coverage_df) {
  # Each 0-indexed position becomes a width-1 half-open BED interval
  # (end is exclusive), with pileup carried along as the score.
  dplyr::transmute(
    coverage_df,
    chr,
    start = pos,
    end = pos + 1,
    score = pileup
  )
}

#' Sum scores of overlapping insertions per region
#'
#' @param insertions_gr GRanges object with insertions containing a 'score'
#'   metadata column
#' @param regions_gr GRanges object with regions
#' @return Numeric vector of summed scores per region
sum_overlap_scores <- function(insertions_gr, regions_gr) {
  # Regions with no overlapping insertions contribute zero
  per_region_total <- rep(0, length(regions_gr))

  hits <- GenomicRanges::findOverlaps(regions_gr, insertions_gr)
  if (length(hits) == 0) {
    return(per_region_total)
  }

  # Pull the score of every overlapping insertion, then aggregate by the
  # index of the region (query) each one falls into
  hit_scores <-
    GenomicRanges::mcols(insertions_gr)$score[S4Vectors::subjectHits(hits)]
  totals_by_region <- tapply(hit_scores, S4Vectors::queryHits(hits), sum)

  # tapply names are the region indices as strings; fill them back in
  per_region_total[as.integer(names(totals_by_region))] <- totals_by_region

  per_region_total
}

#' Combine replicates for a given regulator
#'
#' @param sample_set_id sample_id that identifies a set of replicates
#' @param genomecov_data List containing meta and ds (tagged experiment data)
#' @param regions_gr GRanges object with regions to quantify
combine_replicates_af <- function(sample_set_id, genomecov_data, regions_gr) {
  message(sprintf("Working on sample_id: %s", sample_set_id))

  run_accession_list <- genomecov_data$meta %>%
    filter(sample_id == sample_set_id) %>%
    pull(run_accession)

  # Per-library sequencing depth, used downstream for normalization
  library_totals <- genomecov_data$ds %>%
    filter(accession %in% run_accession_list) %>%
    group_by(accession) %>%
    summarise(n = sum(pileup, na.rm = TRUE)) %>%
    collect()

  # Region-level score sums for each replicate individually
  replicate_region_counts <- map(run_accession_list, function(run_acc) {
    genomecov_data$ds %>%
      filter(accession == run_acc) %>%
      collect() %>%
      coverage_to_bed() %>%
      bed_to_granges() %>%
      sum_overlap_scores(regions_gr)
  })

  # Attach each replicate's counts to a copy of the regions
  replicates <- map2(
    replicate_region_counts, run_accession_list,
    function(region_counts, run_acc) {
      rep_gr <- regions_gr
      rep_gr$score <- region_counts
      rep_gr
    }
  )
  names(replicates) <- run_accession_list

  # Element-wise sum across replicates gives the combined profile
  combined <- regions_gr
  combined$score <- Reduce(`+`, replicate_region_counts)

  list(
    library_total = library_totals,
    replicates = replicates,
    combined = combined
  )
}

#' Combine control samples
#'
#' @param genomecov_control List containing meta and ds (control data)
#' @param regions_gr GRanges object with regions to quantify
combine_control_af <- function(genomecov_control, regions_gr) {
  message("Processing control samples...")

  # Per-library sequencing depth across all control accessions
  library_totals <- genomecov_control$ds %>%
    group_by(accession) %>%
    summarise(n = sum(pileup, na.rm = TRUE)) %>%
    collect()

  # Region-level score sums for each control library individually
  replicate_region_counts <- map(
    genomecov_control$meta$accession,
    function(run_acc) {
      coverage_gr <- genomecov_control$ds %>%
        filter(accession == run_acc) %>%
        collect() %>%
        coverage_to_bed() %>%
        bed_to_granges()
      sum_overlap_scores(coverage_gr, regions_gr)
    }
  )

  # Combined control profile: element-wise sum across libraries
  out <- regions_gr
  out$score <- Reduce(`+`, replicate_region_counts)

  list(
    library_totals = library_totals,
    af = out
  )
}

#' Validate and recycle the count inputs shared by the enrichment and
#' p-value functions
#'
#' Centralizes the validation boilerplate previously duplicated across
#' calculate_enrichment, calculate_poisson_pval and calculate_hypergeom_pval.
#'
#' @param total_background_counts Total background counts (scalar or vector)
#' @param total_experiment_counts Total experiment counts (scalar or vector)
#' @param background_counts Number of counts in background per region (vector)
#' @param experiment_counts Number of counts in experiment per region (vector)
#' @return List with total_background_counts and total_experiment_counts
#'   recycled to the number of regions
.validate_count_inputs <- function(total_background_counts,
                                   total_experiment_counts,
                                   background_counts,
                                   experiment_counts) {
  if (!all(is.numeric(c(total_background_counts, total_experiment_counts,
                        background_counts, experiment_counts)))) {
    stop("All inputs must be numeric")
  }

  n_regions <- length(background_counts)
  if (length(experiment_counts) != n_regions) {
    stop("background_counts and experiment_counts must be the same length")
  }

  # Recycle scalar totals to the region length
  if (length(total_background_counts) == 1) {
    total_background_counts <- rep(total_background_counts, n_regions)
  }
  if (length(total_experiment_counts) == 1) {
    total_experiment_counts <- rep(total_experiment_counts, n_regions)
  }
  if (length(total_background_counts) != n_regions ||
      length(total_experiment_counts) != n_regions) {
    stop("All input vectors must be the same length or scalars")
  }

  list(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts
  )
}

#' Calculate enrichment (calling cards effect)
#'
#' @param total_background_counts Total number of counts in background
#'   (scalar or vector)
#' @param total_experiment_counts Total number of counts in experiment
#'   (scalar or vector)
#' @param background_counts Number of counts in background per region (vector)
#' @param experiment_counts Number of counts in experiment per region (vector)
#' @param pseudocount Pseudocount to avoid division by zero (default: 0.1)
#' @return Enrichment values
calculate_enrichment <- function(total_background_counts,
                                 total_experiment_counts,
                                 background_counts,
                                 experiment_counts,
                                 pseudocount = 0.1) {
  totals <- .validate_count_inputs(
    total_background_counts, total_experiment_counts,
    background_counts, experiment_counts
  )

  # Depth-normalized experiment rate over depth-normalized background rate;
  # pseudocount keeps the denominator away from zero
  numerator <- experiment_counts / totals$total_experiment_counts
  denominator <-
    (background_counts + pseudocount) / totals$total_background_counts
  enrichment <- numerator / denominator

  # Guard against degenerate inputs (e.g. zero or NA library totals)
  if (any(enrichment < 0, na.rm = TRUE)) {
    stop("Enrichment values must be non-negative")
  }
  if (any(is.na(enrichment))) {
    stop("Enrichment values must not be NA")
  }
  if (any(is.infinite(enrichment))) {
    stop("Enrichment values must not be infinite")
  }

  enrichment
}

#' Calculate Poisson p-values
#'
#' @param total_background_counts Total number of counts in background
#'   (scalar or vector)
#' @param total_experiment_counts Total number of counts in experiment
#'   (scalar or vector)
#' @param background_counts Number of counts in background per region (vector)
#' @param experiment_counts Number of counts in experiment per region (vector)
#' @param pseudocount Pseudocount for lambda calculation (default: 0.1)
#' @param ... additional arguments to `ppois`. note that lower tail is set to
#'   FALSE already
#' @return Poisson p-values
calculate_poisson_pval <- function(total_background_counts,
                                   total_experiment_counts,
                                   background_counts,
                                   experiment_counts,
                                   pseudocount = 0.1,
                                   ...) {
  totals <- .validate_count_inputs(
    total_background_counts, total_experiment_counts,
    background_counts, experiment_counts
  )

  # Scale factor between experiment and background library depths
  hop_ratio <- totals$total_experiment_counts / totals$total_background_counts

  # Expected counts (lambda) under the background model; pseudocount
  # avoids mu = 0
  mu <- (background_counts + pseudocount) * hop_ratio

  # P(X >= x) = 1 - P(X <= x - 1), hence the x - 1 with lower.tail = FALSE
  ppois(experiment_counts - 1, lambda = mu, lower.tail = FALSE, ...)
}

#' Calculate hypergeometric p-values
#'
#' @param total_background_counts Total number of counts in background
#'   (scalar or vector)
#' @param total_experiment_counts Total number of counts in experiment
#'   (scalar or vector)
#' @param background_counts Number of counts in background per region (vector)
#' @param experiment_counts Number of counts in experiment per region (vector)
#' @param log.p Logical; return log p-values (default: FALSE). Previously
#'   forwarded through `...`, which silently left the p-value for degenerate
#'   regions at 1 instead of log(1) = 0 when log.p = TRUE.
#' @param ... additional arguments to phyper. Note that lower tail is set to
#'   false already
#' @return Hypergeometric p-values
calculate_hypergeom_pval <- function(total_background_counts,
                                     total_experiment_counts,
                                     background_counts,
                                     experiment_counts,
                                     log.p = FALSE,
                                     ...) {
  totals <- .validate_count_inputs(
    total_background_counts, total_experiment_counts,
    background_counts, experiment_counts
  )

  # Hypergeometric urn: M total balls (both library totals), n "white" balls
  # (experiment library total), N draws (region counts from both conditions)
  M <- totals$total_background_counts + totals$total_experiment_counts
  n <- totals$total_experiment_counts
  N <- background_counts + experiment_counts
  # P(X >= x) = P(X > x - 1), so shift the observed count down by one
  x <- experiment_counts - 1

  # Degenerate regions (no counts anywhere, or empty urn) get p = 1.
  # BUG FIX: on the log scale that must be log(1) = 0, not 1.
  valid <- (M >= 1) & (N >= 1)
  pval <- rep(if (log.p) 0 else 1, length(M))

  if (any(valid)) {
    pval[valid] <- phyper(x[valid], n[valid], M[valid] - n[valid], N[valid],
                          lower.tail = FALSE, log.p = log.p, ...)
  }

  pval
}

#' Attach count columns and enrichment statistics to a regions GRanges
#'
#' Shared annotation step used for both per-replicate and combined
#' quantifications (previously duplicated inside enrichment_analysis).
#'
#' @param af GRanges of regions to annotate (copied, not modified in place)
#' @param background_counts Per-region background counts (vector)
#' @param experiment_counts Per-region experiment counts (vector)
#' @param total_background_counts Total background counts (scalar)
#' @param total_experiment_counts Total experiment counts (scalar)
#' @param pseudocount Pseudocount for enrichment/Poisson calculations
#' @return GRanges with count, enrichment, p-value and q-value mcols added
.annotate_af_stats <- function(af, background_counts, experiment_counts,
                               total_background_counts,
                               total_experiment_counts, pseudocount) {
  # Raw counts, kept alongside the statistics for downstream inspection
  GenomicRanges::mcols(af)$background_counts <- background_counts
  GenomicRanges::mcols(af)$experiment_counts <- experiment_counts
  GenomicRanges::mcols(af)$total_background_counts <- total_background_counts
  GenomicRanges::mcols(af)$total_experiment_counts <- total_experiment_counts

  GenomicRanges::mcols(af)$enrichment <- calculate_enrichment(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts,
    background_counts = background_counts,
    experiment_counts = experiment_counts,
    pseudocount = pseudocount
  )

  GenomicRanges::mcols(af)$poisson_pval <- calculate_poisson_pval(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts,
    background_counts = background_counts,
    experiment_counts = experiment_counts,
    pseudocount = pseudocount
  )
  GenomicRanges::mcols(af)$log_poisson_pval <- calculate_poisson_pval(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts,
    background_counts = background_counts,
    experiment_counts = experiment_counts,
    pseudocount = pseudocount,
    log.p = TRUE
  )

  GenomicRanges::mcols(af)$hypergeometric_pval <- calculate_hypergeom_pval(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts,
    background_counts = background_counts,
    experiment_counts = experiment_counts
  )
  GenomicRanges::mcols(af)$log_hypergeometric_pval <- calculate_hypergeom_pval(
    total_background_counts = total_background_counts,
    total_experiment_counts = total_experiment_counts,
    background_counts = background_counts,
    experiment_counts = experiment_counts,
    log.p = TRUE
  )

  # FDR-adjusted p-values (Benjamini-Hochberg)
  GenomicRanges::mcols(af)$poisson_qval <- p.adjust(
    GenomicRanges::mcols(af)$poisson_pval, method = "fdr")
  GenomicRanges::mcols(af)$hypergeometric_qval <- p.adjust(
    GenomicRanges::mcols(af)$hypergeometric_pval, method = "fdr")

  af
}

#' Call peaks/quantify regions using calling cards approach
#'
#' @param sample_set_id sample_id that identifies a set of replicates
#' @param background_counts Vector of background counts per region
#' @param total_background_counts Total background counts (scalar)
#' @param annotated_feature_counts List of combined replicate data
#' @param regions_gr GRanges object with regions
#' @param pseudocount Pseudocount for calculations (default: 0.1)
#' @return List with replicates and combined quantifications
enrichment_analysis <- function(sample_set_id,
                                background_counts,
                                total_background_counts,
                                annotated_feature_counts,
                                regions_gr,
                                pseudocount = 0.1) {
  message(sprintf("Working on sample_id for %s", sample_set_id))

  counts_regulator <- annotated_feature_counts[[as.character(sample_set_id)]]

  # Annotate each replicate against the shared background
  replicate_quants <- map(
    names(counts_regulator$replicates),
    function(run_acc) {
      message(sprintf("Working on replicate: %s", run_acc))

      total_experiment_counts <- counts_regulator$library_total %>%
        filter(accession == run_acc) %>%
        pull(n)

      .annotate_af_stats(
        regions_gr,
        background_counts = background_counts,
        experiment_counts = counts_regulator$replicates[[run_acc]]$score,
        total_background_counts = total_background_counts,
        total_experiment_counts = total_experiment_counts,
        pseudocount = pseudocount
      )
    }
  )
  names(replicate_quants) <- names(counts_regulator$replicates)

  # Annotate the replicate-summed profile against the pooled library total
  message(sprintf("Working on the combined for sample_id %s", sample_set_id))
  combined_gr <- .annotate_af_stats(
    regions_gr,
    background_counts = background_counts,
    experiment_counts = counts_regulator$combined$score,
    total_background_counts = total_background_counts,
    total_experiment_counts = sum(counts_regulator$library_total$n),
    pseudocount = pseudocount
  )

  message("Analysis complete!")

  list(
    replicates = replicate_quants,
    combined = combined_gr
  )
}

# ============================================================================
# Main analysis workflow
# ============================================================================

# Load data
genomic_features <- arrow::read_parquet(
  "~/code/hf/yeast_genome_resources/brentlab_features.parquet")

genomecov <- list(
  tagged = list(
    meta = arrow::read_parquet(
      "~/code/hf/rossi_2021/rossi_2021_metadata.parquet"),
    ds = arrow::open_dataset("~/code/hf/rossi_2021/genome_map")
  ),
  control = list(
    meta = arrow::read_parquet(
      "~/code/hf/rossi_2021/genome_map_control_meta.parquet"),
    ds = arrow::open_dataset("~/code/hf/rossi_2021/genome_map_control")
  )
)

# Get unique regulators
sample_id_list <- genomecov$tagged$meta %>%
  pull(sample_id) %>%
  unique()

# Load regions
regions_gr <- read_tsv(
  "~/code/hf/yeast_genome_resources/yiming_promoters.bed",
  col_names = c('chr', 'start', 'end', 'locus_tag', 'score', 'strand')) %>%
  bed_to_granges()

# Process control samples
rossi_2021_control <- combine_control_af(genomecov$control, regions_gr)

# Process all sample_id sets
annotated_feature_counts <- map(sample_id_list, function(sid) {
  combine_replicates_af(sid, genomecov$tagged, regions_gr)
})
names(annotated_feature_counts) <- sample_id_list

# Perform enrichment analysis
annotated_feature_quants <- map(sample_id_list, function(sid) {
  enrichment_analysis(
    sid,
    rossi_2021_control$af$score,
    sum(rossi_2021_control$library_totals$n),
    annotated_feature_counts,
    regions_gr
  )
})
names(annotated_feature_quants) <- sample_id_list

# Extract and format replicate-level results; joins are made explicit with
# `by =` to avoid natural-join surprises
annotated_features_quants_replicates <- map(annotated_feature_quants, ~{
  map(.x$replicates, as_tibble) %>%
    list_rbind(names_to = "run_accession")}) %>%
  list_rbind(names_to = "sample_id") %>%
  mutate(sample_id = as.integer(sample_id)) %>%
  left_join(
    genomecov$tagged$meta %>%
      ungroup() %>%
      select(sample_id, regulator_locus_tag, regulator_symbol,
             run_accession) %>%
      distinct(),
    by = c("sample_id", "run_accession")) %>%
  left_join(select(genomic_features, locus_tag, symbol),
            by = "locus_tag") %>%
  dplyr::rename(target_locus_tag = locus_tag, target_symbol = symbol) %>%
  dplyr::relocate(sample_id, run_accession, regulator_locus_tag,
                  regulator_symbol, target_locus_tag, target_symbol) %>%
  select(-c(score, width, strand))

# Write replicate-level results
# annotated_features_quants_replicates %>%
#   write_parquet(
#     "~/code/hf/rossi_2021/rossi_2021_af_replicates.parquet",
#     compression = "zstd",
#     write_statistics = TRUE,
#     chunk_size = 6708,
#     use_dictionary = c(
#       sample_id = TRUE,
#       run_accession = TRUE,
#       regulator_locus_tag = TRUE,
#       regulator_symbol = TRUE,
#       seqnames = TRUE,
#       target_locus_tag = TRUE,
#       target_symbol = TRUE
#     )
#   )

# Extract and format combined results
annotated_feature_quants_combined <- map(annotated_feature_quants, ~{
  as_tibble(.x$combined)}) %>%
  list_rbind(names_to = "sample_id") %>%
  mutate(sample_id = as.integer(sample_id)) %>%
  left_join(
    genomecov$tagged$meta %>%
      ungroup() %>%
      select(sample_id, regulator_locus_tag, regulator_symbol) %>%
      distinct(),
    by = "sample_id") %>%
  left_join(select(genomic_features, locus_tag, symbol),
            by = "locus_tag") %>%
  dplyr::rename(target_locus_tag = locus_tag, target_symbol = symbol) %>%
  dplyr::relocate(sample_id, regulator_locus_tag, regulator_symbol,
                  target_locus_tag, target_symbol) %>%
  select(-c(score, width, strand))

# Write combined results
# annotated_feature_quants_combined %>%
#   write_parquet(
#     "~/code/hf/rossi_2021/rossi_2021_af_combined.parquet",
#     compression = "zstd",
#     write_statistics = TRUE,
#     chunk_size = 6708,
#     use_dictionary = c(
#       sample_id = TRUE,
#       regulator_locus_tag = TRUE,
#       regulator_symbol = TRUE,
#       seqnames = TRUE,
#       target_locus_tag = TRUE,
#       target_symbol = TRUE
#     )
#   )