# barkai_compendium/scripts/parse_barkai_checseq.R
# Uploaded by cmatkhan with huggingface_hub (commit d0ca76f, 4.46 kB)
## NOTE: The data is currently on /lts/mblab/downloaded_data/barkai_checseq
## and the parquet dataset is on the brentlab-strides aws at s3://yeast-binding-perturbation-data/barkai_checkseq
library(tidyverse)
library(here)
library(arrow)
# Load the sacCer3 genome and build a per-base coordinate table with one
# row per genome position: seqnames, start (0-based), end (1-based), i.e.
# BED-style half-open intervals.
sacCer3_genome <- rtracklayer::import(
  "~/ref/sacCer3/ucsc/sacCer3.fa.gz",
  format = "fasta"
)
# Keep only the first space-delimited token of each FASTA header line.
# (Replaces the original map(str_split(...), ~.[[1]]) chain.)
sacCer3_seqnames <- str_extract(names(sacCer3_genome), "^[^ ]*")
sacCer3_genome_df <- tibble(
  # one row per base: repeat each seqname by its chromosome length
  seqnames = rep(sacCer3_seqnames, Biostrings::width(sacCer3_genome))
) %>%
  group_by(seqnames) %>%
  mutate(
    start = row_number() - 1,  # 0-based start
    end = row_number()         # 1-based end
  ) %>%
  ungroup()
#' Locate the SRA metadata table and pileup files for one GEO series.
#'
#' @param series_id A GEO series accession, e.g. "GSE179430".
#' @return A list with `meta` (path to the series' SraRunTable.csv) and
#'   `files` (character vector of .txt.gz pileup paths, named by GSM
#'   accession extracted from the filename).
retrieve_series_paths <- function(series_id) {
  # NOTE(review): this uses a bare relative path while the list.files()
  # call below uses here(); the two agree only when the working directory
  # is the project root -- confirm and make consistent.
  sra_meta_path <- file.path("data/barkai_checseq", series_id, "SraRunTable.csv")
  stopifnot(file.exists(sra_meta_path))
  df <- read_csv(sra_meta_path)
  # list.files() takes a REGEX, not a glob: the original "*.txt.gz" had a
  # dangling `*` quantifier and unescaped/unanchored dots. Anchor a proper
  # ".txt.gz" suffix match instead.
  data_files <- list.files(
    here("data/barkai_checseq", series_id),
    pattern = "\\.txt\\.gz$",
    full.names = TRUE
  )
  # every SRA run in the metadata should have exactly one pileup file
  stopifnot(nrow(df) == length(data_files))
  names(data_files) <- str_extract(basename(data_files), "GSM\\d+")
  list(
    meta = sra_meta_path,
    files = data_files
  )
}
# Attach sacCer3 per-base coordinates to a single-column pileup file.
# Relies on the pileup file having exactly one value per genome position,
# in the same row order as sacCer3_genome_df (bind_cols is positional).
add_genomic_coordinate <- function(checseqpath) {
  pileup_df <- data.table::fread(checseqpath, sep = "\t", col.names = 'pileup')
  bind_cols(sacCer3_genome_df, pileup_df)
}
# Read one chec-seq pileup file, attach genomic coordinates, and keep
# only positions with nonzero pileup (sparse representation).
process_checseq_files <- function(file) {
  file %>%
    add_genomic_coordinate() %>%
    filter(pileup != 0)
}
# Map each GEO series to its metadata path and GSM-named pileup files.
# set_names() makes the result a named list keyed by series id.
series_ids <- c("GSE179430", "GSE209631", "GSE222268")
series_list <- map(set_names(series_ids), retrieve_series_paths)
# Root of the hive-partitioned parquet dataset written below.
dataset_basepath <- here("data/barkai_checseq/hf/genome_map")
# Create output directory (no-op if it already exists)
dir.create(dataset_basepath, recursive = TRUE, showWarnings = FALSE)
# Write each accession's nonzero-pileup table into one parquet dataset,
# hive-partitioned as series=<GSE...>/accession=<GSM...>. Processing a
# single accession per iteration (plus the explicit gc()) bounds peak
# memory to roughly one genome-length data frame.
for (series_id in names(series_list)) {
message(glue::glue("Processing series {series_id}"))
for (accession_id in names(series_list[[series_id]]$files)) {
message(glue::glue(" Processing {accession_id}"))
# sparse pileup + coordinates, tagged with its provenance columns
df <- process_checseq_files(
series_list[[series_id]]$files[[accession_id]]
) %>%
mutate(accession = accession_id, series = series_id)
df %>%
# NOTE(review): when an explicit `partitioning` argument is given,
# arrow may ignore the group_by(seqnames) grouping rather than add
# seqnames as a partition key -- confirm against the write_dataset()
# docs for the arrow version in use.
group_by(seqnames) %>%
write_dataset(
path = dataset_basepath,
format = "parquet",
partitioning = c("series", "accession"),
# re-runs replace files previously written into matching partitions
existing_data_behavior = "overwrite",
compression = "zstd",
write_statistics = TRUE,
# dictionary-encode the low-cardinality chromosome column
use_dictionary = c(
seqnames = TRUE
)
)
# release the per-accession table before the next iteration
gc()
}
}
# the following code was used to parse an entire series to DF and then save
# to a parquet dataset. that was too large and I chose the dataset partitioning
# instead.
# split_manipulation <- function(manipulation_str) {
# parts <- str_split(manipulation_str, "::")[[1]]
#
# if (length(parts) != 2) {
# stop("Unexpected format. Expected 'LOCUS::TAGGED_CONSTRUCT'")
# }
#
# tagged_locus <- parts[1]
# rhs <- parts[2]
#
# # default
# dbd_donor_symbol_str <- "none"
# ortholog <- "none"
#
# # Check for paralog DBD
# if (str_detect(rhs, "-[A-Za-z0-9]+DBD-Mnase$")) {
# dbd_donor_symbol_str <- toupper(str_remove(str_split(rhs, "-", simplify = TRUE)[[2]], "DBD"))
# } else if (str_detect(rhs, "^K\\.lactis .*?-Mnase$")) {
# ortholog <- rhs
# }
#
# list(
# mnase_tagged_symbol = tagged_locus,
# dbd_donor_symbol = dbd_donor_symbol_str,
# ortholog_donor = ortholog
# )
# }
#
#
# split_deletion <- function(deletion_str) {
# parts <- str_split(deletion_str, "::", simplify = TRUE)
#
# list(
# paralog_deletion_symbol = parts[1],
# paralog_resistance_cassette = if (ncol(parts) >= 2) parts[2] else "none"
# )
# }
#
# split_construct_to_tibble = function(split_list){
# background = list(background=split_list[[1]])
# manipulation_list = split_manipulation(split_list[[2]])
# deletion_list = split_deletion(tryCatch(split_list[[3]], error = function(e) "none"))
#
# bind_cols(map(list(background, manipulation_list, deletion_list), as_tibble))
#
# }
#
#
# split_constructs <- function(s) {
# s <- str_trim(s)
# if (s == "" || is.na(s)) return(character(0))
# # split on spaces ONLY when the next token starts a new locus "XYZ::"
# split_geno = str_split(s, "\\s+(?=[A-Za-z0-9_.()\\-]+::)")[[1]]
#
# bind_cols(tibble(genotype = s), split_construct_to_tibble(split_geno))
#
#
# }
#
# gse178430_parsed_meta = bind_cols(
# select(gse178430_meta, `GEO_Accession (exp)`, strainid, Instrument) %>%
# dplyr::rename(accession = `GEO_Accession (exp)`,
# instrument = Instrument),
# bind_rows(map(gse178430_meta$genotype, split_constructs))
# )