cmatkhan committed on
Commit
e85efd8
·
verified ·
1 Parent(s): 526ad65

Update scripts/parse_barkai_checseq.R

Browse files
Files changed (1) hide show
  1. scripts/parse_barkai_checseq.R +193 -124
scripts/parse_barkai_checseq.R CHANGED
@@ -4,154 +4,223 @@
4
  library(tidyverse)
5
  library(here)
6
  library(arrow)
 
7
 
8
- sacCer3_genome = rtracklayer::import("~/ref/sacCer3/ucsc/sacCer3.fa.gz", format="fasta")
 
 
 
9
 
10
- sacCer3_seqnames = unlist(map(str_split(names(sacCer3_genome), " "), ~.[[1]]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- sacCer3_genome_df = tibble(
13
- seqnames = rep(sacCer3_seqnames, Biostrings::width(sacCer3_genome))
14
- ) %>%
15
- group_by(seqnames) %>%
16
- mutate(start = row_number()-1,
17
- end = row_number()) %>%
18
- ungroup()
19
 
20
- retrieve_series_paths = function(series_id){
21
- sra_meta_path = file.path("data/barkai_checseq", series_id, "SraRunTable.csv")
22
- stopifnot(file.exists(sra_meta_path))
23
- df = read_csv(sra_meta_path)
 
 
24
 
25
- data_files = list.files(here("data/barkai_checseq", series_id), "*.txt.gz", full.names = TRUE)
 
26
 
27
- stopifnot(nrow(df) == length(data_files))
 
 
28
 
29
- names(data_files) = str_extract(basename(data_files), "GSM\\d+")
 
 
 
 
 
30
 
31
  list(
32
- meta = sra_meta_path,
33
- files = data_files
 
34
  )
35
  }
36
 
37
 
38
- add_genomic_coordinate = function(checseqpath){
39
-
40
- bind_cols(sacCer3_genome_df,
41
- data.table::fread(checseqpath, sep = "\t", col.names='pileup'))
42
-
43
- }
44
-
45
- process_checseq_files = function(file){
46
 
47
- add_genomic_coordinate(file) %>%
48
- filter(pileup != 0)
 
 
49
  }
50
 
51
- series_list = map(set_names(c("GSE179430", "GSE209631", "GSE222268")), retrieve_series_paths)
 
 
 
52
 
53
- dataset_basepath = here("data/barkai_checseq/hf/genome_map")
54
 
55
- # Create output directory
56
- dir.create(dataset_basepath, recursive = TRUE, showWarnings = FALSE)
57
 
58
- for (series_id in names(series_list)) {
59
 
60
- message(glue::glue("Processing series {series_id}"))
 
 
 
 
61
 
62
- for (accession_id in names(series_list[[series_id]]$files)) {
63
 
64
- message(glue::glue(" Processing {accession_id}"))
65
 
66
- df <- process_checseq_files(
67
- series_list[[series_id]]$files[[accession_id]]
68
- ) %>%
69
- mutate(accession = accession_id, series = series_id)
70
 
71
- df %>%
72
- group_by(seqnames) %>%
73
- write_dataset(
74
- path = dataset_basepath,
75
- format = "parquet",
76
- partitioning = c("series", "accession"),
77
- existing_data_behavior = "overwrite",
78
- compression = "zstd",
79
- write_statistics = TRUE,
80
- use_dictionary = c(
81
- seqnames = TRUE
82
- )
83
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- gc()
86
- }
87
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
 
89
- # the following code was used to parse an entire series to DF and then save
90
- # to a parquet dataset. that was too large and I chose the dataset partitioning
91
- # instead.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
- # split_manipulation <- function(manipulation_str) {
94
- # parts <- str_split(manipulation_str, "::")[[1]]
95
- #
96
- # if (length(parts) != 2) {
97
- # stop("Unexpected format. Expected 'LOCUS::TAGGED_CONSTRUCT'")
98
- # }
99
- #
100
- # tagged_locus <- parts[1]
101
- # rhs <- parts[2]
102
- #
103
- # # default
104
- # dbd_donor_symbol_str <- "none"
105
- # ortholog <- "none"
106
- #
107
- # # Check for paralog DBD
108
- # if (str_detect(rhs, "-[A-Za-z0-9]+DBD-Mnase$")) {
109
- # dbd_donor_symbol_str <- toupper(str_remove(str_split(rhs, "-", simplify = TRUE)[[2]], "DBD"))
110
- # } else if (str_detect(rhs, "^K\\.lactis .*?-Mnase$")) {
111
- # ortholog <- rhs
112
- # }
113
- #
114
- # list(
115
- # mnase_tagged_symbol = tagged_locus,
116
- # dbd_donor_symbol = dbd_donor_symbol_str,
117
- # ortholog_donor = ortholog
118
- # )
119
- # }
120
- #
121
- #
122
- # split_deletion <- function(deletion_str) {
123
- # parts <- str_split(deletion_str, "::", simplify = TRUE)
124
- #
125
- # list(
126
- # paralog_deletion_symbol = parts[1],
127
- # paralog_resistance_cassette = if (ncol(parts) >= 2) parts[2] else "none"
128
- # )
129
- # }
130
- #
131
- # split_construct_to_tibble = function(split_list){
132
- # background = list(background=split_list[[1]])
133
- # manipulation_list = split_manipulation(split_list[[2]])
134
- # deletion_list = split_deletion(tryCatch(split_list[[3]], error = function(e) "none"))
135
- #
136
- # bind_cols(map(list(background, manipulation_list, deletion_list), as_tibble))
137
- #
138
- # }
139
- #
140
- #
141
- # split_constructs <- function(s) {
142
- # s <- str_trim(s)
143
- # if (s == "" || is.na(s)) return(character(0))
144
- # # split on spaces ONLY when the next token starts a new locus "XYZ::"
145
- # split_geno = str_split(s, "\\s+(?=[A-Za-z0-9_.()\\-]+::)")[[1]]
146
- #
147
- # bind_cols(tibble(genotype = s), split_construct_to_tibble(split_geno))
148
- #
149
- #
150
- # }
151
- #
152
- # gse178430_parsed_meta = bind_cols(
153
- # select(gse178430_meta, `GEO_Accession (exp)`, strainid, Instrument) %>%
154
- # dplyr::rename(accession = `GEO_Accession (exp)`,
155
- # instrument = Instrument),
156
- # bind_rows(map(gse178430_meta$genotype, split_constructs))
157
- # )
 
4
  library(tidyverse)
5
  library(here)
6
  library(arrow)
7
+ library(GEOquery)
8
 
9
+ # genomic feature harmonization table ----
10
+ # see https://huggingface.co/datasets/BrentLab/yeast_genome_resources
11
+ genomicfeatures = arrow::open_dataset(here("data/genome_files/hf/features")) %>%
12
+ as_tibble()
13
 
14
+ # sacCer3_genome = rtracklayer::import("~/ref/sacCer3/ucsc/sacCer3.fa.gz", format="fasta")
15
+ #
16
+ # sacCer3_seqnames = unlist(map(str_split(names(sacCer3_genome), " "), ~.[[1]]))
17
+ #
18
+ # sacCer3_genome_df = tibble(
19
+ # seqnames = rep(sacCer3_seqnames, Biostrings::width(sacCer3_genome))
20
+ # ) %>%
21
+ # group_by(seqnames) %>%
22
+ # mutate(start = row_number()-1,
23
+ # end = row_number()) %>%
24
+ # ungroup()
25
+ #
26
+ # retrieve_series_paths = function(series_id){
27
+ # sra_meta_path = file.path("data/barkai_checseq", series_id, "SraRunTable.csv")
28
+ # stopifnot(file.exists(sra_meta_path))
29
+ # df = read_csv(sra_meta_path)
30
+ #
31
+ # data_files = list.files(here("data/barkai_checseq", series_id), "*.txt.gz", full.names = TRUE)
32
+ #
33
+ # stopifnot(nrow(df) == length(data_files))
34
+ #
35
+ # names(data_files) = str_extract(basename(data_files), "GSM\\d+")
36
+ #
37
+ # list(
38
+ # meta = sra_meta_path,
39
+ # files = data_files
40
+ # )
41
+ # }
42
+ #
43
+ #
44
+ # add_genomic_coordinate = function(checseqpath){
45
+ #
46
+ # bind_cols(sacCer3_genome_df,
47
+ # data.table::fread(checseqpath, sep = "\t", col.names='pileup'))
48
+ #
49
+ # }
50
+ #
51
+ # process_checseq_files = function(file){
52
+ #
53
+ # add_genomic_coordinate(file) %>%
54
+ # filter(pileup != 0)
55
+ # }
56
+ #
57
+ # series_list = map(set_names(c("GSE179430", "GSE209631", "GSE222268")), retrieve_series_paths)
58
+ #
59
+ # dataset_basepath = here("data/barkai_checseq/hf/genome_map")
60
+ #
61
+ # # Create output directory
62
+ # dir.create(dataset_basepath, recursive = TRUE, showWarnings = FALSE)
63
+ #
64
+ # for (series_id in names(series_list)) {
65
+ #
66
+ # message(glue::glue("Processing series {series_id}"))
67
+ #
68
+ # for (accession_id in names(series_list[[series_id]]$files)) {
69
+ #
70
+ # message(glue::glue(" Processing {accession_id}"))
71
+ #
72
+ # df <- process_checseq_files(
73
+ # series_list[[series_id]]$files[[accession_id]]
74
+ # ) %>%
75
+ # mutate(accession = accession_id, series = series_id)
76
+ #
77
+ # df %>%
78
+ # group_by(seqnames) %>%
79
+ # write_dataset(
80
+ # path = dataset_basepath,
81
+ # format = "parquet",
82
+ # partitioning = c("series", "accession"),
83
+ # existing_data_behavior = "overwrite",
84
+ # compression = "zstd",
85
+ # write_statistics = TRUE,
86
+ # use_dictionary = c(
87
+ # seqnames = TRUE
88
+ # )
89
+ # )
90
+ #
91
+ # gc()
92
+ # }
93
+ # }
94
 
95
+ # the following code was used to parse an entire series to DF and then save
96
+ # to a parquet dataset. that was too large and I chose the dataset partitioning
97
+ # instead.
 
 
 
 
98
 
99
+ split_manipulation <- function(manipulation_str) {
100
+ parts <- str_split(manipulation_str, "::")[[1]]
101
+
102
+ if (length(parts) != 2) {
103
+ stop("Unexpected format. Expected 'LOCUS::TAGGED_CONSTRUCT'")
104
+ }
105
 
106
+ tagged_locus <- parts[1]
107
+ rhs <- parts[2]
108
 
109
+ # default
110
+ dbd_donor_symbol_str <- "none"
111
+ ortholog <- "none"
112
 
113
+ # Check for paralog DBD
114
+ if (str_detect(rhs, "-[A-Za-z0-9]+DBD-Mnase$")) {
115
+ dbd_donor_symbol_str <- toupper(str_remove(str_split(rhs, "-", simplify = TRUE)[[2]], "DBD"))
116
+ } else if (str_detect(rhs, "^K\\.lactis .*?-Mnase$")) {
117
+ ortholog <- rhs
118
+ }
119
 
120
  list(
121
+ mnase_tagged_symbol = tagged_locus,
122
+ dbd_donor_symbol = dbd_donor_symbol_str,
123
+ ortholog_donor = ortholog
124
  )
125
  }
126
 
127
 
128
+ split_deletion <- function(deletion_str) {
129
+ parts <- str_split(deletion_str, "::", simplify = TRUE)
 
 
 
 
 
 
130
 
131
+ list(
132
+ paralog_deletion_symbol = parts[1],
133
+ paralog_resistance_cassette = if (ncol(parts) >= 2) parts[2] else "none"
134
+ )
135
  }
136
 
137
+ split_construct_to_tibble = function(split_list){
138
+ background = list(background=split_list[[1]])
139
+ manipulation_list = split_manipulation(split_list[[2]])
140
+ deletion_list = split_deletion(tryCatch(split_list[[3]], error = function(e) "none"))
141
 
142
+ bind_cols(map(list(background, manipulation_list, deletion_list), as_tibble))
143
 
144
+ }
 
145
 
 
146
 
147
+ split_constructs <- function(s) {
148
+ s <- str_trim(s)
149
+ if (s == "" || is.na(s)) return(character(0))
150
+ # split on spaces ONLY when the next token starts a new locus "XYZ::"
151
+ split_geno = str_split(s, "\\s+(?=[A-Za-z0-9_.()\\-]+::)")[[1]]
152
 
153
+ bind_cols(tibble(genotype = s), split_construct_to_tibble(split_geno))
154
 
 
155
 
156
+ }
 
 
 
157
 
158
+ gse178430_meta = read_csv("data/barkai_checseq/GSE179430/SraRunTable.csv") %>%
159
+ mutate(genotype = str_replace(genotype, "Yap2", "Cad1")) %>%
160
+ mutate(genotype = str_replace(genotype, "Yap4", "Cin5"))
161
+
162
+ gse178430_parsed_meta = bind_cols(
163
+ select(gse178430_meta, `Sample Name`, strainid, Instrument) %>%
164
+ dplyr::rename(accession = `Sample Name`,
165
+ instrument = Instrument),
166
+ bind_rows(map(gse178430_meta$genotype, split_constructs))) %>%
167
+ left_join(select(genomicfeatures, locus_tag, symbol) %>%
168
+ dplyr::rename(mnase_tagged_symbol = symbol)) %>%
169
+ dplyr::rename(regulator_locus_tag = locus_tag,
170
+ regulator_symbol = mnase_tagged_symbol) %>%
171
+ select(accession, regulator_locus_tag, regulator_symbol, strainid,
172
+ instrument, genotype, dbd_donor_symbol, ortholog_donor,
173
+ paralog_deletion_symbol, paralog_resistance_cassette)
174
+
175
+ gse178430_parsed_meta %>%
176
+ write_parquet(here("/home/chase/code/hf/barkai_compendium/GSE178430_metadata.parquet"),
177
+ compression = "zstd",
178
+ write_statistics = TRUE,
179
+ use_dictionary = c(
180
+ accession = TRUE,
181
+ regulator_locus_tag = TRUE,
182
+ regulator_symbol = TRUE
183
+ )
184
+ )
185
 
186
+ gse209631_meta = read_csv("data/barkai_checseq/GSE209631/SraRunTable.csv")
187
+
188
+ gse209631_parsed_meta = gse209631_meta %>%
189
+ select(`Sample Name`, tagged_tf, Instrument, `variant-type`) %>%
190
+ janitor::clean_names() %>%
191
+ dplyr::rename(accession = sample_name) %>%
192
+ arrange(tagged_tf, variant_type) %>%
193
+ left_join(select(genomicfeatures, locus_tag, symbol) %>% dplyr::rename(tagged_tf = symbol)) %>%
194
+ dplyr::rename(regulator_symbol = tagged_tf, regulator_locus_tag = locus_tag) %>%
195
+ select(accession, regulator_locus_tag, regulator_symbol, variant_type)
196
+
197
+ gse209631_parsed_meta %>%
198
+ write_parquet(here("/home/chase/code/hf/barkai_compendium/GSE209631_metadata.parquet"),
199
+ compression = "zstd",
200
+ write_statistics = TRUE,
201
+ use_dictionary = c(
202
+ accession = TRUE,
203
+ regulator_locus_tag = TRUE,
204
+ regulator_symbol = TRUE,
205
+ variant_type = TRUE
206
+ )
207
+ )
208
 
209
+ gse=GEOquery::getGEO(filename=here("data/barkai_checseq/GSE222268_series_matrix.txt"))
210
+
211
+ gse222268_meta = Biobase::pData(gse@phenoData) %>% as_tibble() %>%
212
+ select(title, geo_accession, extract_protocol_ch1, description,
213
+ instrument_model, library_selection) %>%
214
+ mutate(description = ifelse(description == "", library_selection, description)) %>%
215
+ dplyr::rename(accession = geo_accession) %>%
216
+ select(-library_selection)
217
+
218
+ gse222268_meta %>%
219
+ write_parquet(here("/home/chase/code/hf/barkai_compendium/GSE222268_metadata.parquet"),
220
+ compression = "zstd",
221
+ write_statistics = TRUE,
222
+ use_dictionary = c(
223
+ accession = TRUE
224
+ )
225
+ )
226