aurman committed on
Commit 0cf1acd · verified · 1 Parent(s): 2e88aac

Upload Processing_CleanedUp_Commented.R

Files changed (1)
  1. Processing_CleanedUp_Commented.R +240 -0
Processing_CleanedUp_Commented.R ADDED
@@ -0,0 +1,240 @@
+ # ============================================================================
+ # GoogleTrendArchive DATA PROCESSING PIPELINE
+ # ============================================================================
+ # This script processes raw daily CSV files from Google's Trending Now system
+ # into a single consolidated dataset with calculated trend durations.
+ #
+ # Input:  Daily CSV files organized by location in separate folders
+ #         (see daily_compressed.zip)
+ # Output: Single CSV with deduplicated trends and calculated durations
+ # ============================================================================
+
+ library(tidyverse)
+ library(lubridate)
+ library(data.table)
+
+ daily_base_dir <- ""  # data directory with the (sub)folders
+ output_file <- "googletrendarchive_preprocessed.csv"
+
+ # ============================================================================
+ # HELPER FUNCTIONS
+ # ============================================================================
+
+ # Parse Google's bucketed search volume format (e.g., "50K+", "2M+", "500+")
+ parse_search_volume <- function(volume_str) {
+   if (is.na(volume_str) || volume_str == "") return(NA_real_)
+
+   clean <- str_remove(volume_str, "\\+")
+
+   if (str_detect(clean, "K$")) {
+     return(as.numeric(str_remove(clean, "K")) * 1000)
+   } else if (str_detect(clean, "M$")) {
+     return(as.numeric(str_remove(clean, "M")) * 1000000)
+   } else {
+     return(as.numeric(clean))
+   }
+ }
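+ # e.g. parse_search_volume("50K+") -> 50000, "2M+" -> 2e6, "500+" -> 500, NA/"" -> NA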
+
+ # Load all CSV files for a single location
+ load_location_data <- function(location_folder) {
+   location <- basename(location_folder)
+
+   files <- list.files(location_folder,
+                       pattern = "trending_.*_1d_.*\\.csv$",
+                       full.names = TRUE,
+                       recursive = TRUE)
+
+   if (length(files) == 0) {
+     cat(" ", location, ": No files found\n")
+     return(NULL)
+   }
+
+   cat(" Loading", location, ":", length(files), "files\n")
+
+   # Load and combine all files for this location
+   data <- map_dfr(files, function(file) {
+     tryCatch({
+       df <- read_csv(file, show_col_types = FALSE, col_types = cols(.default = "c"))
+
+       if (nrow(df) == 0) return(NULL)
+
+       # Extract collection date from filename (format: YYYYMMDD)
+       filename <- basename(file)
+       date_match <- str_match(filename, "(\\d{8})")
+       collection_date <- if (!is.na(date_match[1])) {
+         ymd(date_match[2])
+       } else {
+         NA_Date_
+       }
+
+       # Add metadata
+       df %>%
+         mutate(
+           location = location,
+           collection_date = collection_date
+         )
+     }, error = function(e) {
+       warning("Error loading ", file, ": ", e$message)
+       return(NULL)
+     })
+   })
+
+   return(data)
+ }
+
+ # ============================================================================
+ # STEP 1: LOAD ALL RAW DATA
+ # ============================================================================
+
+ cat("=== STEP 1: LOADING RAW DATA ===\n\n")
+
+ # Find all location folders
+ folders <- list.dirs(daily_base_dir, full.names = TRUE, recursive = FALSE)
+ folders <- folders[basename(folders) != "weekly" & basename(folders) != "reconstructed"]
+
+ cat("Found", length(folders), "locations\n\n")
+
+ # Load data from all locations
+ all_data <- map_dfr(folders, load_location_data)
+
+ cat("\n✓ Loaded", format(nrow(all_data), big.mark = ","), "raw trend records\n")
+ cat(" Date range:", format(min(all_data$collection_date, na.rm = TRUE)),
+     "to", format(max(all_data$collection_date, na.rm = TRUE)), "\n\n")
+
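+ # At this point all_data holds one row per trend per location per daily snapshot,
+ # with every column read as character (col_types = cols(.default = "c")).
+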
+ # ============================================================================
+ # STEP 2: PARSE AND STANDARDIZE FIELDS
+ # ============================================================================
+
+ cat("=== STEP 2: PARSING FIELDS ===\n\n")
+
+ # Standardize column names
+ colnames(all_data) <- tolower(colnames(all_data))
+ colnames(all_data) <- str_replace_all(colnames(all_data), " ", "_")
+
+ cat("Parsing search volumes...\n")
+ all_data <- all_data %>%
+   mutate(search_volume_lower = map_dbl(search_volume, parse_search_volume))
+
+ cat("Parsing timestamps...\n")
+ all_data_parsed <- all_data %>%
+   mutate(
+     # Remove timezone suffix and parse
+     started_clean = str_remove(started, " UTC[+-]?\\d+$"),
+     ended_clean = str_remove(ended, " UTC[+-]?\\d+$"),
+
+     # Parse to POSIXct timestamps (UTC)
+     start_time = parse_date_time(started_clean,
+                                  orders = c("Bdy IMS p"),
+                                  tz = "UTC",
+                                  quiet = TRUE),
+     end_time = parse_date_time(ended_clean,
+                                orders = c("Bdy IMS p"),
+                                tz = "UTC",
+                                quiet = TRUE),
+
+     # Count queries in trend breakdown (comma-separated)
+     n_queries = str_count(trend_breakdown, ",") + 1
+   ) %>%
+   select(-started_clean, -ended_clean) %>%
+   arrange(trends, location, collection_date, start_time)
+
+ cat("✓ Parsing complete\n\n")
+
+ # ============================================================================
+ # STEP 3: CREATE TREND EPISODES AND CALCULATE DURATIONS
+ # ============================================================================
+
+ cat("=== STEP 3: EPISODE DEDUPLICATION AND DURATION CALCULATION ===\n\n")
+
+ # Convert to data.table for faster processing
+ setDT(all_data_parsed)
+
+ # Sort by trend, location, and time
+ setorder(all_data_parsed, trends, location, collection_date, start_time)
+
+ cat("Step 3a: Identifying trend episodes...\n")
+ # Identify trend episodes (same trend appearing in multiple daily snapshots)
+ all_data_parsed[, `:=`(
+   prev_start = shift(start_time),
+   prev_end = shift(end_time)
+ ), by = .(trends, location)]
+
+ all_data_parsed[, `:=`(
+   start_gap = as.numeric(difftime(start_time, prev_start, units = "hours")),
+   time_gap = as.numeric(difftime(start_time, prev_end, units = "hours"))
+ )]
+
+ all_data_parsed[, new_episode := is.na(prev_start) | (!is.na(start_gap) & abs(start_gap) > 1)]
+ all_data_parsed[is.na(new_episode), new_episode := TRUE]
+ all_data_parsed[, episode_id := cumsum(new_episode), by = .(trends, location)]
+
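+ # An episode groups consecutive snapshots of a trend/location whose reported
+ # start times agree to within one hour; rows without a usable previous start
+ # time begin a new episode, and episode_id numbers these runs.
+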
+ cat("Step 3b: Aggregating episodes...\n")
+ # Collapse multiple occurrences of the same trend into single episodes
+ # Use earliest start time and latest end time for each episode
+ episodes <- all_data_parsed[, .(
+   start_time = min(start_time, na.rm = TRUE),
+   end_time = max(end_time, na.rm = TRUE),
+   first_collection_date = min(collection_date),
+   last_collection_date = max(collection_date),
+   n_days_observed = uniqueN(collection_date),
+   total_occurrences = .N,
+   search_volume_lower = max(search_volume_lower, na.rm = TRUE),
+   n_queries = first(n_queries),
+   trend_breakdown = first(trend_breakdown),
+   collection_date = first(collection_date)
+ ), by = .(trends, location, episode_id)]
+
+ # Replace Inf values with NA
+ episodes[is.infinite(start_time), start_time := as.POSIXct(NA)]
+ episodes[is.infinite(end_time), end_time := as.POSIXct(NA)]
+
+ cat("Step 3c: Calculating durations with patching...\n")
+ # Fix data quality issues and calculate durations
+
+ episodes[, `:=`(
+   start_fixed = fifelse(!is.na(start_time) & !is.na(end_time) & end_time < start_time,
+                         end_time, start_time),
+   end_fixed = fifelse(!is.na(start_time) & !is.na(end_time) & end_time < start_time,
+                       start_time, end_time),
+   times_were_swapped = !is.na(start_time) & !is.na(end_time) & end_time < start_time
+ )]
+
+ episodes[is.na(end_fixed) & !is.na(start_fixed),
+          end_estimated := as.POSIXct(paste(last_collection_date, "23:59:59"), tz = "UTC")]
+ episodes[is.na(end_estimated), end_estimated := end_fixed]
+
+ # Calculate final duration
+ episodes[, `:=`(
+   duration_minutes = as.numeric(difftime(end_estimated, start_fixed, units = "mins")),
+   duration_is_estimate = is.na(end_fixed) | times_were_swapped
+ )]
+
+ episodes[, duration_hours := duration_minutes / 60]
+
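+ # duration_is_estimate flags episodes whose end time had to be imputed
+ # (end of the last collection day) or whose start/end timestamps were swapped.
+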
+ # Add date components for analysis
+ episodes[, `:=`(
+   year = year(collection_date),
+   month = month(collection_date),
+   weekday = lubridate::wday(collection_date, label = TRUE)
+ )]
+
+ cat("Step 3d: Filtering invalid records...\n")
+ # Filter out records with missing or invalid data
+ all_data_clean <- episodes[
+   !is.na(search_volume_lower) &
+   !is.na(duration_minutes) &
+   duration_minutes > 0
+ ]
+
+ # Remove temporary working columns (only those actually present in the episode table)
+ drop_cols <- intersect(c("start_fixed", "end_fixed", "end_estimated",
+                          "prev_start", "prev_end", "start_gap", "time_gap", "new_episode"),
+                        names(all_data_clean))
+ all_data_clean[, (drop_cols) := NULL]
+
+ cat("✓ Episode processing complete\n\n")
+
+ # Write to CSV
+ cat("\nWriting to", output_file, "...\n")
+ fwrite(all_data_clean, output_file)