RichardVR committed on
Commit
9bfc7f3
·
verified ·
1 Parent(s): ee5a636

Upload 2 files

Browse files
Files changed (3) hide show
  1. .gitattributes +1 -0
  2. complete_meta_data.csv +3 -0
  3. stock_metadata.Rmd +566 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ complete_meta_data.csv filter=lfs diff=lfs merge=lfs -text
complete_meta_data.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04daadece427428d0d659f3896d5a06ba88a8b626edde7c6e54b59251393f33f
3
+ size 213055926
stock_metadata.Rmd ADDED
@@ -0,0 +1,566 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
title: "Copper"
output: html_document
date: "2025-06-23"
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
# tidyverse already attaches readr, dplyr, tidyr, and ggplot2, so the
# individual library() calls for those packages were redundant and have
# been folded into the single tidyverse load below.
library(tidyverse)
library(readxl)   # Excel readers (not part of core tidyverse)
library(httr)     # HTTP client used against the EODHD REST API
library(jsonlite) # JSON parsing of API responses
```
```{r}
# One-off discovery query (kept commented for reference): screen EODHD's
# "Metals & Mining" industry for companies matching "copper".
# NOTE(review): this comment previously embedded a real api_token literal;
# it has been redacted here and the credential should be rotated.
#
# res <- GET("https://eodhd.com/api/screener",
#            query = list(
#              industries = "Metals & Mining",
#              query = "copper",
#              api_token = api_token
#            ))
#
# companies <- fromJSON(content(res, "text"))
#
# copper = companies$data

```
```{r}
# API credential for EODHD.
#
# SECURITY NOTE(review): this token was committed to version control and
# must be treated as compromised — rotate it. Prefer supplying the token
# via the EODHD_API_TOKEN environment variable; the literal below is kept
# only as a backward-compatible fallback so existing runs keep working.
api_token <- Sys.getenv("EODHD_API_TOKEN", unset = "5f3afd582bd7b4.95720069")
```
```{r}
# Step 1: Download the list of exchanges known to EODHD.
res <- GET("https://eodhd.com/api/exchanges-list", query = list(api_token = api_token))
exchange_list <- fromJSON(content(res, "text", encoding = "UTF-8"))
exchange_df <- as.data.frame(exchange_list)

# Step 2: Initialize storage for results
all_stocks <- list()

# Step 3: Loop over exchange codes and download each exchange's symbol list.
for (exchange in exchange_df$Code) {
  message("Fetching data for exchange: ", exchange)

  res <- GET(
    paste0("https://eodhd.com/api/exchange-symbol-list/", exchange),
    query = list(api_token = api_token)
  )

  if (status_code(res) == 200) {
    # The symbol-list endpoint returns CSV (not JSON), hence read.csv();
    # the variable is named accordingly.
    csv_text <- content(res, "text", encoding = "UTF-8")
    stock_df <- tryCatch(
      read.csv(text = csv_text),
      error = function(e) {
        message("Skipping ", exchange, " due to error: ", e$message)
        NULL
      }
    )

    if (!is.null(stock_df) && nrow(stock_df) > 0) {
      stock_df$ExchangeCode <- exchange
      all_stocks[[length(all_stocks) + 1]] <- stock_df
    }
  } else {
    message("Failed to fetch data for ", exchange, " (", status_code(res), ")")
  }

  Sys.sleep(1) # avoid rate limit
}

# Step 3.5: Column types differ across exchanges; coerce every column to
# character so bind_rows() can stack the frames without type conflicts.
all_stocks_clean <- lapply(all_stocks, function(df) {
  if (is.data.frame(df)) {
    df[] <- lapply(df, as.character)
    df
  } else {
    NULL
  }
})
all_stocks_clean <- Filter(Negate(is.null), all_stocks_clean) # remove NULLs

# Step 4: Combine cleaned data frames
stock_metadata_df <- dplyr::bind_rows(all_stocks_clean)

# Optional: inspect interactively. View() errors in non-interactive
# (knit / Rscript) runs, so guard it.
if (interactive()) View(stock_metadata_df)
# write.csv(stock_metadata_df, "global_stock_metadata.csv", row.names = FALSE)
```
# Stock MetaData

```{r}
# Merge one or more named lists into `base_row`, with later sources
# overwriting earlier values for the same field name. List-valued fields
# are flattened to a single comma-separated string so the result stays
# one row wide. NULL sources are skipped.
merge_fields_deep <- function(base_row, ...) {
  for (source in list(...)) {
    if (is.null(source)) next
    for (field in names(source)) {
      val <- source[[field]]
      if (is.list(val)) val <- paste(unlist(val), collapse = ", ")
      base_row[[field]] <- val
    }
  }
  base_row
}
# Flatten `holder_list$Institutions` into a flat named list:
# Holder_<i>_name, Holder_<i>_date, Holder_<i>_totalShares,
# Holder_<i>_currentShares, Holder_<i>_change_p.
# Returns an empty list when no institutional holders are present.
extract_institution_fields <- function(holder_list) {
  if (is.null(holder_list$Institutions)) return(list())

  holders <- holder_list$Institutions
  # jsonlite may simplify a JSON array of holder objects into a data.frame;
  # normalize to a list of per-holder records so both shapes work.
  if (is.data.frame(holders)) {
    holders <- lapply(seq_len(nrow(holders)), function(r) as.list(holders[r, ]))
  }

  fields <- list()
  for (i in seq_along(holders)) {
    h <- holders[[i]]
    prefix <- paste0("Holder_", i, "_")
    fields[[paste0(prefix, "name")]] <- h$name
    fields[[paste0(prefix, "date")]] <- h$date
    fields[[paste0(prefix, "totalShares")]] <- h$totalShares
    fields[[paste0(prefix, "currentShares")]] <- h$currentShares
    fields[[paste0(prefix, "change_p")]] <- h$change_p
  }

  fields
}
# Flatten `holder_list$Funds` into a flat named list:
# Fund_<i>_name, Fund_<i>_date, Fund_<i>_totalShares,
# Fund_<i>_currentShares, Fund_<i>_change_p.
# Returns an empty list when no fund holders are present.
extract_fund_fields <- function(holder_list) {
  if (is.null(holder_list$Funds)) return(list())

  funds <- holder_list$Funds
  # jsonlite may simplify a JSON array of fund objects into a data.frame;
  # normalize to a list of per-fund records so both shapes work
  # (consistent with extract_institution_fields).
  if (is.data.frame(funds)) {
    funds <- lapply(seq_len(nrow(funds)), function(r) as.list(funds[r, ]))
  }

  fields <- list()
  for (i in seq_along(funds)) {
    f <- funds[[i]]
    prefix <- paste0("Fund_", i, "_")
    fields[[paste0(prefix, "name")]] <- f$name
    fields[[paste0(prefix, "date")]] <- f$date
    fields[[paste0(prefix, "totalShares")]] <- f$totalShares
    fields[[paste0(prefix, "currentShares")]] <- f$currentShares
    fields[[paste0(prefix, "change_p")]] <- f$change_p
  }

  fields
}
# Convert the API's NumberDividendsByYear records into a flat named list
# (Dividend_<year> -> count), restricted to the current year and the four
# preceding ones. Returns an empty list when nothing usable remains.
extract_dividend_fields <- function(div_list) {
  if (is.null(div_list) || length(div_list) == 0) return(list())

  # Keep only records carrying both a Year and a Count.
  div_raw <- lapply(div_list, function(x) {
    if (!is.null(x$Year) && !is.null(x$Count)) {
      data.frame(Year = as.integer(x$Year), Count = as.integer(x$Count))
    } else {
      NULL
    }
  })

  div_df <- do.call(rbind, div_raw)
  if (is.null(div_df) || nrow(div_df) == 0) return(list())

  # Restrict to the most recent five calendar years.
  current_year <- as.integer(format(Sys.Date(), "%Y"))
  target_years <- (current_year - 4):current_year
  div_df <- div_df[div_df$Year %in% target_years, ]

  # Guard: the year filter may have removed every record.
  if (nrow(div_df) == 0) return(list())

  setNames(as.list(div_df$Count), paste0("Dividend_", div_df$Year))
}
183
+
184
+ # 主循环开始
185
+ stock_only <- stock_metadata_df %>%
186
+ filter(Type %in% c("Common Stock", "Preferred Stock", "ETF"))
187
+
188
+ #stocks_subset <- head(stock_only, 5)
189
+ stocks_subset <- stock_only[94334:96494, ] %>%
190
+ filter(Type == "Common Stock")
191
+ #stocks_subset$Code <- sprintf("%06d", as.integer(stocks_subset$Code))
192
+ #stocks_subset$Code <- as.character(stocks_subset$Code)
193
+ #stocks_subset <- tail(stocks_subset)
194
+
195
+ #stocks_subset <- stock_only[50000:nrow(stock_only), ]
196
+ #stocks_subset <- stock_only
197
+ enriched_data <- list()
198
+
199
+ for (i in 1:nrow(stocks_subset)) {
200
+ symbol <- as.character(stocks_subset[i, "Code"])
201
+ exchange <- as.character(stocks_subset[i, "ExchangeCode"])
202
+ full_symbol <- paste0(symbol, ".", gsub(" ", "", exchange))
203
+
204
+ message(sprintf("🔄 Processing %d / %d: %s", i, nrow(stocks_subset), full_symbol))
205
+
206
+ url <- paste0("https://eodhd.com/api/fundamentals/", full_symbol,
207
+ "?api_token=", api_token)
208
+
209
+ res <- tryCatch(GET(url), error = function(e) NULL)
210
+
211
+ if (!is.null(res) && status_code(res) == 200) {
212
+ json_data <- tryCatch(fromJSON(content(res, "text", encoding = "UTF-8")), error = function(e) NULL)
213
+
214
+ if (!is.null(json_data)) {
215
+ # 提取三部分数据
216
+ inst_fields <- extract_institution_fields(json_data$Holders)
217
+ fund_fields <- extract_fund_fields(json_data$Holders)
218
+ dividend_data <- extract_dividend_fields(json_data$SplitsDividends$NumberDividendsByYear)
219
+
220
+ merged_fields <- c(
221
+ json_data$General,
222
+ json_data$Highlights,
223
+ json_data$Valuation,
224
+ json_data$SharesStats,
225
+ json_data$Technicals,
226
+ json_data$SplitsDividends[names(json_data$SplitsDividends) != "NumberDividendsByYear"],
227
+ dividend_data,
228
+ inst_fields,
229
+ fund_fields
230
+
231
+ )
232
+
233
+ enriched_row <- merge_fields_deep(stocks_subset[i, ], merged_fields)
234
+ enriched_data[[length(enriched_data) + 1]] <- enriched_row
235
+ } else {
236
+ message("⚠️ No JSON data for ", full_symbol)
237
+ }
238
+ } else {
239
+ message("❌ Failed request for ", full_symbol)
240
+ }
241
+
242
+ Sys.sleep(1) # 避免过快请求
243
+ }
244
+
245
+ # 合并输出结果
246
+ if (length(enriched_data) > 0) {
247
+ result_df <- bind_rows(enriched_data)
248
+ } else {
249
+ warning("No data enriched.")
250
+ result_df <- data.frame()
251
+ }
252
+
253
+ #View(result_df)
254
+ #write.csv(result_df, "metadata(1-1w).csv", row.names = FALSE)
255
+ #write.csv(result_df, "metadata(1w-2w).csv", row.names = FALSE)
256
+ #write.csv(result_df, "metadata_Taiwan(94324:96485).csv", row.names = FALSE)
257
+ ```

```{r}
# Load the per-batch metadata extracts (w1..w10) and, for each, tally the
# Common Stock rows per Industry (x1..x10). The ten copy-pasted blocks of
# the original are collapsed into one loop; the w*/x* names are preserved
# because downstream chunks reference them.
batch_files <- c(
  "metadata(1-1w).csv", "metadata(1w-2w).csv", "metadata(2w-4w).csv",
  "metadata(4w-6w).csv", "metadata(6w-8w).csv", "metadata(8w-82080).csv",
  "metadata(85250-94323).csv", "metadata(96486-end).csv",
  "metadata_China.csv", "metadata_Taiwan.csv"
)

for (k in seq_along(batch_files)) {
  w <- read_csv(batch_files[k])
  assign(paste0("w", k), w)
  assign(
    paste0("x", k),
    w %>%
      filter(Type == "Common Stock") %>%
      group_by(Industry) %>%
      summarize(total_count = n())
  )
}

```

```{r}
# Aggregate the per-batch industry tallies into one global ranking and
# persist it.
yy <- rbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10) %>%
  group_by(Industry) %>%
  summarize(count = sum(total_count)) %>%
  arrange(desc(count))

write.csv(yy, "Industry_Count.csv", row.names = FALSE)
```

```{r}
# Re-derive x1..x10 as the full Common Stock rows of each batch (grouped by
# Industry), replacing the count summaries from the previous chunk. The ten
# copy-pasted pipelines are collapsed into one loop; names are preserved
# for the downstream chunks.
for (k in 1:10) {
  assign(
    paste0("x", k),
    get(paste0("w", k)) %>%
      group_by(Industry) %>%
      filter(Type == "Common Stock")
  )
}

# write.csv(x1, "metadata_1.csv", row.names = FALSE)
# write.csv(x2, "metadata_2.csv", row.names = FALSE)
# write.csv(x3, "metadata_3.csv", row.names = FALSE)
# write.csv(x4, "metadata_4.csv", row.names = FALSE)
# write.csv(x5, "metadata_5.csv", row.names = FALSE)
# write.csv(x6, "metadata_6.csv", row.names = FALSE)
# write.csv(x7, "metadata_7.csv", row.names = FALSE)
# write.csv(x8, "metadata_8.csv", row.names = FALSE)
# write.csv(x9, "metadata_9.csv", row.names = FALSE)
# write.csv(x10, "metadata_10.csv", row.names = FALSE)

```
350
+
351
+ ## Clean Data
352
+
```{r}
# Reduce the wide Holder_*/Fund_* columns down to the top-3 institutional
# holders per row (ranked by totalShares), then drop auxiliary metadata
# columns not needed downstream.
#
# Input : df with Holder_<i>_{name,date,totalShares,currentShares,...} cols.
# Output: df with exactly Holder_1..3_{name,date,totalShares,currentShares}
#         plus the remaining non-holder/fund columns.
extract_top3_holders_clean <- function(df) {
  # Step 1: locate the totalShares columns.
  share_cols <- grep("^Holder_\\d+_totalShares$", names(df), value = TRUE)
  holder_fields <- c("_name", "_date", "_totalShares", "_currentShares")

  # Step 2: per row, the indices of the (up to) 3 largest holders.
  # NOTE(review): if every row yields exactly 3 indices, apply() returns a
  # matrix rather than a list, and the lapply() in Step 3 would then iterate
  # over individual cells instead of per-row vectors — confirm against real
  # data before relying on this path.
  top3_indices <- apply(df[ , share_cols, drop = FALSE], 1, function(row) {
    non_na <- which(!is.na(row))
    if (length(non_na) == 0) return(rep(NA, 3))
    top <- order(row[non_na], decreasing = TRUE)[1:min(3, length(non_na))]
    return(non_na[top])
  })

  # Step 3: translate indices into the matching totalShares column names.
  top3_colnames <- lapply(top3_indices, function(idxs) {
    if (all(is.na(idxs))) return(rep(NA, 3))
    return(share_cols[idxs])
  })
  top3_colnames <- as.data.frame(do.call(rbind, top3_colnames), stringsAsFactors = FALSE)

  # Step 4: pull each row's holder data and rename to canonical Holder_1..3_*.
  row_extracts <- lapply(1:nrow(top3_colnames), function(i) {
    bases <- top3_colnames[i, ]
    if (all(is.na(bases))) {
      # No holders at all: emit an all-NA row with the canonical names.
      empty_names <- unlist(lapply(1:3, function(j) paste0("Holder_", j, holder_fields)))
      return(as.data.frame(matrix(NA, nrow = 1, ncol = length(empty_names),
                                  dimnames = list(NULL, empty_names))))
    }
    bases <- bases[!is.na(bases)]
    cols <- unlist(lapply(bases, function(base) paste0(gsub("_totalShares$", "", base), holder_fields)))
    out <- df[i, cols, drop = FALSE]
    new_names <- unlist(lapply(seq_along(bases), function(j) paste0("Holder_", j, holder_fields)))
    names(out) <- new_names
    # Pad with NA columns when fewer than 3 holders exist, then fix order.
    full_names <- unlist(lapply(1:3, function(j) paste0("Holder_", j, holder_fields)))
    for (nm in setdiff(full_names, names(out))) {
      out[[nm]] <- NA
    }
    out <- out[ , full_names]
    return(out)
  })

  top3_df <- do.call(rbind, row_extracts)

  # Step 5: drop the original holder/fund columns and append the top-3 set.
  df <- df[ , !grepl("^Holder_|^Fund_", names(df))]
  df <- cbind(df, top3_df)

  # Step 6: drop metadata columns not needed downstream.
  filter_out <- c(
    "CurrencyCode", "CurrencyName", "CurrencySymbol", "CountryISO", "ISIN", "LEI", "Listings", "Officers", "LogoURL",
    "ShortPercent", "ForwardAnnualDividendRate", "ForwardAnnualDividendYield", "DividendDate", "ExDividendDate",
    "LastSplitFactor", "LastSplitDate", "Dividend_2021", "Dividend_2022", "Dividend_2023", "Dividend_2024", "Dividend_2025","DelistedDate","Category"
  )
  df <- df %>% select(-any_of(filter_out))

  return(df)
}

```

```{r}
# Apply the holder clean-up to the batches that contain holder columns.
z1 = extract_top3_holders_clean(x1)
z2 = extract_top3_holders_clean(x2)
z3 = extract_top3_holders_clean(x3)
#z5 = extract_top3_holders_clean(x5)
z7 = extract_top3_holders_clean(x7)

# Columns to drop from the batches that skip the holder clean-up
# (same list as inside extract_top3_holders_clean).
filter_out <- c(
  "CurrencyCode", "CurrencyName", "CurrencySymbol", "CountryISO", "ISIN", "LEI", "Listings", "Officers", "LogoURL",
  "ShortPercent", "ForwardAnnualDividendRate", "ForwardAnnualDividendYield", "DividendDate", "ExDividendDate",
  "LastSplitFactor", "LastSplitDate", "Dividend_2021", "Dividend_2022", "Dividend_2023", "Dividend_2024", "Dividend_2025", "DelistedDate","Category"
)
# NOTE(review): rm(list = ls(pattern = ...)) removes *global objects* whose
# names start with Fund_/Holder_ — it does not touch data-frame columns.
# The grepl() subsets below are what actually drop the columns; the rm()
# calls look like leftovers and are most likely no-ops.
rm(list = ls(pattern = "^Fund_"))
x5 <- x5[ , !grepl("^Fund_", names(x5))]
rm(list = ls(pattern = "^Holder_"))
x5 <- x5[ , !grepl("^Holder_", names(x5))]
# Remaining batches: only drop the unwanted metadata columns.
z5 = x5%>%
  select(-any_of(filter_out))
z4 = x4 %>%
  select(-any_of(filter_out))
z6 = x6 %>%
  select(-any_of(filter_out))
z8 = x8 %>%
  select(-any_of(filter_out))
z9 = x9 %>%
  select(-any_of(filter_out))
z10 = x10 %>%
  select(-any_of(filter_out))
```

```{r}
# Holder columns every batch must expose before row-binding.
holder_fields <- c("_name", "_date", "_totalShares", "_currentShares")
required_holder_cols <- unlist(lapply(1:3, function(j) paste0("Holder_", j, holder_fields)))

# Fetch the cleaned batches z1..z10 from the global environment.
# (The original comment claimed x1..x10; it is z1..z10 that are used.)
data_list <- mget(paste0("z", 1:10))

# Add any missing holder columns as NA so the schemas line up.
data_list_fixed <- lapply(data_list, function(df) {
  for (col in setdiff(required_holder_cols, names(df))) {
    df[[col]] <- NA
  }
  df
})

# Coerce every *_date column to Date in each batch (the original repeated
# ten near-identical if-lines per column; a loop over the list is
# equivalent and covers all batches uniformly).
date_cols <- grep("_date$", names(data_list_fixed[[1]]), value = TRUE)
data_list_fixed <- lapply(data_list_fixed, function(df) {
  for (col in intersect(date_cols, names(df))) {
    df[[col]] <- as.Date(df[[col]])
  }
  df
})

# Write the fixed batches back (z1..z10 are updated in the global env).
list2env(data_list_fixed, .GlobalEnv)

# Stack all batches into the master metadata table.
stock_meta_data <- rbind(z1, z2, z3, z4, z5, z6, z7, z8, z9, z10)
```

# Filter Industry

```{r}
# Industries of interest: mining, metals, and the oil & gas complex.
ind <- c(
  "Other Industrial Metals & Mining", "Gold", "Oil & Gas E&P",
  "Other Precious Metals & Mining", "Semiconductors",
  "Oil & Gas Equipment & Services", "Semiconductor Equipment & Materials",
  "Metal Fabrication", "Oil & Gas Refining & Marketing", "Copper",
  "Oil & Gas Midstream", "Thermal Coal", "Oil & Gas Integrated", "Uranium",
  "Silver", "Oil & Gas Drilling", "Metals & Mining", "Oil & Gas", "Coal",
  "Oil & Gas Related Equipment and Services", "Oil, Gas & Consumable Fuels"
)

# Keep only rows in the selected industries and persist the result.
cleaned_data <- stock_meta_data %>%
  filter(Industry %in% ind)
write.csv(cleaned_data, "cleaned_stock_data.csv", row.names = FALSE)
```

# Merge Financial

```{r}
# Join per-ticker financials onto the metadata, keyed by "<Code>.<Exchange>".
fin <- read.csv("financials_filtered.csv")
stock_meta_data$PrimaryTicker <- paste0(stock_meta_data$Code, ".", stock_meta_data$ExchangeCode)

merged <- stock_meta_data %>%
  left_join(fin, by = c("PrimaryTicker" = "Ticker"))

# Restrict the merged table to the industries selected earlier.
merged_industry <- merged %>%
  filter(Industry %in% ind)
```

# EDA

```{r}
# Pull ticker plus the 2020-2024 total-stockholder-equity columns.
equity_cols <- sprintf("totalStockholderEquity_%d", 2020:2024)
df_equity <- merged_industry[, c("PrimaryTicker", equity_cols)]

# Compound annual growth rate between a start and end value.
#
# Returns NA_real_ when either endpoint is missing, when the starting value
# is non-positive (the ratio is undefined), or when the end value is
# negative (a fractional power of a negative number would yield NaN with a
# warning — the original code hit that case).
calc_cagr <- function(start, end, years = 4) {
  if (is.na(start) || is.na(end) || start <= 0 || end < 0) return(NA_real_)
  (end / start)^(1 / years) - 1
}

# Row-wise equity CAGR over 2020-2024.
df_equity$Equity_CAGR_2020_2024 <- mapply(
  calc_cagr,
  df_equity$totalStockholderEquity_2020,
  df_equity$totalStockholderEquity_2024
)

```

```{r}
library(tidyverse)

# df_equity holds PrimaryTicker, the 2020-2024 equity columns, and the CAGR
# column computed above. Pick the five companies with the highest CAGR.
top5 <- df_equity %>%
  arrange(desc(Equity_CAGR_2020_2024)) %>%
  slice(1:5)

# Reshape to long format so ggplot2 can draw one line per ticker.
df_long <- top5 %>%
  select(PrimaryTicker, starts_with("totalStockholderEquity_")) %>%
  pivot_longer(
    cols = -PrimaryTicker,
    names_to = "Year",
    values_to = "Equity"
  ) %>%
  mutate(Year = as.numeric(gsub("totalStockholderEquity_", "", Year)))

# Line chart of equity over time for the top-5 tickers.
ggplot(df_long, aes(x = Year, y = Equity, color = PrimaryTicker)) +
  geom_line(size = 1) +
  geom_point(size = 2) +
  labs(
    title = "Top 5 Companies by Equity CAGR (2020–2024)",
    x = "Year", y = "Total Stockholder Equity"
  ) +
  theme_minimal() +
  scale_y_continuous(labels = scales::comma)

```