<!-- Upload residue (file-listing metadata: "stock_meta_data / stock_metadata.Rmd",
uploader name, commit 9bfc7f3). Not part of the document — kept only as a comment;
note the YAML header below should ideally be the very first lines of the file for
rmarkdown to parse it reliably. -->
---
title: "Copper"
output: html_document
date: "2025-06-23"
---
```{r setup, include=FALSE}
# Global chunk option: show R code in the rendered HTML output.
knitr::opts_chunk$set(echo = TRUE)
# NOTE(review): library(tidyverse) already attaches readr, dplyr and ggplot2;
# the explicit loads are redundant but harmless.
library(readr)
library(tidyverse)
library(readxl)
library(ggplot2)
library(dplyr)
library(httr)
library(jsonlite)
```
```{r}
#
# res <- GET("https://eodhd.com/api/screener",
# query = list(
# industries = "Metals & Mining",
# query = "copper",
# api_token = " 5f3afd582bd7b4.95720069"
# ))
#
# companies <- fromJSON(content(res, "text"))
#
# copper = companies$data
```
```{r}
# EODHD API token used by every request below.
# SECURITY: a live token was committed in plain text here (and again in the
# commented-out chunk above) — it should be rotated. Prefer the environment
# variable; the literal is kept only as a backward-compatible fallback.
api_token <- Sys.getenv("EODHD_API_TOKEN", unset = "5f3afd582bd7b4.95720069")
```
```{r}
# Step 1: Pull the list of exchanges EODHD supports.
res <- GET("https://eodhd.com/api/exchanges-list", query = list(api_token = api_token))
exchange_list <- fromJSON(content(res, "text", encoding = "UTF-8"))
exchange_df <- as.data.frame(exchange_list)

# Step 2: Initialize storage for results
all_stocks <- list()

# Step 3: Loop over exchange codes and download symbol lists. This endpoint
# returns CSV text (not JSON), hence read.csv on the response body.
for (exchange in exchange_df$Code) {
  message("Fetching data for exchange: ", exchange)
  res <- GET(
    paste0("https://eodhd.com/api/exchange-symbol-list/", exchange),
    query = list(api_token = api_token)
  )
  if (status_code(res) == 200) {
    csv_text <- content(res, "text", encoding = "UTF-8")
    stock_df <- tryCatch(
      read.csv(text = csv_text),
      error = function(e) {
        message("Skipping ", exchange, " due to error: ", e$message)
        NULL
      }
    )
    if (!is.null(stock_df) && nrow(stock_df) > 0) {
      stock_df$ExchangeCode <- exchange  # remember which exchange each row came from
      all_stocks[[length(all_stocks) + 1]] <- stock_df
    }
  } else {
    message("Failed to fetch data for ", exchange, " (", status_code(res), ")")
  }
  Sys.sleep(1) # avoid rate limit
}

# Step 3.5: Force all columns to character so exchanges whose CSVs parsed
# with different column types can still be row-bound.
all_stocks_clean <- lapply(all_stocks, function(df) {
  if (!is.data.frame(df)) return(NULL)
  df[] <- lapply(df, as.character)
  df
})
all_stocks_clean <- Filter(Negate(is.null), all_stocks_clean) # remove NULLs

# Step 4: Combine cleaned data frames
stock_metadata_df <- dplyr::bind_rows(all_stocks_clean)

# Optional: inspect or save. View() errors during a non-interactive knit
# (rmarkdown::render), so guard it.
if (interactive()) View(stock_metadata_df)
# write.csv(stock_metadata_df, "global_stock_metadata.csv", row.names = FALSE)
```
# Stock MetaData
```{r}
# Overlay every extra list onto base_row, field by field. Later lists win
# on name collisions; list-valued fields are flattened into a single
# comma-separated string so the row stays rectangular. NULL lists are skipped.
merge_fields_deep <- function(base_row, ...) {
  for (extra in list(...)) {
    if (is.null(extra)) next
    for (nm in names(extra)) {
      v <- extra[[nm]]
      base_row[[nm]] <- if (is.list(v)) paste(unlist(v), collapse = ", ") else v
    }
  }
  base_row
}
# Flatten the Holders$Institutions section of an EODHD fundamentals response
# into one named list: Holder_<i>_name, Holder_<i>_date, ... for each holder.
# jsonlite::fromJSON with default simplification turns an array of objects
# into a data.frame, in which case `holders[[i]]` would iterate COLUMNS and
# every h$name lookup would fail; convert that shape to a list of rows first.
# Returns an empty list when no institutions are present.
extract_institution_fields <- function(holder_list) {
  holders <- holder_list$Institutions
  if (is.null(holders)) return(list())
  if (is.data.frame(holders)) {
    holders <- lapply(seq_len(nrow(holders)), function(r) as.list(holders[r, ]))
  }
  fields <- list()
  for (i in seq_along(holders)) {
    h <- holders[[i]]
    prefix <- paste0("Holder_", i, "_")
    fields[[paste0(prefix, "name")]] <- h$name
    fields[[paste0(prefix, "date")]] <- h$date
    fields[[paste0(prefix, "totalShares")]] <- h$totalShares
    fields[[paste0(prefix, "currentShares")]] <- h$currentShares
    fields[[paste0(prefix, "change_p")]] <- h$change_p
  }
  fields
}
# Flatten the Holders$Funds section of an EODHD fundamentals response into
# one named list: Fund_<i>_name, Fund_<i>_date, ... for each fund holder.
# Mirrors extract_institution_fields: jsonlite's default simplification can
# deliver a data.frame (where `funds[[i]]` would iterate COLUMNS), so that
# shape is converted to a list of rows first. Empty list when absent.
extract_fund_fields <- function(holder_list) {
  funds <- holder_list$Funds
  if (is.null(funds)) return(list())
  if (is.data.frame(funds)) {
    funds <- lapply(seq_len(nrow(funds)), function(r) as.list(funds[r, ]))
  }
  fields <- list()
  for (i in seq_along(funds)) {
    f <- funds[[i]]
    prefix <- paste0("Fund_", i, "_")
    fields[[paste0(prefix, "name")]] <- f$name
    fields[[paste0(prefix, "date")]] <- f$date
    fields[[paste0(prefix, "totalShares")]] <- f$totalShares
    fields[[paste0(prefix, "currentShares")]] <- f$currentShares
    fields[[paste0(prefix, "change_p")]] <- f$change_p
  }
  fields
}
# Turn SplitsDividends$NumberDividendsByYear into Dividend_<year> = count
# entries, keeping only the last five calendar years (current year included,
# per the original "keep ~5 recent years" comment). Accepts either a list of
# {Year, Count} records or the data.frame jsonlite produces with default
# simplification — the original lapply over a data.frame iterated columns,
# found no $Year/$Count, and silently returned list(). Empty list when there
# is nothing to report.
extract_dividend_fields <- function(div_list) {
  if (is.null(div_list) || length(div_list) == 0) return(list())
  if (is.data.frame(div_list)) {
    if (!all(c("Year", "Count") %in% names(div_list))) return(list())
    div_df <- data.frame(Year = as.integer(div_list$Year),
                         Count = as.integer(div_list$Count))
  } else {
    rows <- lapply(div_list, function(x) {
      if (!is.null(x$Year) && !is.null(x$Count)) {
        data.frame(Year = as.integer(x$Year), Count = as.integer(x$Count))
      } else {
        NULL
      }
    })
    div_df <- do.call(rbind, rows)
  }
  if (is.null(div_df) || nrow(div_df) == 0) return(list())
  # Keep the most recent five years only.
  current_year <- as.integer(format(Sys.Date(), "%Y"))
  div_df <- div_df[div_df$Year %in% (current_year - 4):current_year, ]
  if (nrow(div_df) == 0) return(list())  # nothing recent enough
  setNames(as.list(div_df$Count), paste0("Dividend_", div_df$Year))
}
# Main loop setup: choose which slice of the metadata universe to enrich.
stock_only <- stock_metadata_df %>%
filter(Type %in% c("Common Stock", "Preferred Stock", "ETF"))
#stocks_subset <- head(stock_only, 5)
# Hard-coded row window; judging by the commented write.csv filename below
# ("metadata_Taiwan(94324:96485).csv"), rows 94334:96494 are presumably the
# Taiwan-exchange slice — TODO confirm against the current stock_metadata_df.
stocks_subset <- stock_only[94334:96494, ] %>%
filter(Type == "Common Stock")
# Alternative subsets kept from earlier runs (zero-padding numeric codes,
# resuming from row 50000, or processing the full universe):
#stocks_subset$Code <- sprintf("%06d", as.integer(stocks_subset$Code))
#stocks_subset$Code <- as.character(stocks_subset$Code)
#stocks_subset <- tail(stocks_subset)
#stocks_subset <- stock_only[50000:nrow(stock_only), ]
#stocks_subset <- stock_only
# Main enrichment loop: for every selected ticker, download EODHD
# fundamentals and flatten the sections of interest onto the metadata row.
enriched_data <- list()
# seq_len is safe when stocks_subset has 0 rows (1:nrow would yield c(1, 0)
# and the loop body would fail on the out-of-range index).
for (i in seq_len(nrow(stocks_subset))) {
  symbol <- as.character(stocks_subset[i, "Code"])
  exchange <- as.character(stocks_subset[i, "ExchangeCode"])
  # Fundamentals endpoint expects "CODE.EXCHANGE"; strip spaces from codes
  # like "KLSE " that appear in the exchange list.
  full_symbol <- paste0(symbol, ".", gsub(" ", "", exchange))
  message(sprintf("🔄 Processing %d / %d: %s", i, nrow(stocks_subset), full_symbol))
  url <- paste0("https://eodhd.com/api/fundamentals/", full_symbol,
                "?api_token=", api_token)
  res <- tryCatch(GET(url), error = function(e) NULL)
  if (!is.null(res) && status_code(res) == 200) {
    json_data <- tryCatch(fromJSON(content(res, "text", encoding = "UTF-8")), error = function(e) NULL)
    if (!is.null(json_data)) {
      # Extract the holder and dividend sections.
      inst_fields <- extract_institution_fields(json_data$Holders)
      fund_fields <- extract_fund_fields(json_data$Holders)
      dividend_data <- extract_dividend_fields(json_data$SplitsDividends$NumberDividendsByYear)
      merged_fields <- c(
        json_data$General,
        json_data$Highlights,
        json_data$Valuation,
        json_data$SharesStats,
        json_data$Technicals,
        json_data$SplitsDividends[names(json_data$SplitsDividends) != "NumberDividendsByYear"],
        dividend_data,
        inst_fields,
        fund_fields
      )
      enriched_row <- merge_fields_deep(stocks_subset[i, ], merged_fields)
      enriched_data[[length(enriched_data) + 1]] <- enriched_row
    } else {
      message("⚠️ No JSON data for ", full_symbol)
    }
  } else {
    message("❌ Failed request for ", full_symbol)
  }
  Sys.sleep(1) # throttle to stay under the API rate limit
}
# Combine the enriched rows (if any) into one data frame; warn and fall back
# to an empty frame when nothing was enriched.
result_df <- if (length(enriched_data) > 0) {
  bind_rows(enriched_data)
} else {
  warning("No data enriched.")
  data.frame()
}
# Inspection / checkpoint exports from previous runs:
#View(result_df)
#write.csv(result_df, "metadata(1-1w).csv", row.names = FALSE)
#write.csv(result_df, "metadata(1w-2w).csv", row.names = FALSE)
#write.csv(result_df, "metadata_Taiwan(94324:96485).csv", row.names = FALSE)
```
```{r}
# Read the per-range metadata shards exported by earlier runs, then tally
# common-stock counts per industry for each shard. The original repeated the
# same read_csv / group_by / filter / summarize pipeline ten times; it is
# collapsed into one pass. list2env keeps the w1..w10 / x1..x10 globals the
# later chunks depend on.
metadata_files <- c(
  "metadata(1-1w).csv", "metadata(1w-2w).csv", "metadata(2w-4w).csv",
  "metadata(4w-6w).csv", "metadata(6w-8w).csv", "metadata(8w-82080).csv",
  "metadata(85250-94323).csv", "metadata(96486-end).csv",
  "metadata_China.csv", "metadata_Taiwan.csv"
)
shards <- lapply(metadata_files, read_csv)
names(shards) <- paste0("w", seq_along(shards))

# Per-shard industry counts, common stocks only.
counts <- lapply(shards, function(df) {
  df %>%
    group_by(Industry) %>%
    filter(Type == "Common Stock") %>%
    summarize(total_count = n())
})
names(counts) <- paste0("x", seq_along(counts))

# Publish w1..w10 and x1..x10 into the global environment.
list2env(shards, .GlobalEnv)
list2env(counts, .GlobalEnv)
```
```{r}
# Stack the per-shard industry tallies and total them into a single global
# ranking, written out for later reference.
yy <- rbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10) %>%
  group_by(Industry) %>%
  summarize(count = sum(total_count)) %>%
  arrange(desc(count))
write.csv(yy, "Industry_Count.csv", row.names = FALSE)
```
```{r}
# Re-derive x1..x10 as the full common-stock ROWS per shard (the previous
# chunk overwrote them with per-industry counts), preserving the Industry
# grouping the downstream cleaning code expects. The ten copy-pasted
# pipelines are collapsed into one loop; assign/get keep the x1..x10 names.
for (i in 1:10) {
  shard <- get(paste0("w", i))
  assign(
    paste0("x", i),
    shard %>%
      group_by(Industry) %>%
      filter(Type == "Common Stock")
  )
}
# Per-shard checkpoint exports from previous runs:
# write.csv(x1, "metadata_1.csv", row.names = FALSE)
# write.csv(x2, "metadata_2.csv", row.names = FALSE)
# write.csv(x3, "metadata_3.csv", row.names = FALSE)
# write.csv(x4, "metadata_4.csv", row.names = FALSE)
# write.csv(x5, "metadata_5.csv", row.names = FALSE)
# write.csv(x6, "metadata_6.csv", row.names = FALSE)
# write.csv(x7, "metadata_7.csv", row.names = FALSE)
# write.csv(x8, "metadata_8.csv", row.names = FALSE)
# write.csv(x9, "metadata_9.csv", row.names = FALSE)
# write.csv(x10, "metadata_10.csv", row.names = FALSE)
```
## Clean Data
```{r}
# For each row, keep only the three largest institutional holders (ranked by
# totalShares), renamed Holder_1_* .. Holder_3_*; drop all raw Holder_*/Fund_*
# columns and a fixed set of fields unused downstream.
extract_top3_holders_clean <- function(df) {
  # Step 1: locate the per-holder totalShares columns.
  share_cols <- grep("^Holder_\\d+_totalShares$", names(df), value = TRUE)
  holder_fields <- c("_name", "_date", "_totalShares", "_currentShares")
  full_names <- unlist(lapply(1:3, function(j) paste0("Holder_", j, holder_fields)))

  # Step 2: per-row indices of the top (up to 3) holders.
  # BUGFIX: the original used apply(), which returns a MATRIX (not a list)
  # whenever every row yields exactly 3 indices; the downstream lapply then
  # iterates scalar cells and scrambles the result. lapply over row numbers
  # always yields one vector per row.
  top3_indices <- lapply(seq_len(nrow(df)), function(r) {
    row <- unlist(df[r, share_cols, drop = FALSE])
    non_na <- which(!is.na(row))
    if (length(non_na) == 0) return(rep(NA, 3))
    top <- order(row[non_na], decreasing = TRUE)[1:min(3, length(non_na))]
    non_na[top]
  })

  # Step 3: map indices to column names, padding to length 3 with NA.
  # BUGFIX: the original rbind recycled short vectors when a row had only
  # 1-2 holders, duplicating a holder into the missing slots.
  top3_colnames <- lapply(top3_indices, function(idxs) {
    nm <- rep(NA_character_, 3)
    ok <- which(!is.na(idxs))
    nm[ok] <- share_cols[idxs[ok]]
    nm
  })
  top3_colnames <- as.data.frame(do.call(rbind, top3_colnames), stringsAsFactors = FALSE)

  # Step 4: pull each row's holder columns and rename to Holder_1..3_*.
  row_extracts <- lapply(seq_len(nrow(top3_colnames)), function(i) {
    bases <- top3_colnames[i, ]
    if (all(is.na(bases))) {
      # No holders at all: emit an all-NA placeholder row.
      return(as.data.frame(matrix(NA, nrow = 1, ncol = length(full_names),
                                  dimnames = list(NULL, full_names))))
    }
    bases <- bases[!is.na(bases)]
    cols <- unlist(lapply(bases, function(base) paste0(gsub("_totalShares$", "", base), holder_fields)))
    out <- df[i, cols, drop = FALSE]
    names(out) <- unlist(lapply(seq_along(bases), function(j) paste0("Holder_", j, holder_fields)))
    # Pad missing Holder_k_* slots with NA so every row has the same schema.
    for (nm in setdiff(full_names, names(out))) {
      out[[nm]] <- NA
    }
    out[, full_names]
  })
  top3_df <- do.call(rbind, row_extracts)

  # Step 5: drop the raw holder/fund columns and attach the top-3 set.
  df <- df[, !grepl("^Holder_|^Fund_", names(df))]
  df <- cbind(df, top3_df)

  # Step 6: remove fields not used downstream.
  filter_out <- c(
    "CurrencyCode", "CurrencyName", "CurrencySymbol", "CountryISO", "ISIN", "LEI", "Listings", "Officers", "LogoURL",
    "ShortPercent", "ForwardAnnualDividendRate", "ForwardAnnualDividendYield", "DividendDate", "ExDividendDate",
    "LastSplitFactor", "LastSplitDate", "Dividend_2021", "Dividend_2022", "Dividend_2023", "Dividend_2024", "Dividend_2025", "DelistedDate", "Category"
  )
  df <- df %>% select(-any_of(filter_out))
  df
}
```
```{r}
# Shards with usable holder data go through the top-3 holder extraction.
z1 <- extract_top3_holders_clean(x1)
z2 <- extract_top3_holders_clean(x2)
z3 <- extract_top3_holders_clean(x3)
z7 <- extract_top3_holders_clean(x7)

# Same drop-list used inside extract_top3_holders_clean, applied directly
# to the shards that skip the holder extraction.
filter_out <- c(
  "CurrencyCode", "CurrencyName", "CurrencySymbol", "CountryISO", "ISIN", "LEI", "Listings", "Officers", "LogoURL",
  "ShortPercent", "ForwardAnnualDividendRate", "ForwardAnnualDividendYield", "DividendDate", "ExDividendDate",
  "LastSplitFactor", "LastSplitDate", "Dividend_2021", "Dividend_2022", "Dividend_2023", "Dividend_2024", "Dividend_2025", "DelistedDate","Category"
)

# x5: drop the raw Holder_/Fund_ columns directly instead of extracting.
# BUGFIX: the original also ran rm(list = ls(pattern = "^Fund_")) and
# rm(list = ls(pattern = "^Holder_")), which delete GLOBAL VARIABLES matching
# those patterns — not data-frame columns. A no-op at best, destructive at
# worst; removed. The grepl column drops below do the intended work.
x5 <- x5[ , !grepl("^Fund_", names(x5))]
x5 <- x5[ , !grepl("^Holder_", names(x5))]
z5 <- x5 %>%
  select(-any_of(filter_out))

# Remaining shards only need the unused fields removed.
z4 <- x4 %>%
  select(-any_of(filter_out))
z6 <- x6 %>%
  select(-any_of(filter_out))
z8 <- x8 %>%
  select(-any_of(filter_out))
z9 <- x9 %>%
  select(-any_of(filter_out))
z10 <- x10 %>%
  select(-any_of(filter_out))
```
```{r}
# Normalize the z-shards so they can be stacked: every shard gets the full
# Holder_1..3 column set (missing ones filled with NA), and *_date columns
# are coerced to Date. The original repeated the as.Date conversion ten
# times (once per shard) and its comments wrongly referred to x1..x10; both
# loops are collapsed into one pass over z1..z10.
holder_fields <- c("_name", "_date", "_totalShares", "_currentShares")
required_holder_cols <- unlist(lapply(1:3, function(j) paste0("Holder_", j, holder_fields)))
# Date columns, taken from z1 as the reference schema.
date_cols <- grep("_date$", names(z1), value = TRUE)

for (nm in paste0("z", 1:10)) {
  df <- get(nm)
  # Add any missing holder columns as NA so the schemas line up.
  for (col in setdiff(required_holder_cols, names(df))) {
    df[[col]] <- NA
  }
  # Coerce the date columns present in this shard.
  for (col in intersect(date_cols, names(df))) {
    df[[col]] <- as.Date(df[[col]])
  }
  assign(nm, df)  # write the normalized shard back (z1..z10 updated)
}

# Stack all shards into the master metadata table.
stock_meta_data <- rbind(z1, z2, z3, z4, z5, z6, z7, z8, z9, z10)
```
# Filter Industry
```{r}
# Target industries: metals & mining, precious metals, oil & gas, coal,
# uranium, and the semiconductor supply chain. Used again by the merge
# chunk below.
ind <- c("Other Industrial Metals & Mining", "Gold", "Oil & Gas E&P", "Other Precious Metals & Mining",
         "Semiconductors", "Oil & Gas Equipment & Services", "Semiconductor Equipment & Materials",
         "Metal Fabrication", "Oil & Gas Refining & Marketing", "Copper", "Oil & Gas Midstream",
         "Thermal Coal", "Oil & Gas Integrated", "Uranium", "Silver", "Oil & Gas Drilling", "Metals & Mining",
         "Oil & Gas", "Coal", "Oil & Gas Related Equipment and Services", "Oil, Gas & Consumable Fuels"
)

# Restrict the master table to those industries and persist the result.
cleaned_data <- filter(stock_meta_data, Industry %in% ind)
write.csv(cleaned_data, "cleaned_stock_data.csv", row.names = FALSE)
```
# Merge Financial
```{r}
# Join per-ticker financials onto the metadata. Match key is
# "Code.ExchangeCode" (e.g. "AAPL.US") against fin$Ticker.
fin <- read.csv("financials_filtered.csv")
stock_meta_data$PrimaryTicker <- paste0(stock_meta_data$Code, ".", stock_meta_data$ExchangeCode)
merged <- left_join(stock_meta_data, fin, by = c("PrimaryTicker" = "Ticker"))
# Keep only the target industries defined in the `ind` vector.
merged_industry <- filter(merged, Industry %in% ind)
```
# EDA
```{r}
# Select ticker plus the 2020-2024 stockholder-equity columns for CAGR work.
equity_cols <- paste0("totalStockholderEquity_", 2020:2024)
df_equity <- merged_industry[, c("PrimaryTicker", equity_cols)]
# Compound annual growth rate from `start` to `end` over `years` compounding
# periods (default 4, i.e. 2020 -> 2024). Returns NA when either endpoint is
# missing or the starting value is non-positive (ratio-based CAGR undefined).
calc_cagr <- function(start, end, years = 4) {
  undefined <- is.na(start) || is.na(end) || start <= 0
  if (undefined) NA else (end / start)^(1 / years) - 1
}
# Row-wise equity CAGR for 2020 -> 2024 (calc_cagr's `years` default of 4
# compounding periods applies).
df_equity$Equity_CAGR_2020_2024 <- mapply(
calc_cagr,
df_equity$totalStockholderEquity_2020,
df_equity$totalStockholderEquity_2024
)
```
```{r}
library(tidyverse)

# df_equity carries PrimaryTicker, totalStockholderEquity_2020..2024 and the
# CAGR column computed above. Pick the five companies with the highest CAGR.
top5 <- df_equity %>%
  arrange(desc(Equity_CAGR_2020_2024)) %>%
  slice(1:5)

# Reshape to long format (one row per company-year) for plotting, recovering
# the numeric year from the column names.
df_long <- top5 %>%
  select(PrimaryTicker, starts_with("totalStockholderEquity_")) %>%
  pivot_longer(cols = -PrimaryTicker, names_to = "Year", values_to = "Equity") %>%
  mutate(Year = as.numeric(gsub("totalStockholderEquity_", "", Year)))

# Equity trajectories of the top-5 companies.
ggplot(df_long, aes(x = Year, y = Equity, color = PrimaryTicker)) +
  geom_line(size = 1) +
  geom_point(size = 2) +
  labs(
    title = "Top 5 Companies by Equity CAGR (2020–2024)",
    x = "Year", y = "Total Stockholder Equity"
  ) +
  theme_minimal() +
  scale_y_continuous(labels = scales::comma)
```