blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c123ba248c982d697044339cfc2309f938b6e351
|
2b2de3eaf02e36eb373afcdfc85b1b788fbbe6f6
|
/codes/combined analysis/analysis.R
|
c8ce472fa47f2b6c98a3c538f13540e88bdf0b7d
|
[] |
no_license
|
rheofur/CS492-Team3-
|
f55c3d4100290e134bddb001e138e6b355cbddd6
|
ffcc9a542153851fde72da0ba61c822e4eaf4773
|
refs/heads/master
| 2022-11-10T00:01:47.599948
| 2020-06-27T11:37:26
| 2020-06-27T11:37:26
| 275,173,469
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,613
|
r
|
analysis.R
|
library(tidyverse)
library(dplyr)
library(ggplot2)
library(reshape2)
library(lubridate)
library(zoo)
library(forecast)
############################
#fishery analysis by time#
############################
# For each year 2010-2020: compute every species' monthly unit price
# (total price / total amount, summed over live/fresh/frozen components),
# then group species with similar price profiles three ways --
# hierarchical clustering, k-means, and clustering on the correlation
# matrix -- and write the groupings plus a correlation heatmap to data/.
# NOTE(review): depends on a `fishery` data frame created elsewhere;
# assumes columns species, time (YYYYMM), price_*/amount_* -- confirm.
for (year in 2010:2020) {
#transform into viable form
# melt(1:6): the first six columns are ids, the rest become
# (variable, value) pairs so price/amount components can be summed by name
fishery_time <- fishery %>%
filter(time %/% 100 == year) %>%
melt(1:6) %>%
group_by(species, time) %>%
summarize(value = sum(value[variable=="price_live" | variable=="price_fresh" | variable=="price_frozen"], na.rm=TRUE) /
sum(value[variable=="amount_live" | variable=="amount_fresh" | variable=="amount_frozen"], na.rm=TRUE)) %>%
dcast(formula=species~time, value="value")
# keep only species with a finite unit price for every month of the year
fishery_time <- fishery_time[rowSums((!is.finite(as.matrix(fishery_time[, -1])))) == 0, ]
#cluster analysis
pmatrix <- fishery_time[, -1]
# double standardization: scale within each species (rows) then within
# each month (columns) so clustering compares shapes rather than levels
pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
# scaling a constant row produces NaN -- drop such rows before clustering
finite <- rowSums((!is.finite(pmatrix))) == 0
pmatrix <- pmatrix[finite, ]
d=dist(pmatrix, method="euclidean")
#hierarchial clutering
pfit <- hclust(d, method="ward.D")
groups <- cutree(pfit, k=5)
clusters.h <- data.frame(fishery_time[finite, 1], group = groups)
#k-means clustering
pclusters <- kmeans(pmatrix, 5, nstart=100, iter.max=100)
groups <- pclusters$cluster
clusters.k <- data.frame(fishery_time[finite, 1], group = groups)
#correlation analysis
# cluster on 1 - correlation of the (unscaled) monthly series
fishery_time_corr <- fishery_time[finite, -1]
rownames(fishery_time_corr) <- fishery_time$species[finite]
cormat <- round(cor(t(fishery_time_corr)), 2)
pfit <- hclust(as.dist(1-cormat), method="ward.D")
groups <- cutree(pfit, k=5)
# reorder the heatmap by dendrogram order so clusters appear as blocks
cormat <- cormat[pfit$order, pfit$order]
cormat.melt <- melt(cormat)
corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
clusters.corr <- data.frame(species=fishery_time$species[finite], group=groups)
#data output
write.csv(clusters.h, paste0("data/", year, "_hierarchial_clustering.ratio.csv"), row.names=FALSE)
write.csv(clusters.k, paste0("data/", year, "_kmeans_clustering.ratio.csv"), row.names=FALSE)
write.csv(clusters.corr, paste0("data/", year, "_correlation_clustering.ratio.csv"), row.names=FALSE)
write.csv(cormat.melt, paste0("data/", year, "_correlation.ratio.csv"), row.names=FALSE)
ggsave(paste0("data/", year, "_correlation_plot.ratio.png"), corrplot)
}
########################################
#comparison of two fishery statistics#
########################################
# Line chart of monthly unit price for two species.
# NOTE(review): sum(price_live, price_fresh, price_frozen, na.rm=TRUE)
# collapses all three columns across the whole group into one scalar --
# equivalent to the melt-based computation above only if each group is a
# single species-month; confirm against the schema of `fishery`.
fishery %>%
filter(species=="민꽃게" | species == "참홍어") %>%
group_by(species, time) %>%
summarize(value = sum(price_live, price_fresh, price_frozen, na.rm=TRUE)/sum(amount_live, amount_fresh, amount_frozen, na.rm=TRUE)) %>%
ggplot() + aes(x=as.yearmon(paste0(time%/%100, "-", time%%100)), y=value, color=species) + geom_line()
##########################
#trade analysis by time#
##########################
# Same per-year clustering pipeline as the fishery section, applied to
# traded products' monthly unit price (total price / total amount).
# All outputs for this section go to data/trade/.
# NOTE(review): depends on a `trade` data frame created elsewhere with
# columns product, time (YYYYMM), price, amount -- confirm.
for (year in 2020) {
  # transform into viable form: one row per product, one column per month
  trade_time <- trade %>%
    filter(time %/% 100 == year) %>%
    group_by(product, time) %>%
    summarize(value = sum(price, na.rm=TRUE)/sum(amount, na.rm=TRUE)) %>%
    dcast(formula=product~time, value.var="value")
  # keep only products with a finite unit price for every month
  trade_time <- trade_time[rowSums((!is.finite(as.matrix(trade_time[, -1])))) == 0, ]
  # cluster analysis: standardize rows then columns so profile shapes,
  # not absolute price levels, drive the clustering
  pmatrix <- trade_time[, -1]
  pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
  # scaling a constant row produces NaN -- drop such rows
  finite <- rowSums((!is.finite(pmatrix))) == 0
  pmatrix <- pmatrix[finite, ]
  d <- dist(pmatrix, method="euclidean")
  # hierarchical clustering
  pfit <- hclust(d, method="ward.D")
  groups <- cutree(pfit, k=5)
  clusters.h <- data.frame(trade_time[finite, 1], group = groups)
  # k-means clustering (k=2 here, unlike k=5 elsewhere -- kept as-is)
  pclusters <- kmeans(pmatrix, 2, nstart=100, iter.max=100)
  groups <- pclusters$cluster
  clusters.k <- data.frame(trade_time[finite, 1], group = groups)
  # correlation analysis: cluster on 1 - correlation of monthly series
  trade_time_corr <- trade_time[finite, -1]
  rownames(trade_time_corr) <- trade_time$product[finite]
  cormat <- round(cor(t(trade_time_corr)), 2)
  pfit <- hclust(as.dist(1-cormat), method="ward.D")
  groups <- cutree(pfit, k=5)
  # reorder the heatmap by dendrogram order so clusters appear as blocks
  cormat <- cormat[pfit$order, pfit$order]
  cormat.melt <- melt(cormat)
  corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
  clusters.corr <- data.frame(product=trade_time$product[finite], group=groups)
  # data output
  write.csv(clusters.h, paste0("data/trade/", year, "_hierarchial_clustering.ratio.csv"), row.names=FALSE)
  write.csv(clusters.k, paste0("data/trade/", year, "_kmeans_clustering.ratio.csv"), row.names=FALSE)
  write.csv(clusters.corr, paste0("data/trade/", year, "_correlation_clustering.ratio.csv"), row.names=FALSE)
  write.csv(cormat.melt, paste0("data/trade/", year, "_correlation.ratio.csv"), row.names=FALSE)
  # BUG FIX: the plot previously went to data/ while every CSV of this
  # section goes to data/trade/ -- write the plot alongside the CSVs.
  ggsave(paste0("data/trade/", year, "_correlation_plot.ratio.png"), corrplot)
}
######################################
#comparison of two trade statistics#
######################################
# Line chart of monthly total traded price for two products.
# BUG FIX: the original grouped and colored by `species`, copied from the
# fishery section; `trade` is filtered on `product` two lines above, so
# the product column is the grouping/coloring variable here.
trade %>%
filter(product =="꽃게" | product == "대하") %>%
group_by(product, time) %>%
summarize(value = sum(price, na.rm=TRUE)) %>%
ggplot() + aes(x=as.yearmon(paste0(time%/%100, "-", time%%100)), y=value, color=product) + geom_line()
##############################
#wholesale analysis by time#
##############################
# For each year 2013-2019: cluster wholesale products by their monthly
# TOTAL AMOUNT (not unit price -- note the .amount.csv output suffix),
# using the same three clustering methods as the sections above.
# NOTE(review): here `time` is treated as a Date (year()/month() from
# lubridate), unlike the numeric YYYYMM used for fishery/trade -- confirm.
for (year in 2013:2019) {
#transform into viable form
wholesale_time <- wholesale %>%
filter(year(time) == year) %>%
group_by(product, time = year(time) * 100 + month(time)) %>%
summarize(value = sum(amount, na.rm=TRUE)) %>%
dcast(formula=product~time, value="value")
# keep only products with a finite amount in every month
wholesale_time <- wholesale_time[rowSums((!is.finite(as.matrix(wholesale_time[, -1])))) == 0, ]
#cluster analysis
pmatrix <- wholesale_time[, -1]
# double standardization: rows then columns, so shapes are compared
pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
# scaling a constant row produces NaN -- drop such rows
finite <- rowSums((!is.finite(pmatrix))) == 0
pmatrix <- pmatrix[finite, ]
d=dist(pmatrix, method="euclidean")
#hierarchial clutering
pfit <- hclust(d, method="ward.D")
groups <- cutree(pfit, k=5)
clusters.h <- data.frame(wholesale_time[finite, 1], group = groups)
#k-means clustering
pclusters <- kmeans(pmatrix, 5, nstart=100, iter.max=100)
groups <- pclusters$cluster
clusters.k <- data.frame(wholesale_time[finite, 1], group = groups)
#correlation analysis
# cluster on 1 - correlation of the monthly amount series
wholesale_time_corr <- wholesale_time[finite, -1]
rownames(wholesale_time_corr) <- wholesale_time$product[finite]
cormat <- round(cor(t(wholesale_time_corr)), 2)
pfit <- hclust(as.dist(1-cormat), method="ward.D")
groups <- cutree(pfit, k=5)
# reorder heatmap by dendrogram order so clusters appear as blocks
cormat <- cormat[pfit$order, pfit$order]
cormat.melt <- melt(cormat)
corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
clusters.corr <- data.frame(product=wholesale_time$product[finite], group=groups)
#data output
write.csv(clusters.h, paste0("data/", year, "_hierarchial_clustering.amount.csv"), row.names=FALSE)
write.csv(clusters.k, paste0("data/", year, "_kmeans_clustering.amount.csv"), row.names=FALSE)
write.csv(clusters.corr, paste0("data/", year, "_correlation_clustering.amount.csv"), row.names=FALSE)
write.csv(cormat.melt, paste0("data/", year, "_correlation.amount.csv"), row.names=FALSE)
ggsave(paste0("data/", year, "_correlation_plot.amount.png"), corrplot)
}
#########################################
#comparison of two wholesale statistics#
#########################################
# Line chart of monthly total price for two products in 2019.
# NOTE(review): this uses price_live/price_fresh/price_frozen and a
# numeric YYYYMM `time`, but every other wholesale section uses `price`/
# `amount` and a Date `time` -- this looks copied from the fishery schema
# and may error against the real wholesale data; verify before running.
wholesale %>%
filter(product=="꽃게" | product == "대하", time %/% 100 == 2019) %>%
group_by(product, time) %>%
summarize(value = sum(price_live, price_fresh, price_frozen, na.rm=TRUE)) %>%
ggplot() + aes(x=as.yearmon(paste0(time%/%100, "-", time%%100)), y=value, color=product) + geom_line()
############################
#fishery analysis by year#
############################
# Cluster species by their MEAN YEARLY AMOUNT (mean of the melted
# amount_* values per species-year) across all years at once; outputs
# are not prefixed with a year because the whole period is one matrix.
#transform into viable form
fishery_time <- fishery %>%
melt(1:6) %>%
group_by(species, time=time%/%100) %>%
summarize(value = mean(value[variable=="amount_live" | variable=="amount_fresh" | variable=="amount_frozen"], na.rm=TRUE)) %>%
dcast(formula=species~time, value="value")
# keep only species with a finite mean amount in every year
fishery_time <- fishery_time[rowSums((!is.finite(as.matrix(fishery_time[, -1])))) == 0, ]
#cluster analysis
pmatrix <- fishery_time[, -1]
# double standardization: rows then columns, compare shapes not levels
pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
# scaling a constant row produces NaN -- drop such rows
finite <- rowSums((!is.finite(pmatrix))) == 0
pmatrix <- pmatrix[finite, ]
d=dist(pmatrix, method="euclidean")
#hierarchial clutering
pfit <- hclust(d, method="ward.D")
groups <- cutree(pfit, k=5)
clusters.h <- data.frame(fishery_time[finite, 1], group = groups)
#k-means clustering
pclusters <- kmeans(pmatrix, 5, nstart=100, iter.max=100)
groups <- pclusters$cluster
clusters.k <- data.frame(fishery_time[finite, 1], group = groups)
#correlation analysis
fishery_time_corr <- fishery_time[finite, -1]
rownames(fishery_time_corr) <- fishery_time$species[finite]
cormat <- round(cor(t(fishery_time_corr)), 2)
pfit <- hclust(as.dist(1-cormat), method="ward.D")
groups <- cutree(pfit, k=5)
# reorder heatmap by dendrogram order so clusters appear as blocks
cormat <- cormat[pfit$order, pfit$order]
cormat.melt <- melt(cormat)
corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
clusters.corr <- data.frame(species=fishery_time$species[finite], group=groups)
#data output
write.csv(clusters.h, paste0("data/hierarchial_clustering.amount.csv"), row.names=FALSE)
write.csv(clusters.k, paste0("data/kmeans_clustering.amount.csv"), row.names=FALSE)
write.csv(clusters.corr, paste0("data/correlation_clustering.amount.csv"), row.names=FALSE)
write.csv(cormat.melt, paste0("data/correlation.amount.csv"), row.names=FALSE)
ggsave(paste0("data/correlation_plot.amount.png"), corrplot)
##########################
#trade analysis by year#
##########################
# Cluster imported products by their total import amount per period.
# NOTE(review): the header says "by year", but `date%%100` extracts the
# LAST TWO digits of a YYYYMM code, i.e. the month -- so this actually
# aggregates by calendar month across all years; confirm intent.
# NOTE(review): uses a `date` column whereas the trade-by-time section
# above used `time` -- verify which column exists in `trade`.
#transform into viable form
trade_time <- trade %>%
filter(import.export == "수입") %>%
group_by(product, time=date%%100) %>%
summarize(value = sum(amount, na.rm=TRUE)) %>%
dcast(formula=product~time, value="value")
# keep only products with a finite amount in every period
trade_time <- trade_time[rowSums((!is.finite(as.matrix(trade_time[, -1])))) == 0, ]
#cluster analysis
pmatrix <- trade_time[, -1]
# double standardization: rows then columns, compare shapes not levels
pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
# scaling a constant row produces NaN -- drop such rows
finite <- rowSums((!is.finite(pmatrix))) == 0
pmatrix <- pmatrix[finite, ]
d=dist(pmatrix, method="euclidean")
#hierarchial clutering
pfit <- hclust(d, method="ward.D")
groups <- cutree(pfit, k=5)
clusters.h <- data.frame(trade_time[finite, 1], group = groups)
#k-means clustering
pclusters <- kmeans(pmatrix, 5, nstart=100, iter.max=100)
groups <- pclusters$cluster
clusters.k <- data.frame(trade_time[finite, 1], group = groups)
#correlation analysis
trade_time_corr <- trade_time[finite, -1]
rownames(trade_time_corr) <- trade_time$product[finite]
cormat <- round(cor(t(trade_time_corr)), 2)
pfit <- hclust(as.dist(1-cormat), method="ward.D")
groups <- cutree(pfit, k=5)
# reorder heatmap by dendrogram order so clusters appear as blocks
cormat <- cormat[pfit$order, pfit$order]
cormat.melt <- melt(cormat)
corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
clusters.corr <- data.frame(product=trade_time$product[finite], group=groups)
#data output
write.csv(clusters.h, paste0("data/hierarchial_clustering.amount.import.csv"), row.names=FALSE)
write.csv(clusters.k, paste0("data/kmeans_clustering.amount.import.csv"), row.names=FALSE)
write.csv(clusters.corr, paste0("data/correlation_clustering.amount.import.csv"), row.names=FALSE)
write.csv(cormat.melt, paste0("data/correlation.amount.import.csv"), row.names=FALSE)
ggsave(paste0("data/correlation_plot.amount.import.png"), corrplot)
##############################
#wholesale analysis by time#
##############################
# Cluster wholesale products by YEARLY unit price (price/amount summed
# per product-year); despite the copied "by time" header this section
# aggregates by year(as.Date(time)), the yearly analogue of the monthly
# wholesale section above.
#transform into viable form
wholesale_time <- wholesale %>%
group_by(product, time = year(as.Date(time))) %>%
summarize(value = sum(price, na.rm=TRUE)/sum(amount, na.rm=TRUE)) %>%
dcast(formula=product~time, value="value")
# keep only products with a finite unit price in every year
wholesale_time <- wholesale_time[rowSums((!is.finite(as.matrix(wholesale_time[, -1])))) == 0, ]
#cluster analysis
pmatrix <- wholesale_time[, -1]
# double standardization: rows then columns, compare shapes not levels
pmatrix <- pmatrix %>% t %>% scale %>% t %>% scale
# scaling a constant row produces NaN -- drop such rows
finite <- rowSums((!is.finite(pmatrix))) == 0
pmatrix <- pmatrix[finite, ]
d=dist(pmatrix, method="euclidean")
#hierarchial clutering
pfit <- hclust(d, method="ward.D")
groups <- cutree(pfit, k=5)
clusters.h <- data.frame(wholesale_time[finite, 1], group = groups)
#k-means clustering
pclusters <- kmeans(pmatrix, 5, nstart=100, iter.max=100)
groups <- pclusters$cluster
clusters.k <- data.frame(wholesale_time[finite, 1], group = groups)
#correlation analysis
wholesale_time_corr <- wholesale_time[finite, -1]
rownames(wholesale_time_corr) <- wholesale_time$product[finite]
cormat <- round(cor(t(wholesale_time_corr)), 2)
pfit <- hclust(as.dist(1-cormat), method="ward.D")
groups <- cutree(pfit, k=5)
# reorder heatmap by dendrogram order so clusters appear as blocks
cormat <- cormat[pfit$order, pfit$order]
cormat.melt <- melt(cormat)
corrplot <- ggplot(data=cormat.melt) + aes(x=Var1, y=Var2, fill=value) + geom_tile() + scale_fill_gradient2(low="slateblue", mid="black", high="coral") + theme(text=element_text(size=10), axis.text.x=element_text(angle=90))
clusters.corr <- data.frame(product=wholesale_time$product[finite], group=groups)
#data output
write.csv(clusters.h, paste0("data/hierarchial_clustering.ratio.csv"), row.names=FALSE)
write.csv(clusters.k, paste0("data/kmeans_clustering.ratio.csv"), row.names=FALSE)
write.csv(clusters.corr, paste0("data/correlation_clustering.ratio.csv"), row.names=FALSE)
write.csv(cormat.melt, paste0("data/correlation.ratio.csv"), row.names=FALSE)
ggsave(paste0("data/correlation_plot.ratio.png"), corrplot)
#########################
#Wholesale vs. fishery#
#########################
# Join wholesale and fishery monthly totals on a shared product code
# (via fisherycode_standardcode.csv mapping), then for three codes plot
# unit-price scatter and time series and print correlations.
# NOTE(review): `code` (with 소분류명/수산물품목코드 columns) is a data
# frame defined elsewhere in the project -- confirm it is loaded first.
codeMapping <- read.csv("fisherycode_standardcode.csv")
wholesale_compare <- wholesale %>%
group_by(code=productcode, time=year(date)*100+month(date)) %>%
summarize(amount_wholesale=sum(amount, na.rm=TRUE),
price_wholesale=sum(price, na.rm=TRUE)) %>%
ungroup
fishery_compare <- fishery %>%
melt(1:6) %>%
group_by(fishery_code=speciescode, time) %>%
summarize(amount_fishery=sum(value[variable=="amount_live" | variable=="amount_fresh" | variable=="amount_frozen"], na.rm=TRUE),
price_fishery=sum(value[variable=="price_live" | variable=="price_fresh" | variable=="price_frozen"], na.rm=TRUE)) %>%
left_join(codeMapping) %>%
ungroup %>%
select(code, time, amount_fishery, price_fishery)
# join on the shared (code, time) key
compare <- left_join(wholesale_compare, fishery_compare)
View(compare)
for(c in 630204:630206) {
# NOTE(review): the trailing comma after ratio2 leaves an empty argument
# in summarize(); newer dplyr versions reject this -- consider removing.
values <- compare %>%
filter(code==c) %>%
group_by(time = as.yearmon(paste0(time%/%100, "-", time%%100))) %>%
summarize(amount1 = sum(amount_wholesale, na.rm=TRUE),
price1 = sum(price_wholesale, na.rm=TRUE),
ratio1 = sum(price_wholesale, na.rm=TRUE)/sum(amount_wholesale, na.rm=TRUE),
amount2 = sum(amount_fishery, na.rm=TRUE),
price2 = sum(price_fishery, na.rm=TRUE),
ratio2 = sum(price_fishery, na.rm=TRUE)/sum(amount_fishery, na.rm=TRUE),)
# scatter of wholesale vs fishery unit price with a linear fit
p1 <- ggplot(data=values) +
aes(x=ratio1, y=ratio2) +
geom_point() +
stat_smooth(method="lm") +
xlab("wholesale") + ylab("fishery") +
ggtitle(code$소분류명[which(code$수산물품목코드==c)])
# both unit-price series over time
p2 <- ggplot(data=values, aes(x=time)) +
geom_line(aes(y=ratio1, color="도매")) +
geom_line(aes(y=ratio2, color="생산")) +
ylab("price") +
ggtitle(code$소분류명[which(code$수산물품목코드==c)])
print(code$소분류명[which(code$수산물품목코드==c)])
cat("\n")
print(c(amount=cor(values$amount1, values$amount2),
price=cor(values$price1, values$price2),
ratio=cor(values$ratio1, values$ratio2)))
ggsave(paste0(code$소분류명[which(code$수산물품목코드==c)],"_도매_생산.ratio1.png"), p1)
ggsave(paste0(code$소분류명[which(code$수산물품목코드==c)],"_도매_생산.ratio2.png"), p2)
}
######################
#Wholesale vs. sale#
######################
# Join daily wholesale and landing-sale totals by product code, then for a
# range of codes plot unit-price scatter and time series and print
# amount/price/ratio correlations.
# NOTE(review): `code` (소분류명/수산물품목코드) and `sale` are defined
# elsewhere in the project -- confirm they are loaded first.
wholesale_compare <- wholesale %>%
  group_by(code=productcode, time=as.Date(date)) %>%
  summarize(amount_wholesale=sum(amount, na.rm=TRUE),
            price_wholesale=sum(price, na.rm=TRUE)) %>%
  ungroup
sale_compare <- sale %>%
  group_by(code=as.character(수산물표준코드), time=as.Date(위판일자)) %>%
  summarize(amount_sale=sum(물량.KG.*수량, na.rm=TRUE),
            price_sale=sum(총.판매액, na.rm=TRUE)) %>%
  ungroup
# join on the shared (code, time) key
compare <- left_join(wholesale_compare, sale_compare)
View(compare)
for(c in 630204:630212) {
  # aggregate both sources to months
  values <- compare %>%
    filter(code==c) %>%
    group_by(time = as.yearmon(paste0(year(time), "-", month(time)))) %>%
    summarize(amount1 = sum(amount_wholesale, na.rm=TRUE),
              price1 = sum(price_wholesale, na.rm=TRUE),
              ratio1 = sum(price_wholesale, na.rm=TRUE)/sum(amount_wholesale, na.rm=TRUE),
              amount2 = sum(amount_sale, na.rm=TRUE),
              # BUG FIX: price2 previously summed amount_sale (copy-paste),
              # making the price correlation below meaningless.
              price2 = sum(price_sale, na.rm=TRUE),
              ratio2 = sum(price_sale, na.rm=TRUE)/sum(amount_sale, na.rm=TRUE))
  # scatter of wholesale vs sale unit price with a linear fit
  # (axis label fixed: this section compares against sale, not fishery)
  p1 <- ggplot(data=values) +
    aes(x=ratio1, y=ratio2) +
    geom_point() +
    stat_smooth(method="lm") +
    xlab("wholesale") + ylab("sale") +
    ggtitle(code$소분류명[which(code$수산물품목코드==c)])
  # both unit-price series over time
  p2 <- ggplot(data=values, aes(x=time)) +
    geom_line(aes(y=ratio1, color="도매")) +
    geom_line(aes(y=ratio2, color="생산")) +
    ylab("price") +
    ggtitle(code$소분류명[which(code$수산물품목코드==c)])
  print(code$소분류명[which(code$수산물품목코드==c)])
  cat("\n")
  print(c(amount=cor(values$amount1, values$amount2),
          price=cor(values$price1, values$price2),
          ratio=cor(values$ratio1, values$ratio2)))
  ggsave(paste0(code$소분류명[which(code$수산물품목코드==c)],"_도매_생산.ratio1.png"), p1)
  ggsave(paste0(code$소분류명[which(code$수산물품목코드==c)],"_도매_생산.ratio2.png"), p2)
}
########################
#Wholesale vs. import#
########################
# Compare monthly wholesale totals for one product against its import
# totals (matched by a product-name pattern), plot unit-price scatter and
# price time series, and print amount/price/ratio correlations.
crabbo <- "왕게"
wholesale_compare <- wholesale[wholesale$product=="왕게", ] %>%
  group_by(time=as.yearmon(date)) %>%
  summarize(amount_wholesale=sum(amount, na.rm=TRUE),
            price_wholesale=sum(price, na.rm=TRUE)) %>%
  ungroup
trade_compare <- trade[grep(crabbo, trade$product), ] %>%
  filter(import.export=="수입") %>%
  group_by(time=as.yearmon(paste0(date%/%100, "-", date%%100)), country) %>%
  summarize(amount_trade=sum(amount, na.rm=TRUE),
            price_trade=sum(price, na.rm=TRUE)) %>%
  ungroup
# join on month; wholesale totals repeat per country row and are re-summed
compare <- left_join(trade_compare, wholesale_compare)
values <- compare %>%
  group_by(time) %>%
  summarize(amount1 = sum(amount_wholesale, na.rm=TRUE),
            price1 = sum(price_wholesale, na.rm=TRUE),
            ratio1 = sum(price_wholesale, na.rm=TRUE)/sum(amount_wholesale, na.rm=TRUE),
            amount2 = sum(amount_trade, na.rm=TRUE),
            # BUG FIX: price2 previously summed amount_trade (copy-paste),
            # making the price correlation below meaningless.
            price2 = sum(price_trade, na.rm=TRUE),
            ratio2 = sum(price_trade, na.rm=TRUE)/sum(amount_trade, na.rm=TRUE))
# scatter of wholesale vs import unit price with a linear fit
p1 <- ggplot(data=values) +
  aes(x=ratio1, y=ratio2) +
  geom_point() +
  stat_smooth(method="lm") +
  xlab("wholesale") + ylab("import") +
  ggtitle(crabbo)
# both price series over time
p2 <- ggplot(data=values, aes(x=time)) +
  geom_line(aes(y=price1, color="도매")) +
  geom_line(aes(y=price2, color="수입")) +
  ylab("price") +
  ggtitle(crabbo)
# BUG FIX: the ratio entry previously correlated the amount columns again;
# it now correlates ratio1 with ratio2, matching the sections above.
print(c(amount=cor(values$amount1, values$amount2),
        price=cor(values$price1, values$price2),
        ratio=cor(values$ratio1, values$ratio2)))
print(p1)
print(p2)
ggsave(paste0(crabbo,"_도매_수입.price1.png"), p1)
ggsave(paste0(crabbo,"_도매_수입.amount2.png"), p2)
################################
#wholesale analysis by market#
################################
# Top 5 products per market by unit price (price/amount); write the table
# to CSV and draw a per-market share plot.
# BUG FIX: the original piped the summary straight into write.csv() and
# then began a NEW, data-less ggplot() call on the next line, so the plot
# never received any data. The summary is now stored once and reused.
market_top <- wholesale %>%
  filter(substr(productcode, 1, 4) %in% c(6302, 7302)) %>%
  group_by(market, productcode) %>%
  summarize(amount=sum(amount,na.rm=TRUE), price=sum(price,na.rm=TRUE), ratio=sum(price,na.rm=TRUE)/sum(amount,na.rm=TRUE)) %>%
  ungroup %>%
  # map numeric product codes to human-readable names via the `code` table
  mutate(productcode = sapply(productcode, function(x) code$name[which(x==code$code)] %>% as.character)) %>%
  rename(product = productcode) %>%
  group_by(market) %>%
  top_n(5, wt=ratio)
write.csv(market_top, "도매시장 주요판매품종별 판매금액_양비율.csv", quote=FALSE, row.names=FALSE)
ggplot(market_top, aes(y=market, weight=ratio, fill=product)) + geom_bar(position="fill")
################################
#wholesale analysis by location#
################################
# Top 5 source locations per product by total amount; write the table to
# CSV and draw a per-product share plot.
# BUG FIX: as in the by-market section, the original piped into
# write.csv() and then started a data-less ggplot() call; the summary is
# now stored once and reused for both outputs.
location_top <- wholesale %>%
  filter(substr(productcode, 1, 4) %in% c(6302, 7302), !is.na(location)) %>%
  group_by(location, productcode) %>%
  summarize(amount=sum(amount,na.rm=TRUE), price=sum(price,na.rm=TRUE), ratio=sum(price,na.rm=TRUE)/sum(amount,na.rm=TRUE)) %>%
  top_n(5, wt=amount) %>%
  group_by(productcode) %>%
  arrange(amount)
write.csv(location_top, "품종별 주요 산지.csv", quote=FALSE, row.names=FALSE)
ggplot(location_top, aes(y=productcode, weight=ratio, fill=location)) + geom_bar(position="fill")
########################################
#predicting future wholesale#
########################################
# Automatic forecasts (forecast::forecast) for monthly wholesale amount,
# total price, and unit price of product code 630205.
wholesale_predict <- wholesale %>%
filter(productcode == 630205) %>%
group_by(time=as.yearmon(date)) %>%
summarize(amount = sum(amount, na.rm=TRUE), price = sum(price, na.rm=TRUE),
ratio = sum(price, na.rm=TRUE)/sum(amount, na.rm=TRUE))
# NOTE(review): `amount` is built without start/frequency, unlike price
# and ratio, so its forecast cannot exploit monthly seasonality -- confirm
# whether this is intentional.
amount <- ts(wholesale_predict$amount)
price <- ts(wholesale_predict$price, start = c(2013,1), frequency=12)
ratio <- ts(wholesale_predict$ratio, start = c(2013,1), frequency=12)
autoplot(forecast(amount))
autoplot(forecast(price))
autoplot(forecast(ratio))
|
b99a33cc0a9f770490df0a88100e2da24df0857a
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkWidgetCreatePangoLayout.Rd
|
d32b9f6c2ed1dcfd96433d85f53d6f76e736bcdc
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 892
|
rd
|
gtkWidgetCreatePangoLayout.Rd
|
\alias{gtkWidgetCreatePangoLayout}
\name{gtkWidgetCreatePangoLayout}
\title{gtkWidgetCreatePangoLayout}
\description{Creates a new \code{\link{PangoLayout}} with the appropriate font map,
font description, and base direction for drawing text for
this widget.}
\usage{gtkWidgetCreatePangoLayout(object, text)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkWidget}}}
\item{\verb{text}}{text to set on the layout (can be \code{NULL})}
}
\details{If you keep a \code{\link{PangoLayout}} created in this way around, in order to
notify the layout of changes to the base direction or font of this
widget, you must call \code{\link{pangoLayoutContextChanged}} in response to
the \verb{"style-set"} and \verb{"direction-changed"} signals
for the widget.}
\value{[\code{\link{PangoLayout}}] the new \code{\link{PangoLayout}}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
4088560a795d8ecfd69893368a45b14243d8780c
|
cafb8b630cbdcf5197e3093e9981044a0fcdd05e
|
/randomForest/step3-model-testing.R
|
0d85f20d78bb6b9dd6cba73e6710370b0707e918
|
[] |
no_license
|
yejingyu0628/ML
|
6d1d8bfd4f99cd1d3e4424d5dd33406d3f3cd31c
|
f706de31c207a564fd1d65f1628a6b7c8f9d27cb
|
refs/heads/master
| 2021-12-14T23:32:56.406762
| 2017-06-27T15:50:50
| 2017-06-27T15:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,371
|
r
|
step3-model-testing.R
|
# Step 3: evaluate a previously-trained random forest on held-out data --
# build the confusion matrix, sensitivity/specificity/error rates, and an
# ROC curve with AUC for relapse prediction.
# NOTE(review): rm(list=ls()) wipes the session; avoid in shared scripts.
rm(list=ls())
# rf_output (trained randomForest) and testing_data/testing_clinical come
# from earlier pipeline steps
load('rf_output.Rdata')
load('ML_RF_input.Rdata')
library(randomForest)
library(ROCR)
require(Hmisc)
# predictors are genes x samples; transpose so rows = samples
predictor_data=t(testing_data)
predictor_data[1:4,1:4]
dim(predictor_data)
# keep only (and order by) the predictors the forest was trained on
RF_predictor_names=rownames(rf_output$importance)
predictor_data=predictor_data[,RF_predictor_names]
predictor_data[1:4,1:4]
dim(predictor_data)
# class labels and per-class vote fractions for each test sample
RF_predictions_responses=predict(rf_output, predictor_data, type="response")
RF_predictions_votes=predict(rf_output, predictor_data, type="vote")
head(RF_predictions_responses)
head(RF_predictions_votes)
# RFS: In cancer, the length of time after primary treatment for a cancer ends
# that the patient survives without any signs or symptoms of that cancer.
# In a clinical trial, measuring the RFS is one way to see how well a new treatment works.
# Also called DFS, disease-free survival, and relapse-free survival.
head(testing_clinical)
clindata=testing_clinical[,c('event.rfs','time.rfs')]
clindata_plusRF=cbind(clindata,RF_predictions_responses,RF_predictions_votes)
dim(clindata_plusRF)
# drop samples with unknown relapse status before scoring
clindata_plusRF=clindata_plusRF[! is.na(clindata_plusRF$event.rfs) ,]
dim(clindata_plusRF)
head(clindata_plusRF)
save(clindata_plusRF,file='predictor_output.Rdata')
# rows = true event.rfs (0/1), columns = predicted class
confusion=table(clindata_plusRF[,c("event.rfs","RF_predictions_responses")])
rownames(confusion)=c("NoRelapse","Relapse")
confusion
# sensitivity: fraction of true relapses predicted as relapse
sensitivity=(confusion[2,2]/(confusion[2,2]+confusion[2,1]))*100
sensitivity
# specificity: fraction of true non-relapses predicted as non-relapse
specificity=(confusion[1,1]/(confusion[1,1]+confusion[1,2]))*100
specificity
overall_error=((confusion[1,2]+confusion[2,1])/sum(confusion))*100
overall_error
overall_accuracy=((confusion[1,1]+confusion[2,2])/sum(confusion))*100
overall_accuracy
# per-class error rates (proportions, not percentages)
class1_error=confusion[1,2]/(confusion[1,1]+confusion[1,2])
class1_error
class2_error=confusion[2,1]/(confusion[2,2]+confusion[2,1])
class2_error
# Create variables for the known target class and predicted class probabilities.
target=clindata_plusRF[,"event.rfs"]
target[target==1]="Relapse"
target[target==0]="NoRelapse"
# "Relapse" vote fraction serves as the continuous score for the ROC
relapse_scores=clindata_plusRF[,"Relapse"]
# First calculate the AUC value.
pred=prediction(relapse_scores,target)
perf_AUC=performance(pred,"auc")
AUC=perf_AUC@y.values[[1]]
AUC_out=paste("AUC=",AUC,sep="")
# Then, plot the actual ROC curve.
perf_ROC=performance(pred,"tpr","fpr")
plot(perf_ROC, main="ROC plot")
text(0.5,0.5,paste("AUC = ",format(AUC, digits=5, scientific=FALSE)))
|
757381d18e25cdac880b6e5d9d9306c4eb9bf4ad
|
7ccf63998aff4e0520cc8199f75a379eb4c3a3f5
|
/man/addConf.Rd
|
254a0d9dae70d90db319e23342884238a9ccef89
|
[
"MIT"
] |
permissive
|
Peder2911/evallib
|
aca579c93d03f70b63df912fbcf905f1b64c7da5
|
6a191226fdfb1a53a3ccf03e2ebb6a9f745cd5cc
|
refs/heads/master
| 2020-08-02T06:31:40.592736
| 2019-12-17T13:35:52
| 2019-12-17T13:35:52
| 211,264,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 194
|
rd
|
addConf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metrics.R
\name{addConf}
\alias{addConf}
\title{???}
\usage{
addConf(data, predicted, actual)
}
\description{
???
}
|
7e6313e235fb4988ed188e6f704a2c5ed859b892
|
28d74b88eae5145e4a4f1be6f5110b3641be4462
|
/data-raw/PrepExampleData.R
|
033fd333739c92e81e93fa9f5b3283eac9055d0c
|
[] |
no_license
|
tmcd82070/Rdistance
|
844a1fd051ed0d717937995af331c95aaef59bc2
|
3953b2be5c1fd222a9e833d5f020f97ddf1b5c58
|
refs/heads/master
| 2023-08-31T16:09:29.949221
| 2023-08-29T05:29:24
| 2023-08-29T05:29:24
| 10,951,200
| 7
| 6
| null | 2020-08-14T22:02:13
| 2013-06-25T21:46:54
|
R
|
UTF-8
|
R
| false
| false
| 10,678
|
r
|
PrepExampleData.R
|
# Code used to prepare the example songbird datasets in Rdistance
# Jason Carlisle
# Last updated 6/19/2017
# Creating two example datasets, each will include two tables:
# 1) Line transect example (BRSP)
# a detection data.frame with at least $dist, $siteID, and $groupsize
# a site data.frame with at least $siteID and $length (and we're adding some other covariates)
# 2) Point transect example (SATH)
# a detection data.frame with at least $dist, $siteID, and $groupsize
# a site data.frame with at least $siteID (and we're adding some other covariates)
# Load required packages
# require(Rdistance) # build from development
require(sp)
require(rgdal)
require(rgeos)
require(raster)
require(RODBC)
require(lubridate)
# ---------------------------------------------------------------------------
# PART 1: PREPARE THE DETECTIONS DATASETS (RAW BIRD COUNT DATA) -----
# ---------------------------------------------------------------------------
# Data sources
# Connect to SQL Server database on CarlisleMule Desktop
# chan <- odbcDriverConnect(connection='driver={SQL Server Native Client 11.0}; server=CARLISLEMULE,1433;
# database=LineTransectData; Uid=jasontemp; Pwd=temppassword', believeNRows=FALSE)
# Path to Access databases
path <- "C:/Users/jcarlisle/Google Drive/Dissertation/Data/"
tchan <- odbcConnectAccess2007(access.file=paste0(path, "Umbrella_MasterData_LineTransect_25May2017.accdb"))
pchan <- odbcConnectAccess2007(access.file=paste0(path, "Umbrella_MasterData_PointCount_25May2017.accdb"))
# Read in dataframe of observations on transects that don't have "Z" in the transect ID
# We surveyed 8 "Z" transects which were sited differently and are not being analyzed here
dB <- sqlQuery(tchan, query="SELECT * FROM [Avian_Data] WHERE TranID NOT LIKE 'Z%'")
dB$Comments <- NULL # drop comments columns
dB <- dB[order(dB$TranID), ] # order by transect ID
# Keep only Brewer's Sparrow data from 2012
dB <- dB[dB$Spp=="BRSP" & dB$Year==2012, ]
# Keep only necessary columns and rename them to Rdistance conventions
dB <- dB[, c("TranID", "Number", "SightDist", "SightAngle")]
names(dB) <- c("siteID", "groupsize", "sightdist", "sightangle")
# Compute perpendicular, off-transect distances from the observer's sight distance and angle
# NOTE(review): perp.dists presumably comes from the (commented-out)
# Rdistance development build -- confirm it is on the search path.
dB$dist <- round(perp.dists(s.dist="sightdist", s.angle="sightangle", data=dB), 1)
# Plot histogram
hist(dB$dist, breaks=25, col="dark gray", main="Sparrow Distances", xlab="Distance (m)")
# SATH point count data
# Read in dataframe of observations on transects that don't have "Z" in the transect ID
dS <- sqlQuery(pchan, query="SELECT * FROM [Point_Data]")
dS$Comments <- NULL # drop comments columns
dS <- dS[order(dS$PtID), ] # order by transect ID
# Keep only Sage Thrasher data from 2013
dS <- dS[dS$Spp=="SATH" & dS$Year==2013, ]
# Keep only necessary columns and rename them to Rdistance conventions
dS <- dS[, c("PtID", "GrpSize", "Dist")]
names(dS) <- c("siteID", "groupsize", "dist")
# Plot histogram
hist(dS$dist, breaks=10, col="dark gray", main="Thrasher Distances", xlab="Distance (m)")
# NOTE(review): abline(0, 1) draws a unit-slope line over the histogram;
# looks like a leftover from a scatter plot -- confirm intent.
abline(0, 1)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
# PART 2: PREPARE THE SITE DATASETS (TRANSECT- (OR POINT-) LEVEL DATA) -----
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#
# Transect shapefile
trandir <- "D:/GIS/Field_GIS_Data/x2012" # External drive
# trandir <- "C:/Users/Jason/Box Sync/GIS/Field_GIS_Data/x2012" # ThinkPad Laptop
# trandir <- "C:/Users/jcarlis3/Box Sync/GIS/Field_GIS_Data/x2012" # CarlisleMule Desktop
# Point count shapefile
ptdir <- "D:/GIS/Field_GIS_Data/x2013/TrtPointCounts" # External drive
# Homer raster data
rastdir <- "D:/UWyo/Ch3_FocalRaster/Oct2016/CovariateRasters/Veg0s" # External drive
# rastdir <- "C:/GIS_data/HomerData/ProcessedRasters" # ThinkPad Laptop
# rastdir <- "F:/umbrella_GIS/LandcoverData/usgs_wyo_sagebrush_jan2012/ProcessedRasters"# CarlisleMule Desktop
# BRSP line transect data
# Read in transect information recorded while surveying (from SQL database)
tB <- sqlQuery(tchan, query="SELECT * FROM [Avian_Effort] WHERE TranID NOT LIKE 'Z%'")
tB <- tB[order(tB$TranID), ] # order by transect ID
# Keep only the 2012 surveys
tB <- tB[year(tB$SampDateStart) == 2012, ]
# Add transect length (all 500 m)
tB$length <- 500
# SATH point transect data
# NOTE(review): this chunk is part of a longer interactive data-prep script; it
# relies on objects created earlier in the file (the `pchan` ODBC channel, the
# sparrow transect table `tB`, and the directory paths trandir/ptdir/rastdir).
tS <- sqlQuery(pchan, query="SELECT * FROM [Point_Effort]")
tS <- tS[order(tS$PtID), ] # order by transect ID
# Keep only the 2013 surveys
tS <- tS[tS$Year == 2013, ]
tB <- droplevels(tB)
tS <- droplevels(tS)
odbcCloseAll()  # done with the database; close every open ODBC connection
# Keep only necessary columns and rename
tB <- tB[, c("TranID", "Observer", "length")]
names(tB) <- c("siteID", "observer", "length")
tS <- tS[, c("PtID", "Observer")]
names(tS) <- c("siteID", "observer")
# Rename observers (anonymize to obs1..obsN; order follows the factor levels)
levels(tB$observer) <- c("obs1", "obs2", "obs3", "obs4", "obs5")
levels(tS$observer) <- c("obs1", "obs2", "obs3", "obs4", "obs5", "obs6")
table(tB$observer)  # interactive sanity check on per-observer effort
table(tS$observer)
# Reorder columns
tB <- tB[, c("siteID", "length", "observer")]
# Calculate some covariates with GIS
# Read in transect locations
trans <- readOGR(dsn=trandir, layer="Transects_atts_wylam")
length(trans)
trans@data[ncol(trans@data)] <- NULL # get rid of last column with redundant info
trans@data # look at attribute table
# drop 8 transects that start with "Z"
trans <- trans[!grepl(pattern="Z", x=trans@data$TransectID), ]
trans@data[grepl(pattern="Z", x=trans@data$TransectID), ] # check that "Z"s gone
length(trans) # should now be 72
# Read in point count locations
pts <- readOGR(dsn=ptdir, layer="PointCountLocations_surveyed")
plot(pts)
length(pts)
pts@data
# Project points from NAD 83 to Wylam so both layers share one CRS
proj4string(trans)
proj4string(pts)
pts <- spTransform(pts, proj4string(trans))
identicalCRS(pts, trans)  # must be TRUE before overlay/extract operations
# Find all .img raster files in the raster directory that were processed to have masked values==0
(dlist <- dir(path=rastdir, pattern="0s.img$"))
(dlist <- dlist[c(1, 4, 7, 8)]) # keep only the ones I'm interested in
# NOTE(review): selecting rasters by position (1, 4, 7, 8) is fragile -- it
# silently breaks if files are added to or removed from rastdir; confirm order.
# Loop through the list and read in the rasters
rlist <- list()
for(i in 1:length(dlist)){
  rlist<- c(rlist, raster(paste(rastdir, dlist[i], sep="/")))
}
# Stack the rasters. They must have the exact same properties (proj, origin, res, extent)
rs <- stack(rlist)
names(rs) # Double check all rasters of interest made it into the stack
#plot(rs)
# Crop down to a 5km buffer from transects
rs <- crop(x=rs, y=gBuffer(trans, width=5000))
plot(rs)
# Create a 100m buffer around each transect line
polys <- gBuffer(trans, width=100, capStyle="FLAT", byid=TRUE)
plot(polys[1:4, ]); plot(trans, add=TRUE) # plot a few to see
# Take the mean of each raster within the buffered polygon of each line
xmean <- extract(x=rs, y=polys, fun=mean, df=TRUE) # mean
# Give columns meaningful names and append to one data.frame
names(xmean)[2:ncol(xmean)] <- c("bare", "herb", "shrub", "height")
xmean[2:ncol(xmean)] <- round(xmean[2:ncol(xmean)], 1)
plot(xmean)
# Append to data from shapefile attribute table to make covariate data.frame
(sdf <- trans@data)
(sdf <- data.frame(cbind(sdf, xmean), row.names=NULL)) # append, they're already in the right order
sdf$ID <- NULL # remove ID field after ensuring match with TranID order
sdf$Tier <- sdf$Core <- NULL # extra cols
# Merge to existing transect data
tB <- merge(tB, sdf, by.x="siteID", by.y="TransectID")
rm(sdf)
tB <- droplevels(tB) # drops unused factor levels in sdf (specifically TranID)
# Add categorical sagebrush cover (low being less than 10%)
tB$shrubclass <- "Low"
tB[tB$shrub >= 10, "shrubclass"] <- "High"
tB$shrubclass <- factor(tB$shrubclass, levels=c("Low", "High"))
# For points
# Take the mean of each raster within 100 m of each point
xmean <- extract(x=rs, y=pts, fun=mean, buffer=100, df=TRUE) # mean
# Give columns meaningful names and append to one data.frame
names(xmean)[2:ncol(xmean)] <- c("bare", "herb", "shrub", "height")
xmean[2:ncol(xmean)] <- round(xmean[2:ncol(xmean)], 1)
plot(xmean)
# Append to data from shapefile attribute table to make covariate data.frame
(sdf <- pts@data)
(sdf <- data.frame(cbind(sdf, xmean), row.names=NULL)) # append, they're already in the right order
sdf$ID <- NULL # remove ID field after ensuring match with TranID order
# Merge to existing transect data
tS <- merge(tS, sdf, by.x="siteID", by.y="Pt_ID")
rm(sdf)
tS <- droplevels(tS)
tS <- tS[, c("siteID", "observer", "bare", "herb", "shrub", "height")]
# Make sure pt sites has full complement of site levels
# (dS is the thrasher detection table created earlier in the script)
length(levels(tS$siteID))
levels(tS$siteID)
length(levels(dS$siteID))
levels(dS$siteID)
table(tS$siteID)
table(dS$siteID)
dS$siteID <- factor(dS$siteID)
length(levels(dS$siteID))
# The two levels with no detections, add back as levels
# so detection and site tables share an identical factor-level set.
levels(dS$siteID) <- c(levels(dS$siteID), "C1X03", "C1X07")
table(dS$siteID)
# Rename to the object names shipped in the Rdistance package data
sparrow.detections <- data.frame(dB, row.names=NULL)
sparrow.sites <- data.frame(tB, row.names=NULL)
thrasher.detections <- data.frame(dS, row.names=NULL)
thrasher.sites <- data.frame(tS, row.names=NULL)
# Erase everything but the detection and transect datasets
rm(list=setdiff(ls(), c("sparrow.detections", "sparrow.sites",
                        "thrasher.detections", "thrasher.sites")))
# Save .rda files to package directory
save(sparrow.detections, file="C:/R_Code/Rdistance/data/sparrow.detections.rda")
save(sparrow.sites, file="C:/R_Code/Rdistance/data/sparrow.sites.rda")
save(thrasher.detections, file="C:/R_Code/Rdistance/data/thrasher.detections.rda")
save(thrasher.sites, file="C:/R_Code/Rdistance/data/thrasher.sites.rda")
# Added in Sept 2017 for version 2.0 release
# Rename objects and .rda files (load old name, save under camelCase name)
load("C:/R_Code/Rdistance/data/sparrow.detections.rda")
sparrowDetectionData <- sparrow.detections
save(sparrowDetectionData, file="C:/R_Code/Rdistance/data/sparrowDetectionData.rda")
load("C:/R_Code/Rdistance/data/sparrow.sites.rda")
sparrowSiteData <- sparrow.sites
save(sparrowSiteData, file="C:/R_Code/Rdistance/data/sparrowSiteData.rda")
load("C:/R_Code/Rdistance/data/thrasher.detections.rda")
thrasherDetectionData <- thrasher.detections
save(thrasherDetectionData, file="C:/R_Code/Rdistance/data/thrasherDetectionData.rda")
load("C:/R_Code/Rdistance/data/thrasher.sites.rda")
thrasherSiteData <- thrasher.sites
save(thrasherSiteData, file="C:/R_Code/Rdistance/data/thrasherSiteData.rda")
# END
|
994959845c16c251ae92a626538a777627c90162
|
19fb44f07c65e2cd21727d32fb4cf39cf78537e7
|
/Scripts/R_Script/03_Data_Model.R
|
599a00a1d0302967df1103f2de75c0ecb3d8c3d8
|
[] |
no_license
|
tmcdevittgalles/Population_Dynamics
|
9c826b347f7d610e3d7385a538c891b21febeeda
|
e7ccaf723720a8b0c2e8a6592fcaccb0e7a1673f
|
refs/heads/master
| 2023-08-28T08:09:19.603339
| 2021-11-04T18:12:46
| 2021-11-04T18:12:46
| 401,440,431
| 0
| 0
| null | 2021-11-04T18:12:47
| 2021-08-30T18:05:50
|
R
|
UTF-8
|
R
| false
| false
| 16,392
|
r
|
03_Data_Model.R
|
##### Powell Center: Phenological patterns of mosquitoes #######
# Travis McDevitt-Galles
# 09/20/2021
# title: 03_Data_Model
# Improving the data model to include night v day sammpling
library(dplyr)
library(ggplot2)
library(patchwork)
library(rstan)
library(rstanarm)
library(matrixStats)
library(gamm4)
#Set working directory
setwd("C:/Users/tmcdevitt-galles/Documents/Population_dynamics")
### Loading in simple dataset
full.df <- read.csv("Data/simple_df.csv")
dim(full.df) ## 1923 X 24
names(full.df)
## Data structure
##
## Species: Aedes vexans
## Domain : D05 - great lakes
## Site: UNDE
## Plots: 11
## Years: 5 , 2014,2016,2017, 2018, 2019
## Unique sampling events : 1923
## adding julian date to data frame
# Days since Jan 1 of the first study year. NOTE(review): uses a fixed 365-day
# year, so leap years drift the index by one day -- confirm that is acceptable.
full.df$Julian <- full.df$DOY+(full.df$Year-min(full.df$Year))*365
## modifying the dataset
# Keep only samples with usable weights (needed below to form the offset).
stan.df <- full.df %>% filter(SubsetWeight >0 & TotalWeight >0 )
# Collapse replicate traps within a plot/date/night-or-day combination.
stan.df <- stan.df %>% group_by(SciName, Julian, DOY, Plot, Domain, Year, NorD) %>%
  summarise(
    Count = sum(Count),
    TrapHours = sum(TrapHours),
    SubsetWeight = sum( SubsetWeight),
    TotalWeight = sum( TotalWeight ),
    Tmin7 = mean(Tmin7),
    PPT14 = mean(PPT14)
  )
# Sampling-effort offset: trap-days scaled by the fraction of the catch that
# was actually identified (subset weight / total weight).
stan.df$Offset <- (stan.df$TrapHours/24) *
  (stan.df$SubsetWeight/stan.df$TotalWeight)
stan.df %>%
ggplot(aes(x=DOY,y= Count*Offset ,color=as.factor(Year)) )+ geom_point()
stan.df %>%
ggplot(aes(x=Julian,y= log10(Count*Offset+1), color=as.factor(Year) ))+
geom_point()+ scale_color_brewer(palette="Set1", name="Year") +
theme_classic()+ylab("Mosquito Count")+ xlab("Julian date")+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
## Creating weather data
weather.df <- read.csv("Data/simple_weather.csv")
# Same fixed-365-day Julian index as used for the mosquito data above.
weather.df$Julian <- weather.df$DOY+(weather.df$Year-min(weather.df$Year))*365
# Design matrix on the raw daily weather; superseded by the smoothed version
# built at the end of this section (Weather is reassigned below).
Weather <- model.matrix(~ scale(weather.df$PPT)*scale(weather.df$TMIN))
### Temperature
# Smooth daily minimum temperature over the Julian index with a 30-knot spline.
gam.temp <- gam( TMIN ~ s(Julian,k = 30),
                 data=weather.df, family="gaussian")
summary(gam.temp)
## lets try and plot this with new data
pred.df <- dplyr::select( weather.df, c("Julian"))
dum.df <- unique(pred.df)
# NOTE(review): this assignment assumes unique(Julian) has the same length and
# order as weather.df rows (i.e. one row per day) -- verify, otherwise the
# predicted values will misalign.
weather.df$STemp<- predict(gam.temp, newdata = dum.df)
weather.df %>%
  ggplot( aes(x=Julian, y=STemp) ) +
  geom_point( aes(x= Julian,y=TMIN),size=1)+
  geom_line(size=2,alpha=.75)+ theme_classic()
# 30-day running-mean precipitation.
weather.df$mPPT <- caTools::runmean(weather.df$PPT,30)
plot(weather.df$Julian,weather.df$mPPT, type = "l")
plot(weather.df$Julian,weather.df$TMIN, type = "l")
plot(weather.df$Julian,weather.df$STemp, type = "l")
# Final covariate design matrix: smoothed temperature x smoothed precipitation.
Weather <- model.matrix(~scale(weather.df$STemp)*scale(weather.df$mPPT))
# Indicator (cell-means) design matrix for night vs day sampling.
data.m <- model.matrix(~ 0+ stan.df$NorD)
# Assemble the data list expected by the Stan AR model: N observations mapped
# onto a daily latent series of length Time via their Julian index.
stan_d <- list( N = nrow(stan.df), P = ncol(Weather),
                Time = nrow(Weather),
                Julian = stan.df$Julian,
                X= Weather, dP = ncol(data.m),
                DX = data.m,
                Y= stan.df$Count, offset = stan.df$Offset )
ar_output5 <- stan( 'Scripts/Stan/Ar_data.stan',
                    data=stan_d, iter = 4000,
                    control = list(max_treedepth = 10))
print(ar_output5, pars = c("rho", 'beta', "dBeta"))
traceplot(ar_output5)  # visual convergence check
post <- extract(ar_output5)
# Posterior-median latent series (log10 scale) against the observed counts.
plot(x=1:nrow(weather.df),y =log10(exp(colMedians(as.matrix(post$m)))), type= "l")
points(x=stan.df$Julian, y= log10((stan.df$Count*stan.df$Offset+1)))
#saveRDS(ar_output5 ,"bayes_back.rds")
#ar_output5 <- readRDS("bayes_back.rds")
## plotting rho values
rho.df <- as.data.frame(post$rho)
colnames(rho.df) <- "Rho"
## plotting rho values
beta.df <- as.data.frame(post$beta)
colnames(beta.df) <- c("Intercept", "TMin", "PPT", "TMin:PPT")
beta.df <- cbind.data.frame(beta.df, rho.df)
beta.df <- tidyr::pivot_longer(beta.df, cols= 1:5, names_to="Parameters",
values_to = "Estimate")
ggplot(beta.df, aes(x=Parameters, y=Estimate))+ geom_violin(alpha=.5,fill="gray")+
geom_boxplot(color="black", width=.05, outlier.size = 0) +
geom_hline(yintercept = 0, size=1 ) +theme_classic()+
ylab("Estimate")+ xlab("Parameters")+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
plot1.df <- beta.df %>% filter( Parameters =="PPT" |
Parameters == "TMin"|
Parameters == "TMin:PPT")
ggplot(plot1.df, aes(x=Parameters, y=Estimate))+ geom_violin(alpha=.5,fill="gray")+
geom_boxplot(color="black", width=.05, outlier.size = 0) +
geom_hline(yintercept = 0, size=1 ) +theme_classic()+
ylab("Estimate")+ xlab("Parameters")+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
plot.df <- as.data.frame(colMedians(as.matrix(post$m)))
test <- colQuantiles(as.matrix(post$m), probs=c(0.025,0.975))
plot.df$Q_2.5 <- test[,1]
plot.df$Q_97.5 <- test[,2]
plot.df$Julian <- 1:nrow(plot.df)
colnames(plot.df)[1] <- "Count"
plot.df %>%
ggplot( aes(x=Julian, y=log10(exp(Count)+1))) +
geom_ribbon(aes(x=Julian, ymin= log10(exp(Q_2.5)+1),
ymax= log10(exp(Q_97.5)+1) ), fill ="grey",alpha=.7 )+
geom_line(color="black",alpha=.75, size=2)+
geom_point(data=stan.df, aes(x=Julian, y=log10(( Count /TrapHours) +1),
color=as.factor(Year)) )+
scale_color_brewer(palette="Set1", name="Year")+
ylab("log10(Predicted mosquito count)") +
xlab("Julian date")+ theme_classic()+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
###
test <- colQuantiles(as.matrix(post$m), probs=c(0.025,0.975))
plot.df$Q_2.5 <- test[,1]
plot.df$Q_97.5 <- test[,2]
plot.df$Julian <- 1:nrow(plot.df)
colnames(plot.df)[1] <- "Count"
at1 <- plot.df %>% filter(Julian >800 & Julian < 1100)
stan1.df <- stan.df %>% filter(Julian >800 & Julian < 1100)
clor <- RColorBrewer::brewer.pal(1,"Set1")
at1 %>%
ggplot( aes(x=Julian, y=log10(exp(Count)+1))) +
geom_ribbon(aes(x=Julian, ymin= log10(exp(Q_2.5)+1),
ymax= log10(exp(Q_97.5)+1) ), fill ="grey",alpha=.7 )+
geom_line(color="black", alpha=.75, size=2) +
geom_point(data=stan1.df, aes(x=Julian, y=log10(( Count /TrapHours) +1),
color=as.factor(Year)) )+
scale_color_manual( values ="#377EB8", name="Year")+
ylab("log10(Predicted mosquito count)") + ylim(0,2.5)+
xlab("Julian date")+ theme_classic()+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
# NOTE(review): both plots below reference columns (`Pred`, `Offset`, `Plot`,
# `Year`) that are NOT present in `plot.df` as constructed above (it only has
# Count, Q_2.5, Q_97.5, Julian). This looks like stale code from an earlier
# version of the script -- confirm which data frame was intended.
plot.df %>%
  ggplot( aes(x=Julian, y=log10((Count*Offset)+1))) +
  geom_point(aes(x= Julian, y= Pred*.52),size=1)+
  geom_point(color="red", size=.7)+ facet_wrap(~Plot, scales ="free")

plot.df %>%
  ggplot( aes(x=Julian, y=log10((Count*Offset)+1), color = as.factor(Year))) +
  geom_point(aes(x= Julian, y= Pred*Offset),size=2)+
  geom_point(color="black", size=1.5,alpha=.5)+ylab("Mosquito Count") +
  xlab("Julian date") +
  # BUG FIX: the original was missing the `+` after xlab(), which detached the
  # colour scale and theme layers from the plot and left an orphaned
  # `scale_color_brewer(...) + theme_classic() + theme(...)` expression that
  # errors when evaluated on its own.
  scale_color_brewer(palette = "Set1",
                     name="Predicted count year")+ theme_classic()+
  theme( legend.key.size = unit(1.5, "cm"),
         legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
         legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
         legend.position = "top",
         axis.line.x = element_line(color="black") ,
         axis.ticks.y = element_line(color="black"),
         axis.ticks.x = element_line(color="black"),
         axis.title.x = element_text(size = rel(1.8)),
         axis.text.x = element_text(vjust=0.5, color = "black",size=14),
         axis.text.y = element_text(vjust=0.5,color = "black",size=14),
         axis.title.y = element_text(size = rel(1.8), angle = 90) ,
         strip.text.x = element_text(size=20) )
plot.df %>%
ggplot( aes(x=Julian, y=log10(Count*Offset+1))) +
geom_ribbon(aes(x=Julian, ymin=0, ymax=(Q_97.5*Offset)))+
geom_point(color="red", size=1.5)+ facet_wrap(~Year, scales ="free")
plot.df %>%
ggplot( aes(x=exp(Q_97.5), y=exp(Pred))) +geom_point() +
geom_abline(slope=1, intercept=0)
### Removing a year
nRm.df <- stan.df %>% filter(Year != 2016)
stan_d <- list( N = nrow(nRm.df ), P = ncol(Weather),
Time = nrow(Weather),
Julian = nRm.df$Julian,
X= Weather,
Y= nRm.df$Count, offset = nRm.df$Offset )
ar_output5 <- stan( 'Scripts/Stan/Ar_take_3.stan',
data=stan_d, iter = 2000,
control = list(max_treedepth = 10))
print(ar_output5, pars = c("rho", 'beta'))
traceplot(ar_output5)
post <- extract(ar_output5)
plot(x=1:nrow(weather.df),y =exp(colMedians(as.matrix(post$m))), type= "l")
points(x=stan.df$Julian, y= log10((stan.df$Count*stan.df$Offset+1)))
saveRDS(ar_output5 ,"bayes_back.rds")
## plotting rho values
rho.df <- as.data.frame(post$rho)
colnames(rho.df) <- "Rho"
## plotting rho values
beta.df <- as.data.frame(post$beta)
colnames(beta.df) <- c("Intercept", "TMin", "PPT", "TMin:PPT")
beta.df <- cbind.data.frame(beta.df, rho.df)
beta.df <- tidyr::pivot_longer(beta.df, cols= 1:5, names_to="Parameters",
values_to = "Estimate")
ggplot(beta.df, aes(x=Parameters, y=Estimate))+ geom_violin(alpha=.5,fill="gray")+
geom_boxplot(color="black", width=.05, outlier.size = 0) +
geom_hline(yintercept = 0, size=1 ) +theme_classic()+
ylab("Estimate")+ xlab("Parameters")+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
plot.df <- as.data.frame(colMedians(as.matrix(post$m)))
test <- colQuantiles(as.matrix(post$m), probs=c(0.025,0.975))
plot.df$Q_2.5 <- test[,1]
plot.df$Q_97.5 <- test[,2]
plot.df$Julian <- 1:nrow(plot.df)
colnames(plot.df)[1] <- "Count"
stan2.df <- stan.df %>% filter(Year != 2016)
plot.df %>%
ggplot( aes(x=Julian, y=log10(exp(Count)+1))) +
geom_ribbon(aes(x=Julian, ymin= log10(exp(Q_2.5)+1),
ymax= log10(exp(Q_97.5)+1) ), fill ="grey",alpha=.7 )+
geom_line(color="black",alpha=.75, size=2)+
geom_point(data=stan2.df, aes(x=Julian, y=log10(( Count /TrapHours) +1),
color=as.factor(Year)) )+
scale_color_brewer(palette="Set1", name="Year")+
ylab("log10(Predicted mosquito count)") +
xlab("Julian date")+ theme_classic()+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
###
test <- colQuantiles(as.matrix(post$m), probs=c(0.025,0.975))
plot.df$Q_2.5 <- test[,1]
plot.df$Q_97.5 <- test[,2]
plot.df$Julian <- 1:nrow(plot.df)
colnames(plot.df)[1] <- "Count"
at1 <- plot.df %>% filter(Julian >800 & Julian < 1100)
stan1.df <- stan.df %>% filter(Julian >800 & Julian < 1100)
clor <- RColorBrewer::brewer.pal(1,"Set1")
at1 %>%
ggplot( aes(x=Julian, y=log10(exp(Count)+1))) +
geom_ribbon(aes(x=Julian, ymin= log10(exp(Q_2.5)+1),
ymax= log10(exp(Q_97.5)+1) ), fill ="grey",alpha=.7 )+
geom_line(color="black", alpha=.75, size=2) +
geom_point(data=stan1.df, aes(x=Julian, y=log10(( Count /TrapHours) +1),
color=as.factor(Year)) )+
scale_color_manual( values ="#377EB8", name="Year")+
ylab("log10(Predicted mosquito count)") +# ylim(0,2.5)+
xlab("Julian date")+ theme_classic()+
theme( legend.key.size = unit(1.5, "cm"),
legend.title =element_text(size=14,margin = margin(r =10,unit = "pt")),
legend.text=element_text(size=14,margin = margin(r =10, unit = "pt")),
legend.position = "top",
axis.line.x = element_line(color="black") ,
axis.ticks.y = element_line(color="black"),
axis.ticks.x = element_line(color="black"),
axis.title.x = element_text(size = rel(1.8)),
axis.text.x = element_text(vjust=0.5, color = "black",size=14),
axis.text.y = element_text(vjust=0.5,color = "black",size=14),
axis.title.y = element_text(size = rel(1.8), angle = 90) ,
strip.text.x = element_text(size=20) )
|
1c56622940397a110e3ca4a347248f80d05bc280
|
a885c93cc749ace48fbc5051ac7bbb6c16d862be
|
/server.R
|
d355c892c3ccbf16d50dc5b5c42df7ad6f141b9d
|
[] |
no_license
|
Prateek2690/UserDefinedFunctions
|
53b0d31d2d869ab341a7b22ee66a689b2ba33d79
|
ece5d8aadd812229edefbf428ea9eac5f9ede0c5
|
refs/heads/master
| 2021-01-17T19:25:17.039099
| 2016-07-18T17:00:48
| 2016-07-18T17:00:48
| 63,621,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
server.R
|
library(shiny)
#****************************
# SETTING UP SERVER INTERFACE
#****************************
shinyServer(
  function(input, output){
    # Reactively read the uploaded file using the UI-selected parsing options.
    # input$file is NULL until the user uploads; file1$datapath is the temp
    # path of the uploaded file on the server.
    data <- reactive({
      file1 <- input$file
      if(is.null(file1)){return()}
      read.table(file = file1$datapath,
                 sep = input$sep,
                 header = input$header,
                 stringsAsFactors = input$stringsAsFactors)
    })
    # Render the parsed dataset as an HTML table.
    output$table <- renderTable({
      if(is.null(data())){return()}
      data()
    })
    # Dynamically generate the tabset once the app starts.
    # BUG FIX: the original nested the "Data" and "Summary" tabPanels inside
    # the "About file" tabPanel due to misplaced parentheses, so they never
    # appeared as separate tabs. They are now siblings of tabsetPanel().
    output$tb <- renderUI({
      tabsetPanel(
        tabPanel("About file", tableOutput("fileff")),  # NOTE(review): no output$fileff is defined in this server -- confirm the intended output id
        tabPanel("Data", tableOutput("table")),
        tabPanel("Summary")
      )
    })
    # Echo simple text inputs from the UI back to the page.
    output$myname <- renderText(input$name)
    output$myage <- renderText(input$age)
    output$mygender <- renderText(input$gender)
  }
)
|
f66701c5e3488b6528dc7bfb43d76a60aa259813
|
f9217cba6730017dac596e467659253052fec0db
|
/Transaction_analysis.R
|
c72ff9804660a67ecefe5a1b39e8959894e122c6
|
[] |
no_license
|
xueyingwang/my_R
|
47224bc018cc3fafa5452a40bd543c9b959ec905
|
2100d79b482fdd0bf50ffa2f84d204fc37ba9fb7
|
refs/heads/master
| 2020-04-30T20:26:24.883350
| 2019-03-30T20:01:45
| 2019-03-30T20:01:45
| 177,066,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,726
|
r
|
Transaction_analysis.R
|
#Load file
library(stringr)
library(lubridate)
library(MASS)
?read.table
# Raw file has no header row; column labels live in a separate file.
transaction = read.table("SupermarketTransactions.txt",sep="\t",header=F,stringsAsFactors = F)
colnames(transaction)=scan("SupermarketTransactionsColumnLabels.txt",what="character",sep="\t")
no_transaction=dim(transaction)[1]  # total number of transactions (rows)
# Strip the leading "$" so the amount can be treated as numeric.
new_amount_paid <- str_replace(transaction$`Amount paid`, pattern = '\\$', replacement ='')
?str_replace
transaction$`Amount paid`= as.numeric(new_amount_paid)
# Tax-rate lookup table: rows = product categories, columns = states.
tax=read.table("SupermarketTransactionsTax.csv",row.names=1,sep=",",
               header=T)
#1. How many transactions are made on Tuesday? Answer:1979
# Derive the weekday name from the purchase date (stored as m/d/Y text).
transaction$Weekday <- weekdays(as.Date(transaction$`Purchase Date`, format = '%m/%d/%Y'))
# BUG FIX: the original counted `weekday` (an undefined object) instead of the
# Weekday column just created. Count Tuesdays, ignoring unparseable dates (NA).
num_Tues <- sum(transaction$Weekday == "Tuesday", na.rm = TRUE)
#2. What percentage of purchases (transactions) are made by male customers? Answer:49%
length(which(transaction$Gender=="M"))/no_transaction #6889/14059
paste(round(table(transaction$Gender)["M"] / dim(transaction)[1]*100,2),"%")
#3. What percentage of supermarket patrons are male? Answer: 50.52%
# NOTE(review): selects columns by position (3, 4) -- fragile if the column
# order in the labels file changes; presumably these are Customer ID + Gender.
gender_and_id <- transaction[, c(3,4)]
unique_customers <- unique(gender_and_id)  # one row per unique patron
length(which(unique_customers$Gender == 'M'))/length(unique(transaction$`Customer ID`))
#4. What is the average, median, and standard deviation of amount paid?
#average=$13.00; median=$11.25 Std=$8.22
mean(transaction$`Amount paid`)
median(transaction$`Amount paid`)
sd(transaction$`Amount paid`)
#5. How many transactions are made on Tuesday by male customers?
#968 transactions are made on Tuesday by male customers
length(which(transaction$Gender=="M"&transaction$Weekday=="Tuesday"))
#6. How many transactions that are made on Tuesday by male customers exceed 5 units per transaction?
#117 transactions
length(which(transaction$Gender=="M"&transaction$Weekday=="Tuesday"&transaction$`Units Sold`>5))
#7. Create a contingency table between gender and annual income.
table(transaction$Gender,transaction$`Annual Income`)
# $10K - $30K $110K - $130K $130K - $150K $150K + $30K - $50K $50K - $70K $70K - $90K $90K - $110K
#F 1587 307 390 140 2243 1224 959 320
#M 1503 336 370 133 2358 1146 750 293
#8. How many unique customer IDs are in the data set? Answer:5404 unique IDs
num_patrons=length(unique(transaction$`Customer ID`))
#9. Which day of the week has largest number of transactions? #Monday
library(plyr)
table(transaction$Weekday)[which.max(table(transaction$Weekday))]
#Friday Monday Saturday Sunday Thursday Tuesday Wednesday
#1976 2056 1988 2017 2040 1979 2003
#10. Is amount paid normally distributed? Create several visualizations (histogram, stem-leaf-plot, qqplot) of the distribution of amount paid and interpret the shape.
#No, the amount paid is not normally distributed since the qqline is departure from the qqnorm. QQ plot is not straight. Histogram is not symetric.
#it appears to be right-skewed.
hist(transaction$`Amount paid`)
stem(transaction$`Amount paid`)
qqnorm(transaction$`Amount paid`)
qqline(transaction$`Amount paid`)
#11. Sales tax rate in each state and by product category is provided in Taxes.xlsx. Add a new column TaxRate and assign tax rate to each transaction.
# PERF FIX: replaced the original triple nested loop (every transaction x every
# category x every state, ~rows*45*10 comparisons) with a vectorized lookup:
# row index = product category, column index = state, then a single
# matrix-index extraction from the tax table.
# Transactions whose category/state is not found in `tax` end up NA, matching
# the original behaviour (positions the loop never assigned were NA).
row_idx <- match(as.character(transaction$`Product Category`), rownames(tax))
col_idx <- match(as.character(transaction$State), colnames(tax))
transaction$Taxrate <- as.numeric(tax[cbind(row_idx, col_idx)])
#12. What is the total government tax revenue generated for each country from the supermarket chain?
#USA=$2928.05 Mexico=$835.4 Canada=$278.65
unique(transaction$Country)
# Back out the pre-tax amount from the tax-inclusive amount paid; the
# difference is the tax (government revenue) on each transaction.
transaction$supermarket_revenue <- transaction$`Amount paid`/(1+as.numeric(transaction$Taxrate)/100)
transaction$govn_revenue<-transaction$`Amount paid`-transaction$supermarket_revenue
#transactions$Government.Revenue=as.numeric(gsub("\\$","",transactions$`Amount paid`))-transactions$Supermarket.Revenue
sum(transaction[transaction$Country=="USA",]$govn_revenue)
sum(transaction[transaction$Country=="Mexico",]$govn_revenue)
sum(transaction[transaction$Country=="Canada",]$govn_revenue)
#13. Contrast amount paid for males and females by creating a side by side box plot.
boxplot(transaction$`Amount paid`~transaction$Gender,data=transaction)
#14. Create a scatter plot between amount paid and government revenue.
plot(x=transaction$`Amount paid`,y=transaction$govn_revenue,xlab="Amount paid",ylab="Government Revenue")
|
f83f6eca7ad90658f258a5b873d5b7a1a9a8640a
|
986b80d564588d1d702aac13e2eb24a91cacfc05
|
/man/box_and_whisker_monthly_plots.Rd
|
d190928d45fbecd6b4371478e8b3ecf24a404bc8
|
[] |
no_license
|
strathclyde-marine-resource-modelling/StrathE2E2
|
592b73968d3f19513d3fffb435916605f7c47237
|
a5f5c0507e92cd8c48afc75c14bffa91d4132cc5
|
refs/heads/master
| 2020-05-18T06:33:23.756987
| 2019-06-20T15:43:55
| 2019-06-20T15:43:55
| 184,236,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 414
|
rd
|
box_and_whisker_monthly_plots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/box_and_whisker_monthly_plots.R
\name{box_and_whisker_monthly_plots}
\alias{box_and_whisker_monthly_plots}
\title{Plot some monthly result summaries}
\usage{
box_and_whisker_monthly_plots(model, results)
}
\arguments{
\item{model}{model object}
\item{results}{model results object}
}
\description{
Plot some monthly result summaries.
}
|
2225e6355e61ba7b721084daf481d0cc35bcb223
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/scifigure/tests/test-init_experiments.R
|
f169935fbe2c93b2f25f01c93da2f291e933b781
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 270
|
r
|
test-init_experiments.R
|
context("Checking init_experiments")

# Experiment labels deliberately longer than the requested experiment count
# (renamed from `names`, which shadowed base::names).
exp_labels <- c(
  "Run_16_01", "Run_16_04", "Run_16_07",
  "Run_16_09", "Run_16_12"
)

testthat::test_that("more names than experiments", {
  # Supplying 5 labels for only 2 experiments must raise an error.
  testthat::expect_error(
    init_experiments(nexp = 2, exp_names = exp_labels)
  )
})
|
1e9757b261e37cf96285f7aebb832cc43cdbcb65
|
56e6b1457cebcb2ecd72f89d77b4f6e5dba7360f
|
/FACS_regeneration_ECESCX/DESeq_DualTominj.r
|
c6361e1ef203bf74be9a2c21dfd200114318106e
|
[] |
no_license
|
ciwemb/fan-2019-tendon
|
cd8417aeaef178e093dda70dbb8320bcc4860600
|
f6510386161d7dbdbca7bfdb12cc00503055d09c
|
refs/heads/master
| 2020-07-27T15:24:25.588127
| 2019-09-17T20:58:23
| 2019-09-17T20:58:23
| 209,140,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,853
|
r
|
DESeq_DualTominj.r
|
#set working directory
# NOTE(review): hard-coded absolute path; this script only runs on the
# original author's machine as written.
setwd("/Volumes/sequence/harvey/git/TendonDual_Tominj_DESeq")
#Load metadata table and store it (sample sheet for HTSeq count files)
TendonDualTominj_metadata<-read.table("TendonDualTominj.metadata.tsv", header=TRUE)
TendonDualTominj_metadata
#metadata data summary
summary(TendonDualTominj_metadata)
#Load DESeq library
library("DESeq")
#Define the count table??
#Load in the read count data
cds2<-newCountDataSetFromHTSeqCount(TendonDualTominj_metadata, directory="/Volumes/sequence/harvey/git/TendonDual_Tominj_DESeq/")
cds2
#Normalizing Data (library-size correction via DESeq size factors)
cds2<-estimateSizeFactors(cds2)
sizeFactors(cds2)
#Estimating Dispersion; parametricDispersion failed so had to do local fitType
# sharingMode="gene-est-only" uses per-gene dispersion estimates only (no
# shrinkage toward the fitted trend).
cds2<-estimateDispersions(cds2, fitType = c("local"), sharingMode= c("gene-est-only"))
plotDispEsts(cds2)
head(fData(cds2))
write.csv(counts(cds2, normalized=TRUE), file="/Volumes/sequence/harvey/git/TendonDual_Tominj_DESeq/normalized_cds_counts.csv")
#Adjust Variance for PCA
vsd<-varianceStabilizingTransformation(cds2)
vsd
#Make PCA plot with DESeq
plotPCA(vsd)
#Calculating Differential Expression; write out file as .csv
# Negative-binomial test of condition "Dual" vs "Tominj".
Dual_vs_Tominj <- nbinomTest(cds2, "Dual", "Tominj" )
Dual_vs_Tominj
write.csv(Dual_vs_Tominj, file = "/Volumes/sequence/harvey/git/TendonDual_Tominj_DESeq/Dual_vs_Tominj.csv")
#GOI list for plot subsetting (genes of interest, highlighted in MA/volcano plots)
Canonical <- c("Bgn","Ctgf","Dcn","Fmod","Fn1","Tnc","Tnmd","Egr1","Scx","Mkx")
Canonical
# NOTE(review): "CrispId2" looks like a typo for the gene symbol "Crispld2"
# (capital I vs lowercase l) -- if wrong it will silently never match in the
# %in% filters below; confirm against the count table's gene ids.
Tenocyte10X <- c("Abi3bp","Cilp2","Clec11a","Clu","CrispId2","Ctsk","Dkk3","Ecm2","Fibin","Kera","Leprel1","Lum","Pdgfrl","Prelp","Prg4","Serpinf1","Sfrp2","Sparc","Thbs1","Thbs2","Thbs4")
Tenocyte10X
Sheath10X <- c("Has1","Ly6a","Npdc1","Phlda3","Plin2","S100a10")
Sheath10X
Collagen <- c("Col1a1","Col1a2","Col3a1","Col5a1","Col5a2","Col6a1","Col6a2","Col6a3","Col8a1","Col12a1")
Collagen
#Plot Differential Expression MA plots
plotMA(Dual_vs_Tominj[order(Dual_vs_Tominj$padj),])
#To plot through ggplot an MA plot with color coded significant genes and GOIs
library( dplyr )
library( ggplot2)
library( stringr)
# M = log-fold-change, A = mean log expression (pseudocount of 1 added).
Dual_vs_Tominj$M <- log2( Dual_vs_Tominj$baseMeanB + 1 ) - log2( Dual_vs_Tominj$baseMeanA + 1 )
Dual_vs_Tominj$A <- ( log2( Dual_vs_Tominj$baseMeanB + 1 ) + log2( Dual_vs_Tominj$baseMeanA + 1 ) ) / 2
ggplot( Dual_vs_Tominj ) +
  geom_point( aes( A, M ), col="grey")+
  labs( x = "Mean log2 normalized counts" , y ="log2FoldChange")+
  geom_point( data=filter( Dual_vs_Tominj, padj < 0.05 ), aes( A, M ), col="skyblue" )+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Canonical ), aes( A, M ), shape=21, col="red" )+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Tenocyte10X ), aes( A, M ), shape=21, col="black")+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Sheath10X ), aes( A, M ), shape=21, col="brown")+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Collagen ), aes( A, M ), shape=21, col="purple")
#Plot Differential Expression Volcano plots
plot(Dual_vs_Tominj$log2FoldChange, -log10(Dual_vs_Tominj$padj))
#To plot through ggplot a DE Volcano plot
Dual_vs_Tominj$log2FC <- log2( Dual_vs_Tominj$baseMeanB + 1 ) - log2( Dual_vs_Tominj$baseMeanA + 1 )
Dual_vs_Tominj$log10padj <- ( -log10( Dual_vs_Tominj$padj) )
ggplot( Dual_vs_Tominj ) +
  geom_point( aes( log2FC, log10padj ), col="grey" )+
  labs( x = "log2FoldChange" , y = "-log10padj")+
  geom_point( data=filter( Dual_vs_Tominj, padj < 0.05 ), aes( log2FC, log10padj ), col="skyblue" )+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Canonical ), aes( log2FC, log10padj ), shape=21, col="red" )+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Tenocyte10X ), aes( log2FC, log10padj ), shape=21, col="black")+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Sheath10X ), aes( log2FC, log10padj ), shape=21, col="brown")+
  geom_point( data=filter( Dual_vs_Tominj, id %in% Collagen ), aes( log2FC, log10padj ), shape=21, col="purple")
|
2bb4f08cd8d22b824759c73b4c02f0f025bb2ebe
|
d3635129ac51c57f908ca724b5297ad409595386
|
/R/FitCEV.R
|
a7732399392e8a2363da179f255e01726000b475
|
[] |
no_license
|
whitneyhuang83/ConcurrentExtremes
|
a5a186e6c6412162e25aa7f89d827a38a08aad45
|
73bf1aec1eea0f2220c71d9eec8a85f0242ec020
|
refs/heads/main
| 2023-04-12T09:32:04.445401
| 2021-04-29T16:00:22
| 2021-04-29T16:00:22
| 362,191,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,668
|
r
|
FitCEV.R
|
library(texmex)
library(evd)
library(rmutil)
library(ismev)
# dat: A (nxp) numeric matrix or data.frame that contains the data
# which: The variable indexed by the column number on which to condition
# mqu: Marginal quantiles
# dqu: Dependence quantile
# x_pred: A vector in which the conditional quantiles will be calculated
# p: The quantile levels of interest
# type: The type of sample quantile algorithm. This is an integer between 1 and 9
# Fit a conditional extreme value (Heffernan-Tawn) model with texmex::mex()
# and return conditional quantile curves of column 1 given column 2.
#
# dat:       A (n x p) numeric matrix or data.frame containing the data.
# which:     Column number of the conditioning variable.
# mqu:       Marginal GPD fitting quantiles, c(column 1, column 2).
# dqu:       Dependence-model quantile threshold.
# constrain: Passed through to texmex::mex().
# x_pred:    Conditioning-variable values at which quantiles are computed.
# p:         Quantile levels of interest (any length; one output row each).
# type:      Sample-quantile algorithm, integer 1..9 (see ?quantile).
#
# Returns: list(y, x) where y is a length(p) x length(x_pred) matrix of
# conditional quantiles of column 1, and x = x_pred.
CEVFit <- function(dat, which = 2, mqu = c(0.75, 0.6),
                   dqu = 0.6, constrain = FALSE, x_pred,
                   p, type = 8) {
  fit <- mex(dat, which = which, mqu = mqu,
             dqu = dqu, constrain = constrain)
  # Marginal GPD of the conditioning column; the fitted object stores the
  # log of the scale parameter, so exponentiate it back.
  gpd2 <- fit$margins$models$Column2$par
  gpd2[1] <- exp(gpd2[1])
  thres2 <- fit$margins$models$Column2$threshold
  x <- x_pred
  # Map x_pred to the Laplace scale used by the dependence model:
  # empirical below the threshold quantile, GPD tail above it.
  p2 <- mqu[2] + (1 - mqu[2]) * evd::pgpd(x, thres2, gpd2[1], gpd2[2])
  xx <- qlaplace(p2)
  # Simulate the conditional response on the Laplace scale via the fitted
  # dependence structure: Y | X = x ~ a*x + x^b * Z, Z = fitted residuals.
  z <- fit$dependence$Z
  n <- length(x)
  yy <- array(dim = c(n, length(z)))
  depPars <- fit$dependence$coefficients[1:2]
  for (j in seq_len(n)) {
    yy[j, ] <- depPars[1] * xx[j] + xx[j]^(depPars[2]) * z
  }
  # Empirical conditional quantiles on the Laplace scale, mapped back to
  # the uniform scale. py has one row per quantile level in p.
  py <- apply(yy, 1, function(v) plaplace(quantile(v, p, type = type)))
  py <- matrix(py, nrow = length(p))  # keep matrix shape when length(p) == 1
  # Back-transform to the original margin of column 1: GPD above the
  # marginal threshold, empirical sample quantiles below it.
  rate1 <- fit$margins$models$Column1$rate
  gpd1 <- fit$margins$models$Column1$par
  gpd1[1] <- exp(gpd1[1])
  thres1 <- fit$margins$models$Column1$threshold
  # BUG FIX: the row count was hard-coded to 5, silently assuming
  # length(p) == 5; generalized to any number of quantile levels.
  y <- array(dim = c(length(p), length(x_pred)))
  for (i in seq_along(p)) {
    for (j in seq_along(x_pred)) {
      if (py[i, j] > 1 - rate1) {
        y[i, j] <- evd::qgpd((py[i, j] - 1 + rate1) / rate1, thres1, gpd1[1], gpd1[2])
      } else {
        y[i, j] <- quantile(dat[, 1], py[i, j], type = type)
      }
    }
  }
  return(list(y = y, x = x))
}
|
3db1c12d5e50130f4e41da38cae6e8e4f43df877
|
52384e5931df60533cc55307dd115e6a0f078bb1
|
/scriptProject1.R
|
72bd99517355899c5670fea83682059a102daed5
|
[] |
no_license
|
julvi/MachineLearning
|
6918a997180618878b739488485cf650b95a3237
|
3f701c28a2c463c7af9a27f3b7e8f968bd781993
|
refs/heads/master
| 2021-05-08T10:18:31.428396
| 2018-02-01T13:12:14
| 2018-02-01T13:12:14
| 119,837,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,591
|
r
|
scriptProject1.R
|
#############################################################################################
#                                       Project 1                                           #
# Load the phage dataset, drop non-informative / class-related columns, and  #
# build the numeric class-label vector from the "annotated host" column.     #
#############################################################################################
# Clear the work space.
rm(list=ls())
#----------------------
# Load the data into R
#----------------------
# Set working directory (machine-specific path).
setwd("~/Google Drive/PhD/Courses/Introducion to Machine Learning and Data Mining /dataset")
# Read data in. NOTE(review): header = T, as.is = T uses the reassignable
# T shorthand; TRUE is the safe spelling.
dat <- read.table("phageDS.complete25FEB.txt", header = T, as.is = T)
#dat <- read.csv("phageDS.complete4.csv", sep=",", check.names=FALSE)
dim(dat)
#[1] 126 16
####### Remove the columns #######
# 1: phage ID --> unique per observation, carries no information
####### Class or class-related attributes #######
# 5: annotated host: our class labels
# 7: host GC content: related to the host
# 6: host genome size: related to the host
####### Categorical attributes (converted to numeric columns later) #######
# 4: phage family: categorical attribute
# 8: predicted host: categorical attribute
datnew <- dat[,c(-1,-4,-5,-6,-7,-8)]
# Check that the dimensions of the data set are still ok.
dim(datnew)
#[1] 126 10
###################################################################################################################
# Defining the class: annotated host
###################################################################################################################
# Extract class labels of observations.
# NOTE(review): the hard-coded 1:126 matches dim(dat) above but will break
# silently if the input file changes; dat[, 5] would be safer.
classlabels <- dat[1:126,5]
# Extract the class names present in data.
classNames <- unique(classlabels)
# Numeric class assignments, shifted to start at 0.
y <- as.numeric(as.factor(classlabels))
y <- y-1
###################################################################################################################
# Plot phage GC content against phage genome size, colored by class.
###################################################################################################################
# Choose which attributes to plot.
attX = datnew[,1] #phage_genome_size
attY = datnew[,2] #phage_GCcontent
# Make a simple plot.
plot(attX, attY)
# Fancier plot: first open an empty canvas (type="n") sized by the data
# range, then add the points class by class.
plot(c(min(attX), max(attX)), c(min(attY), max(attY)), xlab="Phage genome size", ylab="phage GCcontent", main="Phage data", bty="L", type="n")
# Plot points for each class in a separate color; the color index
# (i+1)*10 must match the legend fill cols[10*(1:8)] below.
cols <- colors()
for(i in sort(unique(y))){
points(attX[y==i], attY[y==i], col=cols[(i+1)*10])
}
# Same plot again, but with extra x-range to leave room for the legend.
plot(c(min(attX), max(attX)+150000), c(min(attY), max(attY)), xlab="Phage genome size", ylab="phage GCcontent", main="Phage data", bty="L", type="n")
# Plot points in separate colors.
cols <- colors()
for(i in sort(unique(y))){
points(attX[y==i], attY[y==i], col=cols[(i+1)*10])
}
# Turn the class names into plain character strings.
classNew<-as.character(classNames)
# Sort class names to match the order the classes were plotted in the loop.
sorted <- sort(classNew, index.return=TRUE)
# Add legend (legend position is hard-coded in data coordinates).
legend(200500, 70, legend=classNames[sorted$ix], fill = cols[10*(1:8)],cex=0.7)
# NOTE: cex changes the size of the legend text.
###################################################################################################################
#Standardize the attributes
###################################################################################################################
#Make the 2 categorical attributes into something that we can use for PCA.
source('categoric2numeric.R')
#Convert the column of phage family to 9 columns of numeric values
col4 = categoric2numeric(dat[,4])
#attach the numeric columns to datne
datnew[,11:19] <- col4[[1]]
#extract the familynames
familynames <- col4[[2]]
#assign the familynames as column names
colnames(datnew)[11:19] <- familynames
#Convert the column of predicted host to 9 columns of numeric values
col8 = categoric2numeric(dat[,8])
datnew[,20:28] <- col8[[1]]
predhost <- col8[[2]]
colnames(datnew)[20:28] <- predhost
#Extract the means of columns
means <- colMeans(datnew)
#Subtract the column means from each row.
#Transpose result since apply returns a matrix corresponding to the transposed datfinal
datzeromean<- t(apply(datnew,1,'-',means))
#Extract the standard deviation of the columns
sd <-apply(datzeromean, 2, 'sd')
datStandard <- t(apply(datzeromean,1,'%/%',sd))
#Check that column means are now close to each other
colMeans(datStandard)
#Check that the colum SD are now close to one
apply(datStandard, 2, 'sd')
#weight the converted categorical columns by dividing the values by the square root of the number
#of converted columns per categorical column
datStandard[,11:19] <- datStandard[,11:19]*(1/sqrt(9))
datStandard[,20:28] <- datStandard[,20:28]*(1/sqrt(9))
###------------------------------------------------
#Check for outliers
###------------------------------------------------
attributes <- colnames(datStandard)
#A boxplot of the eight attributes (NON standardized)
par(mfrow=c(1,1))
boxplot(datnew, main="Boxplots of attribute values")
#A boxplot of the eight attributes (standardized)
par(mfrow=c(1,1))
boxplot(datStandard, main="Boxplots of attribute values")
#Here we can see that there are some outliers for attribute: 1,5,6,7,9,10
#Plot a histogram for those attributes to take a closer look
m = c(1,5,6,7,9,10 );
yvals <- c()
for(i in 1:6)
{
res <- hist(datStandard[,m[i]],breaks=51, plot=FALSE);
yvals <- c(yvals, res$counts)
}
par(mfrow=c(2,3))
for(i in 1:6)
{
hist(datStandard[,m[i]],breaks=51, main=unlist(attributes[m[i]]), ylim=c(min(yvals), max(yvals)))
}
#For sure numer 3 has a outlier, it is hard to see the others.
# It looks more like they are widely dirstributed.
#We remove the outlier from number 3 attribute
idxOutlier = datStandard[,6]>4
# Finally we will remove these from the data set
X = datStandard[-which(idxOutlier),]
yl = datStandard[-which(idxOutlier)]
N = N-sum(idxOutlier);
#Now we look at the boxplot again
par(mfrow=c(1,1))
boxplot(X, main="Boxplots of attribute values, removed 1 outlier")
#NOTE: now it looks better. I am not sure whether to remove the rest also, what do you think?
idxOutlierT = X[,1]>1.5 |X[,5]>1.5 | X[,7]>1.5 | X[,8]>1.5 | X[,9]>1.5
# Finally we will remove these from the data set
XT = X[-which(idxOutlierT),]
yT = y[-which(idxOutlierT)]
NT = N-sum(idxOutlierT);
#Now we look at the boxplot again
par(mfrow=c(1,1))
boxplot(XT, main="Boxplots of attribute values, with no outliers")
#NOTE: I am not sure what is wrong with numer 6 attribute, but the rest look ok
#----------------------------------------------------------
# Make PCA (via SVD of the standardized data).
#----------------------------------------------------------
# Get the SVD decomposition of the standardized data.
svdres <- svd(datStandard)
# Singular values (diagonal of D).
singularvals <- svdres$d
# Proportion of total variance explained by each PC - for the report.
pcvariance <- singularvals^2/sum(singularvals^2)
# V holds the loadings: one column of attribute weights per component.
V <- svdres$v
# Inspect the first principal component (its principal direction).
V[,1]
# Plot the cumulative proportion of variance explained by the PCs.
plot(cumsum(pcvariance), main="Data variance explained by PCs", xlab="Number of PCs included in variance sum", ylab="Proportion of variance explained")
# Scores: project the observations onto the principal components.
Z <- svdres$u%*%diag(svdres$d)
# Extract the two principal components i and j.
# NOTE(review): i is overwritten by the loop variable a few lines below.
i <- 1; j <- 2;
pcx <- Z[,i]
pcy <- Z[,j]
# NOTE(review): main="Phage genome size" is misleading for a PC1/PC2
# score plot; presumably a copy-paste from the earlier scatter plot.
plot(c(min(pcx), max(pcx)), c(min(pcy), max(pcy)), xlab="PC 1", ylab="PC 2", main="Phage genome size", type="n", bty="L")
# Plot the scores for each class in a separate color.
cols <- colors()
for(i in sort(unique(y))){
points(pcx[y==i], pcy[y==i], col=cols[(i+1)*10])
}
# Same plot with extra range so there is room for the legend.
plot(c(min(pcx), max(pcx)+5), c(min(pcy), max(pcy)+3), xlab="PC 1", ylab="PC 2", main="Phage genome size", type="n", bty="L")
cols <- colors()
for(i in sort(unique(y))){
points(pcx[y==i], pcy[y==i], col=cols[(i+1)*10])
}
# Order of class names matching the plotting loop (classNew is defined in
# the scatter-plot section above).
sorted <- sort(classNew, index.return=TRUE)
# Add legend.
legend(4.5,9, legend=classNames[sorted$ix], fill = cols[10*(1:8)], cex=0.7)
# Alternative PCA via the ChemometricsWithR package (cross-check of the
# manual SVD above). NOTE(review): installing packages mid-script is a
# side effect; move install.packages() out of the analysis script.
install.packages("ChemometricsWithR")
library(ChemometricsWithR)
# NOTE(review): datPCA is never defined anywhere in this script --
# presumably it should be datnew (or datStandard); confirm before running.
phagePCA=PCA(scale(datPCA))
par(mfrow=c(1,1))
scoreplot(phagePCA)
loadingplot(phagePCA, show.names = TRUE)
#biplot(phagePCA, score.col = phagePCA$X)
screeplot(phagePCA,type="percentage")
loadings(phagePCA)
# NOTE: This matches what we found with the manual scaling - good.
########-------------------------------------------------
# Summary statistics
########-------------------------------------------------
# Mean, variance, median and range of each variable, before any scaling.
# NOTE(review): datPCA is never defined in this script; presumably datnew.
mean_dat <- apply(datPCA, 2, mean)
var_dat <- apply(datPCA, 2, var)
median_dat <- apply(datPCA, 2, median)
range_dat <- diff(apply(datPCA, 2, range)) ## range returns min and max; diff gives max - min
# Comparing mean vs median hints at outliers; the large scale differences
# between attributes justify standardizing before PCA.
########-------------------------------------------------
# Similarities
########-------------------------------------------------
# Pairwise scatter plots of the 10 numeric attributes.
pairs(datStandard[,1:10])
# Correlation matrix between the attributes (symnum gives a compact view).
cor_dat<-cor(datStandard)
symnum(cor_dat)
# Correlation plot via the corrgram package.
#install.packages("corrgram")
library(corrgram)
# Custom lower panel: print the absolute correlation value below the
# diagonal instead of a shaded cell.
panel.cor <- function(x, y, digits=2, cex.cor)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
text(0.5, 0.25, paste("r=",round(r,2)))
}
corrgram(datStandard[,1:10], order=TRUE, lower.panel=panel.cor,
upper.panel=panel.pts, text.panel=panel.txt,
main="Correlation in the Phage Data Set")
# Another correlation plot, via gclus.
library(gclus)
dta.r <- abs(cor(datPCA)) # get correlations
dta.col <- dmat.color(dta.r) # get colors
# Reorder variables so those with highest correlation
# are closest to the diagonal.
dta.o <- order.single(dta.r)
cpairs(datPCA, dta.o, panel.colors=dta.col, gap=.5,
main="Variables Ordered by Correlation" )
# Variables close to the diagonal have the highest correlation.
# Other measures of similarities (course-supplied helper).
source("similarity.R")
# How to make a plot with the similarities:
# http://stackoverflow.com/questions/15271103/how-to-modify-this-correlation-matrix-plot
# Pairwise cosine similarity between all 10 numeric attributes.
# The original code repeated the same loop ten times (v1..v10, one block
# per attribute); this computes the identical concatenated vector in one
# pass. Entry order is unchanged: attribute i vs attribute j, with i
# varying slowest (i.e. v1 followed by v2, ..., v10).
cos_together <- as.vector(
  sapply(seq_len(10), function(i) {
    vapply(seq_len(10), function(j) {
      as.numeric(similarity(t(datPCA[,i]), t(datPCA[,j]), 'cos'))
    }, numeric(1))
  })
)
cos_together
# Other available similarity measures: 'jac', 'ext', 'cos', 'cal'
###------------------------------------------------
# Check whether the attributes are normally distributed.
###------------------------------------------------
# QQ plots of all 10 attributes, before standardization.
# NOTE(review): datPCA is never defined in this script; presumably datnew.
par(mfrow=c(2,5))
for (i in 1:10){
qqnorm(datPCA[,i])
}
# NOTE: Some of them look normally distributed.
# Same, for the standardized (zero mean, unit SD) data.
par(mfrow=c(2,5))
for (i in 1:10){
qqnorm(datStandard[,i])
}
# NOTE: unclear why this looks a bit strange.
# Check by plotting histograms (raw, then standardized).
par(mfrow=c(2,5))
for (i in 1:10){
hist(datPCA[,i],col=i)
}
par(mfrow=c(2,5))
for (i in 1:10){
hist(datStandard[,i],col=i)
}
# NOTE: again, very few look normally distributed.
####################################################################
## A boxplot of every attribute, for each class separately.
####################################################################
# Number of data objects, attributes, and classes.
N = dim(datnew)[1];
M = dim(datnew)[2];
C = length(classNames);
# First pass (plot=FALSE) collects the boxplot statistics so all class
# panels can share the same y-limits. Classes are encoded 0..C-1 in y.
yvals = c()
for(m in 0:(C-1))
{
res <- boxplot(datnew[m==y,], plot=FALSE)
yvals = rbind(yvals, res$stats)
}
par(mfrow=c(2,4))
for(m in 0:(C-1))
{
boxplot(datnew[m==y,], main=paste("Boxplot for", classNames[m+1]), ylim=c(min(yvals), max(yvals)))
}
########################################################
# 3D scatter plot of the first three attributes.
########################################################
# NOTE: probably not for the report - with this many dimensions it is less
# informative than for e.g. the 4-attribute Iris set.
library(scatterplot3d)
par(mfrow=c(1,1))
# NOTE: if more than eight classes are inspected, this color vector must
# be extended.
cols <- c("blue", "green3", "red", "yellow","black","grey","magenta","pink")
cols <- cols[1:length(classNames)]
s3d <- scatterplot3d(datPCA[,1:3], type="n")
for(c in 1:C){
s3d$points3d(datPCA[(c-1)==y,1:3], col=cols[c])
}
#legend("topright", legend=classNames, fill=unique(cols[match(y,as.numeric(classNames))]))
# Interactive 3D plot via rgl.
library(rgl)
# NOTE: hard-coded per-class colors; "grey" is assigned twice (y==4 and
# y==6), so those two classes are indistinguishable here.
cols = rep("black", times=length(y))
cols[y==0] <- "blue"
cols[y==1] <- "green3"
cols[y==2] <- "red"
cols[y==3] <- "pink"
cols[y==4] <- "grey"
cols[y==5] <- "yellow"
cols[y==6] <- "grey"
cols[y==7] <- "magenta"
plot3d(datPCA[,1:3], col=cols, size=3)
# Notes for the report:
# Saving a plot directly to a png file:
#png("logprice_relations.png",width=800,height=600)
# How to save a table so that it can be included in the report:
#library(xtable)
#capture.output(print(xtable(cor(saltdata)),type="html"),file="cortable.html")
d4dcf37c3c9f137f6cf5d15a162285440d24d591
|
e9f111b3afcb23ae1ed30c1747de280e972b68a4
|
/code/fig5.R
|
fcaa8f32d60d29f53d8d0e3e8e14fa064ce685bd
|
[] |
no_license
|
santiagocdo/covid19paranoia
|
8543848e8d0c55ae4d41d8befb6fc05603c4fe42
|
d31285902cbd40a917d92ac417ca7318cd732e83
|
refs/heads/main
| 2023-09-03T11:25:27.539642
| 2021-11-12T21:48:49
| 2021-11-12T21:48:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40,368
|
r
|
fig5.R
|
### Analysis: Investigating impact of mask policy on paranoia and belief updating
###
### Figure 4A: US map of state mask policies (recommended vs mandated)
### during the reopening period.
###
### Figure: 4
### Written by: Praveen Suthaharan
# Clear environment.
rm(list=ls())
# Set working directory (machine-specific path).
setwd("C:/Pandemic_2020/revisions/data")
#install.packages("usmap")
library(usmap)
library(ggplot2)
library(dplyr)
# Read in mask data: one row per state, mwr = 0 (recommended) / 1 (required).
mask <- read.csv("Figure4A_mapData.csv")
# Convert mask policy from integer to factor for discrete fill mapping.
mask$mwr <- as.factor(mask$mwr)
# How many states recommend mask-wearing?
length(mask[which(mask$mwr == 0),]$abbr)
# How many states require?
length(mask[which(mask$mwr == 1),]$abbr)
# Figure 4A: US map colored by mask policy (orange = recommended,
# green = mandated); legend suppressed for the publication figure.
plot_usmap(data = mask, values = "mwr", color="black", labels = TRUE, label_color = "black") +
scale_fill_manual(values = c('0' = "#FFA500", '1' = "#008000"), name = "Mask Policy",
labels = c("Recommended","Mandated")) +
theme(legend.position = "none")
# --- Figure 4B: difference-in-differences (DiD) setup ---
rm(list=ls())
# Set working directory (machine-specific path).
setwd("C:/Pandemic_2020/revisions/data")
# Load libraries.
library(dplyr)
library(ggplot2)
library(ggpubr)
library(devtools)
library(openintro)
library(plotrix)
library(gridExtra)
library(boot) # library to perform bootstrap
source('C:\\Pandemic_2020\\revisions\\code\\theme_publication.R')
# Read data.
dat <- read.csv('pandemicPRL.csv')
# Extract the pandemic dataset from January to July
# (i.e., pre-lockdown through post-lockdown periods).
dat_pandemic <- dat[which(dat$dataset == "pandemic" & (dat$period == "prelockdown" |
dat$period == "lockdown" |
dat$period == "postlockdown")),]
# Paranoia score = mean over all rgpts_* questionnaire columns.
dat_pandemic$paranoia_score <- rowMeans(dat_pandemic[, grepl("rgpts_", names(dat_pandemic))], na.rm = TRUE)
# Dummy variables for the DiD design: time (pre/post, split at month 4)
# and group membership (state mask policy: recommended vs required).
dat_pandemic = dat_pandemic %>%
mutate(time = month >= 4,
mask_policy = state_mask_mandate >= 1)
# Rename boolean group values to readable labels.
dat_pandemic$mask_policy[which(dat_pandemic$mask_policy == "FALSE")] <- "Recommended"
dat_pandemic$mask_policy[which(dat_pandemic$mask_policy == "TRUE")] <- "Required"
# Data frame with only the features the DiD bootstrap needs.
did_df <- data.frame(month = dat_pandemic$month,
time = dat_pandemic$time,
mask_policy = dat_pandemic$mask_policy,
paranoia = dat_pandemic$paranoia_score
)
# Difference-in-differences estimate of the mask-policy effect on paranoia:
# the (post - pre) change in the "Required" states minus the (post - pre)
# change in the "Recommended" states, i.e. [(C-A)-(D-B)].
# Signature follows the boot() convention: `indices` selects the bootstrap
# resample of rows from `my_data`.
run_DiD <- function(my_data, indices) {
  d <- my_data[indices, ]
  # Mean paranoia within one (period, policy) cell. `time` is compared
  # against "TRUE"/"FALSE" so logical and character encodings both work.
  cell_mean <- function(period, policy) {
    rows <- which(d$time == period & d$mask_policy == policy)
    mean(d[rows, ]$paranoia)
  }
  change_required <- cell_mean("TRUE", "Required") - cell_mean("FALSE", "Required")
  change_recommended <- cell_mean("TRUE", "Recommended") - cell_mean("FALSE", "Recommended")
  change_required - change_recommended
}
# DiD as a regression: the time x policy interaction term is the DiD estimate.
regression_DiD = lm(paranoia_score ~ time*mask_policy, data = dat_pandemic)
summary(regression_DiD)
## Statistics (recorded from the run on the full dataset):
# F(3,529) = 10.62
# p(DiD) = 0.0328
# DiD estimate = 0.39621 (or a 39.6% increase)
# Bootstrap the DiD estimate (seed fixed for reproducibility).
set.seed(555)
boot_est <- boot(did_df, run_DiD, R=1000, parallel="multicore", ncpus = 2)
boot_est
# Bootstrapped two-sided p-value: share of centered bootstrap replicates
# that exceed the observed estimate in absolute value.
pvalue <- mean(abs(boot_est$t0) < abs(boot_est$t-mean(boot_est$t)))
pvalue
# 0.038
# Other summaries of the bootstrap distribution:
# 95% percentile interval, standardized estimate, and diagnostic plots.
quantile(boot_est$t, c(0.025, 0.975))
boot_est$t0/sd(boot_est$t)
plot(density(boot_est$t))
plot(boot_est$t, type="l")
# Figure 4B: mean paranoia by month for each policy group; the dashed
# vertical line marks the pre/post split used in the DiD.
fig_4b <- ggplot(did_df, aes(x=month, y=paranoia, color = mask_policy)) +
stat_summary(geom = 'smooth', size=2) +
geom_vline(xintercept = 3, linetype = 'dashed', size=1) +
labs(x = "",
y = "",
color = "")+
scale_color_manual(values=c("#FFA500","#008000"))+
scale_x_continuous(breaks = c(1,2,3,4,5),
labels = c("Jan","Mar","Apr","Jun","Jul"))
# Publication theme: all axis text/legend stripped for figure assembly.
fig_4b + theme_Publication() + theme(axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text = element_blank(),
axis.line = element_line(colour="black", size = 1.5),
axis.ticks = element_line(colour="black", size = 1.5),
legend.text = element_blank(),
legend.position = "none")
################################################################################################
################################################################################################
################################################################################################
# --- Figure 4B panels: behavior (win-switch / lose-stay rates) by mask policy ---
rm(list=ls())
# Set working directory (machine-specific path).
setwd("C:/Pandemic_2020/revisions/data")
# Load libraries. NOTE(review): many of these loads are duplicated below.
library(dplyr)
library(ggplot2)
library(ggpubr)
#install.packages("devtools")
library(devtools)
#devtools::install_github("kassambara/easyGgplot2")
#library(easyGgplot2)
library(openintro)
library(plotrix)
library(gridExtra)
library(dplyr)
library(ggplot2)
library(plotrix)
library(tidyverse)
library(ggpubr)
library(gridExtra)
library(reshape2)
library(openintro)
library(lubridate)
library(anytime)
source('C:\\Pandemic_2020\\revisions\\code\\theme_publication.R')
# Read data.
dat <- read.csv('data.csv')
#### WSR, Mu3 and protesting in required vs recommended states.
# Keep only the pandemic reopening (post-lockdown) period.
pandemic_postlockdown <- dat[which(dat$dataset == "pandemic" & dat$period == "postlockdown"),]
# Per-subject behavioral/belief measures; wsr/lsr are averaged over the
# two task blocks. mwr: 0 = mask recommended, 1 = mask required.
pandemic_postlockdown_df <- data.frame(mwr = pandemic_postlockdown$state_mask_mandate,
wsr = rowMeans(cbind(pandemic_postlockdown$wsr_block1,
pandemic_postlockdown$wsr_block2)),
lsr = rowMeans(cbind(pandemic_postlockdown$lsr_block1,
pandemic_postlockdown$lsr_block2)),
mu03_block1 = pandemic_postlockdown$mu03_1,
mu03_block1_precision = pandemic_postlockdown$mu03_1_precision,
mu02_block1 = pandemic_postlockdown$mu02_1,
mu02_block1_precision = pandemic_postlockdown$mu02_1_precision
)
# How many participants in the reopening period?
length(pandemic_postlockdown_df$mwr)
# Of those, how many are located in mask-recommended states?
length(pandemic_postlockdown_df[which(pandemic_postlockdown_df$mwr == 0),]$mwr)
# Mask-required states?
length(pandemic_postlockdown_df[which(pandemic_postlockdown_df$mwr == 1),]$mwr)
### 3.1.2: Win-switch rate (top figure)
fig4b_1 <- ggplot(pandemic_postlockdown_df, aes(x = mwr,
y= wsr,
fill = as.factor(mwr))) +
geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
geom_boxplot(alpha = 0.7, width=0.5, lwd=1.2) +
scale_fill_manual(name = "",
values = c("0" = "#FFA500", "1" = "#008000"))
# Publication theme: all axis text/legend stripped for figure assembly.
fig4b_1 + theme_Publication() + theme(axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text = element_blank(),
axis.line = element_line(colour="black", size = 1.5),
axis.ticks = element_line(colour="black", size = 1.5),
legend.text = element_blank(),
legend.position = "none")
# WSR: two-sample Welch's t-test by mask policy.
t.test(pandemic_postlockdown_df$wsr~ pandemic_postlockdown_df$mwr,
mu=0,
alt="two.sided",
conf=0.95,
var.eq=F,
paired=F)
# Effect size.
# NOTE(review): installing packages mid-script is a side effect; move
# install.packages() out of the analysis script.
install.packages("lsr")
library(lsr)
cohensD(wsr ~ mwr,
data = pandemic_postlockdown_df)
# LSR: same test for lose-stay rate.
t.test(pandemic_postlockdown_df$lsr~ pandemic_postlockdown_df$mwr,
mu=0,
alt="two.sided",
conf=0.95,
var.eq=F,
paired=F)
# FDR (Benjamini-Hochberg) correction over the WSR and LSR p-values
# (hard-coded from the test output above).
pvalues_behavior <- c(0.01986, 0.1357)
pvalues_behavior_sorted <- sort(pvalues_behavior)
p.adjust(pvalues_behavior_sorted, method = "BH")
### 3.3: Precision-weighted volatility belief (mu03) by mask policy.
# Boxplot of mu03 weighted by its precision; point size also encodes the
# precision weight.
fig4b_2 <- ggplot(pandemic_postlockdown_df, aes(x = mwr,
y= mu03_block1,
fill = as.factor(mwr),
size = mu03_block1_precision)) +
geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
geom_boxplot(mapping = aes(weight = mu03_block1_precision),
alpha = 0.7, width=0.5, lwd=1.2) +
scale_fill_manual(name = "",
values = c("0" = "#FFA500", "1" = "#008000"))
# Publication theme: all axis text/legend stripped for figure assembly.
fig4b_2 + theme_Publication() + theme(axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text = element_blank(),
axis.line = element_line(colour="black", size = 1.5),
axis.ticks = element_line(colour="black", size = 1.5),
legend.text = element_blank(),
legend.position = "none")
# Weighted two-sample t-test on mu03 (weights = per-subject precision).
library(weights)
mu03_wtdttest <- wtd.t.test(x=pandemic_postlockdown_df$mu03_block1[pandemic_postlockdown_df$mwr==0],
y=pandemic_postlockdown_df$mu03_block1[pandemic_postlockdown_df$mwr==1],
weight=pandemic_postlockdown_df$mu03_block1_precision[pandemic_postlockdown_df$mwr==0],
weighty=pandemic_postlockdown_df$mu03_block1_precision[pandemic_postlockdown_df$mwr==1],
samedata=FALSE)# t(141) = 0.62
# p-value = 0.00027 (2.680758e-04)
# wtd.mu[recommended] = -0.51
# wtd.mu[required] = 0.28
# 95% CI calculation from the wtd.t.test output.
# NOTE(review): margin_error below uses the OBSERVED t statistic rather
# than the critical t quantile (qt(0.975, df)), so this is mean +/- t*SE,
# not a conventional 95% CI -- confirm the intended interval.
t_score <- mu03_wtdttest$coefficients[1]
sample_se <- mu03_wtdttest$additional[4]
sample_mean_difference <- mu03_wtdttest$additional[1]
margin_error <- t_score*sample_se
lower_bound <- sample_mean_difference - margin_error
upper_bound <- sample_mean_difference + margin_error
print(c(lower_bound,upper_bound))
# Cohen's d.
# BUG FIX: the original read 'sample_mean/sample_se', but 'sample_mean'
# was never defined (the variable defined above is sample_mean_difference),
# so this line errored. NOTE(review): Cohen's d is conventionally the mean
# difference divided by a (pooled) SD, not the SE; as written this ratio
# equals the t statistic -- confirm the intended effect-size formula.
Cohen_d <- sample_mean_difference/sample_se
# Weighted two-sample t-test on mu02 (same structure as mu03 above).
wtd.t.test(x=pandemic_postlockdown_df$mu02_block1[pandemic_postlockdown_df$mwr==0],
y=pandemic_postlockdown_df$mu02_block1[pandemic_postlockdown_df$mwr==1],
weight=pandemic_postlockdown_df$mu02_block1_precision[pandemic_postlockdown_df$mwr==0],
weighty=pandemic_postlockdown_df$mu02_block1_precision[pandemic_postlockdown_df$mwr==1],
samedata=FALSE)# t(141) = 0.62
# p-value = 0.02 (0.02370412)
# wtd.mu[recommended] = -0.21
# wtd.mu[required] = -0.10
# FDR (Benjamini-Hochberg) correction over the mu03 and mu02 p-values
# (hard-coded from the test output above).
pvalues_belief <- c(2.680758e-04, 0.02370412)
pvalues_belief_sorted <- sort(pvalues_belief)
p.adjust(pvalues_belief_sorted, method = "BH")
## Protests: daily protest counts in mask-recommended vs mask-required states.
dat_protest <- read.csv("Figure4B_protest.csv")
# Keep only rows classified as protests.
dat_protest <- dat_protest[which(dat_protest$EVENT_TYPE == "Protests"),]
protest_df <- data.frame(date = dat_protest$EVENT_DATE,
date2 = anydate(dat_protest$EVENT_DATE),
type = dat_protest$EVENT_TYPE,
#association = dat_protest$ASSOC_ACTOR_1,
state = dat_protest$ADMIN1)
# Convert full state names to two-letter abbreviations.
protest_df$state <- state2abbr(protest_df$state)
# Hard-coded policy membership lists (must match the Figure 4A map data).
mask_required_states <- c("CA","NM","MI","IL","NY","MA","RI","MD","VA","ME","DE")
mask_recommend_states <- c("AK","AL","AR","AZ","CO","CT","DC","FL","GA","HI","IA",
"ID","IN","KS","KY","LA","MN","MO","MS","MT","NC","ND",
"NE","NH","NJ","NV","OH","OK","OR","PA","SC","SD","TN",
"TX","UT","VT","WA","WI","WV","WY")
# Label each protest by the policy of its state; states in neither list
# get an empty label and are silently carried through.
protest_df$mask <- ifelse(protest_df$state %in% mask_required_states, "required",
ifelse(protest_df$state %in% mask_recommend_states, "recommended","")
)
# Analyze data up to July 17th (end date of our pandemic data collection).
# NOTE(review): the cutoff is a hard-coded row range (1:8656), which
# assumes a fixed row order in the CSV; filtering on date2 would be safer.
protest_subset_df <- protest_df[c(1:8656),]
# Daily protest counts per policy group.
protest_count_summary <- protest_subset_df %>%
dplyr::group_by(date2,mask) %>%
dplyr::summarise(protest_count = n())
protest_count_summary$mask <- factor(protest_count_summary$mask, levels = c("recommended","required"))
# Boxplot of daily protest counts by policy group.
fig4b_3 <- ggplot(protest_count_summary, aes(x = mask,
y= protest_count,
fill = as.factor(mask))) +
geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
geom_boxplot(alpha = 0.7, width=0.5, lwd=1.2) +
scale_fill_manual(name = "",
values = c("recommended" = "#FFA500", "required" = "#008000"))
# Publication theme: all axis text/legend stripped for figure assembly.
fig4b_3 + theme_Publication() + theme(axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text = element_blank(),
axis.line = element_line(colour="black", size = 1.5),
axis.ticks = element_line(colour="black", size = 1.5),
legend.text = element_blank(),
legend.position = "none")
# Two-sample Welch's t-test for daily protest count by policy group.
t.test(protest_count_summary$protest_count ~ protest_count_summary$mask,
mu=0,
alt="two.sided",
conf=0.95,
var.eq=F,
paired=F)
cohensD(protest_count ~ mask, protest_count_summary)
################################################################################################
# ---- Figure 4C preparation: post-lockdown (reopening) data + Dynata survey ----
# NOTE(review): rm(list=ls()) and setwd() with an absolute path are
# machine-specific; consider a project-relative workflow before sharing.
rm(list=ls())
# set working directory
setwd("C:/Pandemic_2020/revisions/data")
# load libraries
library(dplyr)
library(ggplot2)
library(ggpubr)
library(plotrix)
source("C:/Pandemic_2020/revisions/code/theme_publication.R")
# read data
dat <- read.csv('data.csv')
dynata <- read.csv('dynata.csv')
# extract pandemic, postlockdown data
pandemic_reopening <- dat[which(dat$dataset == "pandemic" & dat$period == "postlockdown"),]
# june.masks.req.nine <- c("CA","NM","MI","IL","NY","MA","RI","MD","VA")
# june.states.mask.rec <- c("AK","AL","AR","AZ","CO","CT","DC","FL","GA","HI","IA",
#                           "ID","IN","KS","KY","LA","MN","MO","MS","MT","NC","ND",
#                           "NE","NH","NJ","NV","OH","OK","OR","PA","SC","SD","TN",
#                           "TX","UT","VT","WA","WI","WV","WY")
# list of states with mask policies
states_june_mwr <- c("CA","NM","MI","IL","NY","MA","RI","MD","VA",
                     "AK","AL","AR","AZ","CO","CT","DC","FL","GA","HI","IA",
                     "ID","IN","KS","KY","LA","MN","MO","MS","MT","NC","ND",
                     "NE","NH","NJ","NV","OH","OK","OR","PA","SC","SD","TN",
                     "TX","UT","VT","WA","WI","WV","WY")
# extract dynata data for those states
dynata <- dynata[which(dynata$state %in% states_june_mwr),]
dynata.df <- data.frame(state = dynata$state,
                        mask.response = dynata$mask,
                        value = dynata$respondents)
# recode mask_response: map frequency labels onto a 1-5 scale
# ("Not at all" = 1 ... "Always" = 5; anything else -> "" -> NA below)
dynata.df$mask.response <- ifelse(dynata.df$mask.response == "Always",5,
                                  ifelse(dynata.df$mask.response == "Frequently",4,
                                         ifelse(dynata.df$mask.response == "Sometimes",3,
                                                ifelse(dynata.df$mask.response == "Rarely",2,
                                                       ifelse(dynata.df$mask.response == "Not at all",1,"")))))
dynata.df$mask.response <- as.numeric(dynata.df$mask.response)
# respondents per (state, response level)
dynata.count <- dynata.df %>%
  dplyr::group_by(state, mask.response) %>%
  dplyr::summarise(count.respondents = sum(value, na.rm = TRUE))
# total respondents per state
dynata.sum <- dynata.count %>%
  dplyr::group_by(state) %>%
  dplyr::mutate(sum.count = sum(count.respondents, na.rm = TRUE))
# relative frequency of each response level within each state
dynata.relFreq <- dynata.sum %>%
  dplyr::group_by(state) %>%
  dplyr::mutate(rel.freq = count.respondents/sum.count)
# keep only the "Always" (= 5) rows: rel.freq is the share of respondents in
# each state who report always wearing a mask
dynata.fiveAlways <- dynata.relFreq[which(dynata.relFreq$mask.response == 5),]
colnames(dynata.fiveAlways) <- c("State","mask.response","count.respondents","sum.count","rel.freq")
# create dataframe with relevant features
# wsr/lsr = win-stay / lose-switch rates; mu02/mu03 = belief estimates with
# their precisions; all are averaged across the two task blocks.
pandemic_reopening_df <- data.frame(State = pandemic_reopening$state,
                                    MWR = pandemic_reopening$state_mask_mandate,
                                    CTL = pandemic_reopening$state_ctl,
                                    wsr = rowMeans(cbind(pandemic_reopening$wsr_block1,
                                                         pandemic_reopening$wsr_block2)),
                                    lsr = rowMeans(cbind(pandemic_reopening$lsr_block1,
                                                         pandemic_reopening$lsr_block2)),
                                    mu03 = rowMeans(cbind(pandemic_reopening$mu03_1,
                                                          pandemic_reopening$mu03_2)),
                                    mu02 = rowMeans(cbind(pandemic_reopening$mu02_1,
                                                          pandemic_reopening$mu02_2)),
                                    mu03_precision = rowMeans(cbind(pandemic_reopening$mu03_1_precision,
                                                                    pandemic_reopening$mu03_2_precision)),
                                    mu02_precision = rowMeans(cbind(pandemic_reopening$mu02_1_precision,
                                                                    pandemic_reopening$mu02_2_precision)))
# median split on state CTL (cultural tightness-looseness score): at or below
# the median = "loose", above = "tight"
pandemic_reopening_df$CTL_group <- ifelse(pandemic_reopening_df$CTL <= median(pandemic_reopening_df$CTL, na.rm = TRUE), "loose","tight")
# join the Dynata "always wears a mask" share by State
pandemic_reopening_dynata <- left_join(pandemic_reopening_df, dynata.fiveAlways)
# recommended states
pandemic_reopening_dynata_rec <- pandemic_reopening_dynata[which(pandemic_reopening_dynata$MWR == 0),]
# How many loose, recommended states?
nrow(pandemic_reopening_dynata_rec[which(pandemic_reopening_dynata_rec$CTL_group == "loose"),])
# tight, recommended states?
nrow(pandemic_reopening_dynata_rec[which(pandemic_reopening_dynata_rec$CTL_group == "tight"),])
# mean and SEM of the mask-wearing share per CTL group (std.error from plotrix)
stats_rec <- pandemic_reopening_dynata_rec %>%
  dplyr::group_by(CTL_group) %>%
  dplyr::summarise(mask_wearing_belief_mean = mean(rel.freq, na.rm = TRUE),
                   mask_wearing_belief_sem = std.error(rel.freq, na.rm = TRUE))
# Figure 4C (left panel): bar + error bar + jittered participant points
fig4c_1 <- ggplot(stats_rec, aes(x = CTL_group, y= mask_wearing_belief_mean, fill = as.factor(CTL_group))) +
  geom_bar(stat="identity", width=.5, color = "black", lwd=1.2) +
  geom_errorbar(aes(x=CTL_group, ymin=mask_wearing_belief_mean-mask_wearing_belief_sem, ymax=mask_wearing_belief_mean+mask_wearing_belief_sem), width=0.2, colour="black", alpha=0.9, size=2) +
  geom_point(data=pandemic_reopening_dynata_rec, aes(x=CTL_group, y=rel.freq), position = position_jitter(width = .15),
             shape=21, fill="black", size=1.5) +
  scale_fill_manual("",values = c("#9370DB", "#8B008B"))
fig4c_1 + theme_Publication() + theme(axis.title.y = element_blank(),
                                      axis.title.x = element_blank(),
                                      axis.text = element_blank(),
                                      axis.line = element_line(colour="black", size = 1.5),
                                      axis.ticks = element_line(colour="black", size = 1.5),
                                      legend.text = element_blank(),
                                      legend.position = "none")
# t-test: mask-wearing belief difference by CTL in mask-recommended states
# 38 - loose, 82 - tight
t.test(pandemic_reopening_dynata_rec$rel.freq ~ pandemic_reopening_dynata_rec$CTL_group,
       mu=0,
       alt="two.sided",
       conf=0.95,
       var.eq=F,
       paired=F)
# NOTE(review): cohensD() is from the lsr package (not attached in the
# visible library() calls) -- verify it is loaded.
cohensD(rel.freq ~ CTL_group, pandemic_reopening_dynata_rec)
# required states: repeat the CTL-group comparison for mask-mandated states
pandemic_reopening_dynata_req <- pandemic_reopening_dynata[which(pandemic_reopening_dynata$MWR == 1),]
# How many loose, required states?
nrow(pandemic_reopening_dynata_req[which(pandemic_reopening_dynata_req$CTL_group == "loose"),])
# tight, required states?
nrow(pandemic_reopening_dynata_req[which(pandemic_reopening_dynata_req$CTL_group == "tight"),])
# mean and SEM of the mask-wearing share per CTL group (std.error from plotrix)
stats_req <- pandemic_reopening_dynata_req %>%
  dplyr::group_by(CTL_group) %>%
  dplyr::summarise(mask_wearing_belief_mean = mean(rel.freq, na.rm = TRUE),
                   mask_wearing_belief_sem = std.error(rel.freq, na.rm = TRUE))
# Figure 4C (right panel): bar + error bar + jittered participant points
fig4c_2 <- ggplot(stats_req, aes(x = CTL_group, y= mask_wearing_belief_mean, fill = as.factor(CTL_group))) +
  geom_bar(stat="identity", width=.5, color = "black", lwd=1.2) +
  geom_errorbar(aes(x=CTL_group, ymin=mask_wearing_belief_mean-mask_wearing_belief_sem, ymax=mask_wearing_belief_mean+mask_wearing_belief_sem), width=0.2, colour="black", alpha=0.9, size=2) +
  geom_point(data=pandemic_reopening_dynata_req, aes(x=CTL_group, y=rel.freq), position = position_jitter(width = .15),
             shape=21, fill="black", size=1.5) +
  scale_fill_manual("",values = c("#9370DB", "#8B008B"))
fig4c_2 + theme_Publication() + theme(axis.title.y = element_blank(),
                                      axis.title.x = element_blank(),
                                      axis.text = element_blank(),
                                      axis.line = element_line(colour="black", size = 1.5),
                                      axis.ticks = element_line(colour="black", size = 1.5),
                                      legend.text = element_blank(),
                                      legend.position = "none")
# t-test: mask-wearing belief difference by CTL in mask-required states
# 48 - loose, 4 - tight
# NOTE(review): only 4 states in the tight group -- this test is severely
# underpowered; interpret with caution.
t.test(pandemic_reopening_dynata_req$rel.freq ~ pandemic_reopening_dynata_req$CTL_group,
       mu=0,
       alt="two.sided",
       conf=0.95,
       var.eq=F,
       paired=F)
cohensD(rel.freq ~ CTL_group, pandemic_reopening_dynata_req)
## sample size summary
# recommended states by CTL
rec_total <- nrow(pandemic_reopening_dynata_rec)
rec_loose <- nrow(pandemic_reopening_dynata_rec[which(pandemic_reopening_dynata_rec$CTL_group == "loose"),])
rec_tight <- nrow(pandemic_reopening_dynata_rec[which(pandemic_reopening_dynata_rec$CTL_group == "tight"),])
# required states by CTL
req_total <- nrow(pandemic_reopening_dynata_req)
req_loose <- nrow(pandemic_reopening_dynata_req[which(pandemic_reopening_dynata_req$CTL_group == "loose"),])
req_tight <- nrow(pandemic_reopening_dynata_req[which(pandemic_reopening_dynata_req$CTL_group == "tight"),])
# Figure 4C (recommended states) -- boxplot version with sample sizes in the
# subtitle (bquote interpolates the counts computed above)
p1 <- ggplot(pandemic_reopening_dynata_rec, aes(x=CTL_group,
                                                y=rel.freq,
                                                fill = CTL_group)) +
  geom_boxplot(alpha = 0.7) +
  scale_y_continuous(name = "Mask-wearing belief") +
  scale_x_discrete(name = "CTL") +
  labs(title = "Mask-recommended states",
       subtitle = bquote("(N" == .(rec_total) *
                           ";" ~
                           n[loose] == .(rec_loose) ~
                           "," ~
                           n[tight] == .(rec_tight)*
                           ")" )
  ) +
  scale_fill_manual(name = "CTL",
                    values = c("loose" = "#9370DB", "tight" = "#8B008B"))
p1 <- p1 + theme_Publication()
p1
# Figure 4C (required states) -- boxplot version
p2 <- ggplot(pandemic_reopening_dynata_req, aes(x=CTL_group,
                                                y=rel.freq,
                                                fill = CTL_group)) +
  geom_boxplot(alpha = 0.7) +
  scale_y_continuous(name = "Mask-wearing belief") +
  scale_x_discrete(name = "CTL") +
  labs(title = "Mask-mandated states",
       subtitle = bquote("(N" == .(req_total) *
                           ";" ~
                           n[loose] == .(req_loose) ~
                           "," ~
                           n[tight] == .(req_tight)*
                           ")" )
  ) +
  scale_fill_manual(name = "CTL",
                    values = c("loose" = "#9370DB", "tight" = "#8B008B"))
p2 <- p2 + theme_Publication()
p2
################################################################################################
################################################################################################
################################################################################################
# ---- Figure 4D preparation: replication dataset, paranoia grouping ----
# NOTE(review): rm(list=ls()) and an absolute setwd() are machine-specific;
# consider a project-relative workflow before sharing.
rm(list=ls())
# set working directory
setwd("C:/Pandemic_2020/revisions/data")
# load libraries/other packages
library(dplyr)
library(tidyverse)
library(ggpubr)
source("C:/Pandemic_2020/revisions/code/theme_publication.R") # source: https://rpubs.com/Koundy/71792
library(gridExtra)
library(DT)
# read in data
dat <- read.csv("data.csv",
                stringsAsFactors = FALSE)
# subset for replication data
dat_replication <- dat[which(dat$dataset == "replication"),]
# calculate paranoia scores: mean over all gpts_* (paranoia scale) items
dat_replication$paranoia_score <- rowMeans(dat_replication[, grepl("gpts_", names(dat_replication))], na.rm = TRUE)
# calculate paranoia grouping: "high" if the sum of the gpts_per*
# (persecution) items exceeds 10, otherwise "low"
dat_replication$paranoia_group <- ifelse(rowSums(dat_replication[, grepl("gpts_per", names(dat_replication))], na.rm = TRUE) > 10, "high","low")
# recode and calculate covid vaccine conspiracy belief score and vaccine attitude
# Keep paranoia variables, the 16 mask items, and the task behavior/belief
# columns (selected by name-pattern so the block sizes can vary).
dat_replication_df <- dat_replication %>%
  select(paranoia_score, paranoia_group,
         mask_behavior_1:mask_behavior_16,
         #qanon_rating,
         #covid_conspiracy_vaccine_1:covid_conspiracy_vaccine_5,
         #vaccine_scale_1:vaccine_scale_5,
         names(dat_replication[, grepl("wsr_", names(dat_replication))]),
         names(dat_replication[, grepl("lsr_", names(dat_replication))]),
         names(dat_replication[, grepl("mu02_", names(dat_replication))]),
         names(dat_replication[, grepl("mu03_", names(dat_replication))]),
         names(dat_replication[, grepl("kappa2_", names(dat_replication))]),
         names(dat_replication[, grepl("omega2_", names(dat_replication))]),
         names(dat_replication[, grepl("omega3_", names(dat_replication))]))
# Recode the 16 mask_behavior survey items onto a common [-1, 1] scale
# (negative = less pro-mask response) and average them into a composite
# mask attitude score.
# NOTE(review): the original comment said Q2, Q4 and Q5 were excluded, but
# mask_attitude_score below averages columns 3:18 (all items) -- confirm
# which is intended.
# Question 1
dat_replication_df$mask_behavior_1 <- ifelse(dat_replication_df$mask_behavior_1 == "Yes",1,
                                             ifelse(dat_replication_df$mask_behavior_1 == "No",-1,""))
# convert character to numeric ("" recodes become NA, with a coercion warning)
# IDIOM FIX: call as.numeric() on the column directly; the original
# as.data.frame(sapply(...)) silently embedded a one-column data.frame as a
# column of dat_replication_df (same fix applied to every single-column
# conversion below; the numeric values are unchanged).
dat_replication_df$mask_behavior_1 <- as.numeric(dat_replication_df$mask_behavior_1)
# Question 2
dat_replication_df$mask_behavior_2 <- ifelse(dat_replication_df$mask_behavior_2 == "Yes",1,
                                             ifelse(dat_replication_df$mask_behavior_2 == "No",-1,""))
#if prior question was a 'no'
#dat_replication_df <- within(dat_replication_df, mask_behavior_2[mask_behavior_1 != -1 & !is.na(mask_behavior_2)] <- '')
# convert character to numeric
dat_replication_df$mask_behavior_2 <- as.numeric(dat_replication_df$mask_behavior_2)
# Question 3 (six likelihood items, columns 5:10 = mask_behavior_3..8)
dat_replication_df[,5:10] <- ifelse(dat_replication_df[,5:10] == "Extremely unlikely",-1,
                                    ifelse(dat_replication_df[,5:10] == "Unlikely",-0.5,
                                           ifelse(dat_replication_df[,5:10] == "No plans to go",0,
                                                  ifelse(dat_replication_df[,5:10] == "Likely",0.5,
                                                         ifelse(dat_replication_df[,5:10] == "Extremely likely",1,"")))))
# convert character to numeric (sapply over the columns yields a matrix here,
# so as.data.frame() produces proper columns)
dat_replication_df[,5:10] <- as.data.frame(sapply(dat_replication_df[,5:10], as.numeric))
# Question 4
dat_replication_df$mask_behavior_9 <- ifelse(dat_replication_df$mask_behavior_9 == "Yes",1,
                                             ifelse(dat_replication_df$mask_behavior_9 == "Not sure",0,
                                                    ifelse(dat_replication_df$mask_behavior_9 == "No",-1,"")))
# convert character to numeric
dat_replication_df$mask_behavior_9 <- as.numeric(dat_replication_df$mask_behavior_9)
# Question 5 (0-100 slider items, columns 12:14, binned onto [-1, 1])
dat_replication_df[,12:14] <- ifelse(dat_replication_df[,12:14] >= 0 & dat_replication_df[,12:14] < 25,-1,
                                     ifelse(dat_replication_df[,12:14] >=25 & dat_replication_df[,12:14] < 50,-0.5,
                                            ifelse(dat_replication_df[,12:14] == 50,0,
                                                   ifelse(dat_replication_df[,12:14] > 50 & dat_replication_df[,12:14] <= 75,0.5,
                                                          ifelse(dat_replication_df[,12:14] > 75 & dat_replication_df[,12:14] <= 100,1,"")))))
# convert character to numeric
dat_replication_df[,12:14] <- as.data.frame(sapply(dat_replication_df[,12:14], as.numeric))
# Question 6
dat_replication_df$mask_behavior_13 <- ifelse(dat_replication_df$mask_behavior_13 == "More comfortable",1,
                                              ifelse(dat_replication_df$mask_behavior_13 == "Indifferent",0,
                                                     ifelse(dat_replication_df$mask_behavior_13 == "Less comfortable",-1,"")))
# convert character to numeric
dat_replication_df$mask_behavior_13 <- as.numeric(dat_replication_df$mask_behavior_13)
# Question 7
dat_replication_df$mask_behavior_14 <- ifelse(dat_replication_df$mask_behavior_14 == "More comfortable",1,
                                              ifelse(dat_replication_df$mask_behavior_14 == "Indifferent",0,
                                                     ifelse(dat_replication_df$mask_behavior_14 == "Less comfortable",-1,"")))
# convert character to numeric
dat_replication_df$mask_behavior_14 <- as.numeric(dat_replication_df$mask_behavior_14)
# Question 8
dat_replication_df$mask_behavior_15 <- ifelse(dat_replication_df$mask_behavior_15 == "Yes, a lot",1,
                                              ifelse(dat_replication_df$mask_behavior_15 == "Yes, some",0.5,
                                                     ifelse(dat_replication_df$mask_behavior_15 == "Not sure",0,
                                                            ifelse(dat_replication_df$mask_behavior_15 == "No, it does nothing",-0.5,
                                                                   ifelse(dat_replication_df$mask_behavior_15 == "No, it increases the spread",-1,"")))))
# convert character to numeric
dat_replication_df$mask_behavior_15 <- as.numeric(dat_replication_df$mask_behavior_15)
# Question 9 (reverse scored: "Yes" is the anti-mask response)
dat_replication_df$mask_behavior_16 <- ifelse(dat_replication_df$mask_behavior_16 == "Yes",-1,
                                              ifelse(dat_replication_df$mask_behavior_16 == "Not sure",0,
                                                     ifelse(dat_replication_df$mask_behavior_16 == "No",1,"")))
# convert character to numeric
dat_replication_df$mask_behavior_16 <- as.numeric(dat_replication_df$mask_behavior_16)
# compute mask attitude score: row mean over the 16 recoded items (cols 3:18)
dat_replication_df$mask_attitude_score <- rowMeans(dat_replication_df[, c(3:18)], na.rm = TRUE)
# calculate task behavior/beliefs: win-stay (wsr) / lose-switch (lsr) rates
# and belief estimates (mu02, mu03) with their precisions, each averaged
# across the two task blocks.
dat_replication_df$wsr <- rowMeans(cbind(dat_replication_df$wsr_block1,
                                         dat_replication_df$wsr_block2), na.rm = TRUE)
dat_replication_df$lsr <- rowMeans(cbind(dat_replication_df$lsr_block1,
                                         dat_replication_df$lsr_block2), na.rm = TRUE)
dat_replication_df$mu02 <- rowMeans(cbind(dat_replication_df$mu02_1,
                                          dat_replication_df$mu02_2), na.rm = TRUE)
dat_replication_df$mu02_precision <- rowMeans(cbind(dat_replication_df$mu02_1_precision,
                                                    dat_replication_df$mu02_2_precision), na.rm = TRUE)
dat_replication_df$mu03 <- rowMeans(cbind(dat_replication_df$mu03_1,
                                          dat_replication_df$mu03_2), na.rm = TRUE)
dat_replication_df$mu03_precision <- rowMeans(cbind(dat_replication_df$mu03_1_precision,
                                                    dat_replication_df$mu03_2_precision), na.rm = TRUE)
# fix factor order so "low" plots before "high"
dat_replication_df$paranoia_group <- factor(dat_replication_df$paranoia_group, levels = c("low","high"))
# how many participants in the replication study set?
nrow(dat_replication_df)
# how many are low paranoia?
nrow(dat_replication_df[which(dat_replication_df$paranoia_group == "low"),])
# high?
nrow(dat_replication_df[which(dat_replication_df$paranoia_group == "high"),])
# Figure 4D (panel 1): mask attitude by paranoia group
fig4d_1 <- ggplot(dat_replication_df, aes(x = paranoia_group,
                                          y= mask_attitude_score,
                                          fill = as.factor(paranoia_group))) +
  geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
  geom_boxplot(alpha = 0.7, width=0.5, lwd=1.2) +
  scale_fill_manual(name = "",
                    values = c("low" = "#FADBD8", "high" = "#E74C3C"))
fig4d_1 + theme_Publication() + theme(axis.title.y = element_blank(),
                                      axis.title.x = element_blank(),
                                      axis.text = element_blank(),
                                      axis.line = element_line(colour="black", size = 1.5),
                                      axis.ticks = element_line(colour="black", size = 1.5),
                                      legend.text = element_blank(),
                                      legend.position = "none")
# Mask attitude: Welch two-sample t-test, low vs high paranoia
t.test(dat_replication_df$mask_attitude_score ~ dat_replication_df$paranoia_group,
       mu=0,
       alt="two.sided",
       conf=0.95,
       var.eq=F,
       paired=F)
# NOTE(review): cohensD() is from the lsr package, not attached in the
# visible library() calls -- verify it is loaded.
cohensD(mask_attitude_score ~ paranoia_group, dat_replication_df)
# Figure 4D (panel 2): win-stay rate by paranoia group
fig4d_2 <- ggplot(dat_replication_df, aes(x = paranoia_group,
                                          y= wsr,
                                          fill = as.factor(paranoia_group))) +
  geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
  geom_boxplot(alpha = 0.7, width=0.5, lwd=1.2) +
  scale_fill_manual(name = "",
                    values = c("low" = "#FADBD8", "high" = "#E74C3C"))
fig4d_2 + theme_Publication() + theme(axis.title.y = element_blank(),
                                      axis.title.x = element_blank(),
                                      axis.text = element_blank(),
                                      axis.line = element_line(colour="black", size = 1.5),
                                      axis.ticks = element_line(colour="black", size = 1.5),
                                      legend.text = element_blank(),
                                      legend.position = "none")
# WSR
t.test(dat_replication_df$wsr ~ dat_replication_df$paranoia_group,
       mu=0,
       alt="two.sided",
       conf=0.95,
       var.eq=F,
       paired=F)
cohensD(wsr ~ paranoia_group, dat_replication_df)
# LSR
t.test(dat_replication_df$lsr ~ dat_replication_df$paranoia_group,
       mu=0,
       alt="two.sided",
       conf=0.95,
       var.eq=F,
       paired=F)
# fdr correction using WSR and LSR
# NOTE(review): p-values are hard-coded from the runs above; re-check the
# constants whenever the data change.
pvalues_behavior <- c(5.077e-09, 0.0003023)
pvalues_behavior_sorted <- sort(pvalues_behavior)
pvalues_behavior_sorted
p.adjust(pvalues_behavior_sorted, method = "BH")
### 3.3: weighted Mu3
# Figure 4D (panel 3): volatility belief (mu03) by paranoia group; points are
# sized and the boxplot weighted by per-subject belief precision.
fig4d_3 <- ggplot(dat_replication_df, aes(x = paranoia_group,
                                          y= mu03,
                                          fill = as.factor(paranoia_group),
                                          size = mu03_precision)) +
  geom_point(shape=16,color="black",alpha=0.4,position = position_jitterdodge(), show.legend = FALSE) +
  geom_boxplot(mapping = aes(weight = mu03_precision),
               alpha = 0.7, width=0.5, lwd=1.2) +
  scale_fill_manual(name = "",
                    values = c("low" = "#FADBD8", "high" = "#E74C3C"))
fig4d_3 + theme_Publication() + theme(axis.title.y = element_blank(),
                                      axis.title.x = element_blank(),
                                      axis.text = element_blank(),
                                      axis.line = element_line(colour="black", size = 1.5),
                                      axis.ticks = element_line(colour="black", size = 1.5),
                                      legend.text = element_blank(),
                                      legend.position = "none")
# weighted t-test: mu03, low vs high paranoia, weighted by belief precision
mu03_mask_attitude_wtdttest <- wtd.t.test(x=dat_replication_df$mu03[dat_replication_df$paranoia_group=="low"],
                                          y=dat_replication_df$mu03[dat_replication_df$paranoia_group=="high"],
                                          weight=dat_replication_df$mu03_precision[dat_replication_df$paranoia_group=="low"],
                                          weighty=dat_replication_df$mu03_precision[dat_replication_df$paranoia_group=="high"],
                                          samedata=FALSE)# t(138) = -6.041
# p-value < 0.001 (1.349674e-08)
# wtd.mu[low] = -1.23
# wtd.mu[high] = -0.20
# 95% CI calculation
# NOTE(review): margin_error uses the *observed* t statistic rather than the
# critical t for alpha = .05 -- confirm this is the intended interval.
t_score <- mu03_mask_attitude_wtdttest$coefficients[1]
sample_se <- mu03_mask_attitude_wtdttest$additional[4]
sample_mean_difference <- mu03_mask_attitude_wtdttest$additional[1]
margin_error <- t_score*sample_se
lower_bound <- sample_mean_difference - margin_error
upper_bound <- sample_mean_difference + margin_error
print(c(lower_bound,upper_bound))
# Cohen's d
# BUG FIX: the original divided the undefined object `sample_mean` (a runtime
# error); the quantity computed above is `sample_mean_difference`.
# NOTE(review): Cohen's d is conventionally mean difference / pooled SD, not
# / SE -- verify the intended effect size.
Cohen_d <- sample_mean_difference / sample_se
Cohen_d
# weighted t-test: mu02, low vs high paranoia, weighted by belief precision
wtd.t.test(x=dat_replication_df$mu02[dat_replication_df$paranoia_group=="low"],
           y=dat_replication_df$mu02[dat_replication_df$paranoia_group=="high"],
           weight=dat_replication_df$mu02_precision[dat_replication_df$paranoia_group=="low"],
           weighty=dat_replication_df$mu02_precision[dat_replication_df$paranoia_group=="high"],
           samedata=FALSE)# t(177) = -3.286
# p-value < 0.001 (0.001225297)
# wtd.mu[low] = -0.258
# wtd.mu[high] = -0.151
# fdr correction using mu03 and mu02 (p-values hard-coded from the runs above)
pvalues_belief <- c(1.349674e-08, 0.001225297)
pvalues_belief_sorted <- sort(pvalues_belief)
pvalues_belief_sorted
p.adjust(pvalues_belief_sorted, method = "BH")
|
9cea5d5178c2b14503a9f35e354dac31043d2ce2
|
e2bb80ef3895fe815ee50182a40b8e531313f21e
|
/R/io-hdf5.R
|
ddee7a7456e350287c66f02cfb33da6749094e81
|
[] |
no_license
|
libertyh/rave
|
cdf4faf67550b9425da41979cbefacec1079fcdd
|
614fa360d157180057b075305213cfd5a25c5f84
|
refs/heads/master
| 2022-11-07T16:03:29.344116
| 2020-06-25T19:49:52
| 2020-06-25T19:49:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,882
|
r
|
io-hdf5.R
|
#' Lazy Load "HDF5" File via \code{\link[hdf5r]{hdf5r-package}}
#'
#' @description Wrapper for class \code{\link[rave]{LazyH5}}, which load data with
#' "lazy" mode - only read part of dataset when needed. If another process
#' holds a lock on the file, a read-only request falls back to a temporary
#' copy of the file.
#'
#' @param file "HDF5" file
#' @param name \code{group/data_name} path to dataset
#' @param read_only default is TRUE, read dataset only
#' @param ram load to RAM immediately
#'
#' @seealso \code{\link[rave]{save_h5}}
#' @export
load_h5 <- function(file, name, read_only = TRUE, ram = FALSE){
  h5_obj = tryCatch({
    opened = LazyH5$new(file_path = file, data_name = name, read_only = read_only)
    opened$open()
    opened
  }, error = function(e){
    # Write access cannot be recovered by copying; surface the lock conflict.
    if(!read_only){
      stop('Another process is locking the file. Cannot open file with write permission; use ', sQuote('save_h5'), ' instead...\n file: ', file, '\n name: ', name)
    }
    cat2('Open failed. Attempt to open with a temporary copy...', level = 'INFO')
    # Fails when other process holds a connection to it!
    # If read_only, then copy the file to local directory
    tmpf = tempfile(fileext = 'conflict.h5')
    file.copy(file, tmpf)
    LazyH5$new(file_path = tmpf, data_name = name, read_only = read_only)
  })
  # Lazy handle requested: hand it back without materializing the data.
  if(!ram){
    return(h5_obj)
  }
  # Materialize the whole dataset into memory, then release the handle.
  lazy_handle = h5_obj
  loaded = lazy_handle[]
  lazy_handle$close()
  loaded
}
#' Save objects to "HDF5" file without trivial checks
#' @param x array, matrix, or vector
#' @param file \code{HDF5} file
#' @param name path to dataset in format like \code{"group/data_name"}
#' @param chunk chunk size
#' @param level compress level
#' @param replace if dataset exists, replace?
#' @param new_file remove old file if exists?
#' @param ctype dataset type: numeric? character?
#' @param ... passed to other \code{LazyH5$save}
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   writing \code{x} into \code{file}.
#' @seealso \code{\link{load_h5}}
#' @examples
#'
#' file <- tempfile()
#' x <- 1:120; dim(x) <- 2:5
#'
#' # save x to file with name /group/dataset/1
#' save_h5(x, file, '/group/dataset/1', chunk = dim(x))
#'
#' # read data
#' y <- load_h5(file, '/group/dataset/1')
#' y[]
#' @export
save_h5 <- function(x, file, name, chunk = 'auto', level = 4,replace = TRUE, new_file = FALSE, ctype = NULL, ...){
  # Probe for a writable handle: opening and closing reveals whether another
  # process currently locks the file.
  f <- tryCatch({
    h <- LazyH5$new(file, name, read_only = FALSE)
    h$open()
    h$close()
    h
  }, error = function(e){
    cat2('Saving failed. Attempt to unlink the file and retry...', level = 'INFO')
    if(file.exists(file)){
      # File is locked: copy the contents aside, remove the locked file, then
      # restore the copy so a fresh, unlocked file can be written to.
      tmpf <- tempfile(fileext = 'conflict.w.h5')
      file.copy(file, tmpf)
      unlink(file, recursive = FALSE, force = TRUE)
      file.copy(tmpf, file)
      unlink(tmpf)
    }
    # Otherwise it's some weird error, or dirname does not exist; expose the error
    LazyH5$new(file, name, read_only = FALSE)
  })
  # Guarantee the handle is released even if the save below fails.
  on.exit({
    f$close(all = TRUE)
  }, add = TRUE)
  f$save(x, chunk = chunk, level = level, replace = replace, new_file = new_file, ctype = ctype, force = TRUE, ...)
  # IDIOM FIX: return NULL invisibly so a top-level call prints nothing
  # (the original bare `return()` yielded a visible NULL).
  invisible(NULL)
}
|
8c37e46e275eab60e98625e70b30acd41cb92b6d
|
158ecb8ed1e2d57639d082228ec52f18e06a9733
|
/cachematrix.R
|
323118fa0bc2ee92e37f4c317ef86da4812d9415
|
[] |
no_license
|
deepaksuresh/datasciencecoursera
|
3d1530670c19a9a1ae4c6529520f2dbf77209a82
|
57572ed5b922a51cc311d20b12e3475bfc1cf891
|
refs/heads/master
| 2020-04-08T16:23:01.117616
| 2014-11-23T20:01:44
| 2014-11-23T20:01:44
| 26,993,505
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,136
|
r
|
cachematrix.R
|
## Matrix inversion can be a costly computation, so there is benefit in
## caching the inverse of a matrix rather than recomputing it each time.
## makeCacheMatrix builds a caching wrapper around a matrix. It returns a
## list of four closures sharing one environment:
##   set        -- replace the stored matrix and invalidate the cache
##   get        -- return the stored matrix
##   setinverse -- store a computed inverse in the cache
##   getinverse -- return the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  replace_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # matrix changed, so any cached inverse is stale
  }
  fetch_matrix <- function() {
    x
  }
  store_inverse <- function(inverse_value) {
    cached_inverse <<- inverse_value
  }
  fetch_inverse <- function() {
    cached_inverse
  }

  list(set = replace_matrix,
       get = fetch_matrix,
       setinverse = store_inverse,
       getinverse = fetch_inverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse has already been computed (and the matrix
## has not changed), the cached copy is returned with a message; otherwise
## the inverse is computed via solve(), stored in the cache, and returned.
##
##   x   : caching wrapper created by makeCacheMatrix()
##   ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  ## BUG FIX: forward `...` to solve(); the original accepted extra
  ## arguments in the signature but silently dropped them.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
aaa7ec075a01bc0cac1e3c8b1adb58b0eeec0c63
|
5c2e5322bd534eb23a3d9d440b272d535d125c72
|
/man/eq_clean_data.Rd
|
f94dfd701f335f3a19874caaeb2b94c0b577cfac
|
[] |
no_license
|
BouranDS/My_Capstone
|
a2f5bf96ec111f947b7fbd38bae55b83c153af02
|
edcd0e720ae41cd5140f28f0b9af47337317a6d1
|
refs/heads/master
| 2022-12-23T17:03:07.868709
| 2020-09-22T10:30:02
| 2020-09-22T10:30:02
| 297,568,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 983
|
rd
|
eq_clean_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_type1.R
\name{eq_clean_data}
\alias{eq_clean_data}
\title{eq_clean_data}
\usage{
eq_clean_data(
datalist,
YEAR = "YEAR",
MONTH = "MONTH",
DAY = "DAY",
LATITUDE = "LATITUDE",
LONGITUDE = "LONGITUDE"
)
}
\arguments{
\item{datalist}{a data.frame which contains earthquake information.}
\item{YEAR}{Interger}
\item{MONTH}{Integer between 1 and 12}
\item{DAY}{Integer between 1 to 31}
\item{LATITUDE}{Double}
\item{LONGITUDE}{Double}
}
\value{
data.frame object
}
\description{
eq_clean_data converts earthquake data into the required format, in which each column holds data of the correct type.
}
\examples{
{
my_data <- data.frame(
YEAR = c("1984", "2000", "2014"),
MONTH = c("1", "2", "12"),
DAY = c("19", "20", "24"),
LONGITUDE = c("12.344", "14.89", "13.345"),
LATITUDE = c("-8.00", "-8.89", "-9.345")
)
my_data_clean <- eq_clean_data(my_data)
}
}
\seealso{
Create_date
}
|
5aa49df5f6c914833ffde02f125f2deba7b1214d
|
3378c3985ec79ab9eabde41a5f76dd8b64c6cac8
|
/vig/aws/s3/htmlpdfr/fs_aws_s3.R
|
7f0682da8ced491d8414cd3f6394f7c1c6dd15e4
|
[
"MIT"
] |
permissive
|
fagan2888/pyfan
|
9ee5f8305782348e7a5b08e2e9523bfff7387c9d
|
a9fed72fe4c5ec6f42d00dc5984827e0beb986ff
|
refs/heads/master
| 2023-01-21T06:34:52.169622
| 2020-12-01T05:05:57
| 2020-12-01T05:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 696
|
r
|
fs_aws_s3.R
|
## ----global_options, include = FALSE----------------------------------------------------------
# Purled (knitr-extracted) R script from an R Markdown vignette about
# uploading files to AWS S3. Only the try(source(...)) line below executes;
# the `##`-prefixed lines are the vignette's Python chunks kept as comments.
try(source("../../../.Rprofile"))
## import platform as platform
## print(platform.release())
## # This assumes using an EC2 instance where amzn is in platform name
## if 'amzn' in platform.release():
## s3_status = True
## else:
## s3_status = False
## print(s3_status)
## import boto3
## s3 = boto3.client('s3')
## spn_local_path_file_name = "C:/Users/fan/pyfan/vig/aws/setup/_data/iris_s3.dta"
## str_bucket_name = "fans3testbucket"
## spn_remote_path_file_name = "_data/iris_s3.dta"
## s3.upload_file(spn_local_path_file_name, str_bucket_name, spn_remote_path_file_name)
|
89ee81aac4afe9c1edbc83399d8a981dbd52c7cf
|
f6aeb9fcaae4dc01c7ebc9504810dc5ccb20630a
|
/mri/MRSI_roi/mrsi_r/man/mrsi_plot_many.Rd
|
ef5417f3d2970de483818d91e0033b8c2e0e375b
|
[] |
no_license
|
LabNeuroCogDevel/7TBrainMech_scripts
|
e28468b895e1845c676bb4c3188719f248fd1988
|
1089f64ee73841cabae40d88a9913dacd761ed9e
|
refs/heads/master
| 2023-08-30T17:10:31.737956
| 2023-08-18T17:40:04
| 2023-08-18T17:40:04
| 160,859,221
| 4
| 1
| null | 2019-04-16T15:10:27
| 2018-12-07T18:09:20
|
MATLAB
|
UTF-8
|
R
| false
| true
| 671
|
rd
|
mrsi_plot_many.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_fits.R
\name{mrsi_plot_many}
\alias{mrsi_plot_many}
\title{Plot best model fit over raw data for multiple metabolites}
\usage{
mrsi_plot_many(d, regions, mtbl_thres, saveplot = F)
}
\arguments{
\item{d}{dataframe with region, metabolite, and crlb columns}
\item{regions}{numeric region(s) to include}
\item{mtbl_thres}{named list mapping metabolite column names to their
threshold column names, e.g. list(Glu.Cr = "Glu.SD", GABA.Cr = "GABA.SD")}
\item{saveplot}{boolean save or not}
}
\description{
return a plot of fits and raw
}
\examples{
mrsi_plot_many(d, 1, list(Glu.Cr='Glu.SD', GABA.Cr="GABA.SD")) \%>\% print
}
|
467909012085e573b37f2603f49dfebbc7a86bf5
|
9735be8af00c95237679ac3e6510d4e8316ecd91
|
/code/functions/gta_hs_check_job_results.R
|
868b6ef19dc9e55d2fb389ea502b9f76c0e5e868
|
[] |
no_license
|
global-trade-alert/hs-code-finder
|
3564ddf1d674ebb6a1a24e3b85cc2ee2fc7d6aec
|
7c025c813207a6254900e5eeebe6fb62fe0d90e7
|
refs/heads/master
| 2021-06-24T14:40:21.289206
| 2020-11-24T12:37:50
| 2020-11-24T12:37:50
| 179,287,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,287
|
r
|
gta_hs_check_job_results.R
|
# Fetch suggested HS codes for one or more HS-finder jobs and bucket the
# phrases of each job by their exit status.
#
#   job.ids        : numeric/character ids of the jobs to query
#   prob.threshold : minimum suggestion probability to keep (when prob.return)
#   prob.is.na     : if TRUE, rows with NA probability are kept as well
#   prob.return    : include the probability column in query and output?
#
# Returns a list with element `all` (every row across jobs) plus one element
# per job ("job.<id>") that itself contains `all` and one data frame per exit
# status (unfinished / processed / service / no.codes / round.limit).
gta_hs_check_job_results <- function(job.ids=NULL,
                                     prob.threshold = 0,
                                     prob.is.na = TRUE,
                                     prob.return = TRUE) {

  results <- list(all=data.frame())

  for (j.id in job.ids) {
    # SQL kept verbatim; probability column/filter is appended conditionally.
    query <- paste0("SELECT jp.job_id, jp.phrase_id, pl.phrase",if(prob.return){", cs.probability"},", pl.exit_status, cs.hs_code_6, hd.hs_description_4, hd.hs_description_6
                  FROM hs_job_phrase jp
                  JOIN hs_phrase_log pl
                  ON pl.phrase_id = jp.phrase_id
                  JOIN hs_code_suggested cs
                  ON cs.phrase_id = jp.phrase_id
                  LEFT JOIN hs_codes_app happ
                  ON cs.hs_code_6=happ.hs_code_6
                  LEFT JOIN hs_descriptions hd
                  ON happ.hs_id = hd.hs_id
                  WHERE (jp.job_id = ",j.id,")",if(prob.return){paste0(" AND (cs.probability >= ",prob.threshold, " ", if(prob.is.na){"OR cs.probability IS"}else{"AND cs.probability IS NOT"}," NULL)")},";")

    result <- gta_sql_get_value(query)

    # Add to overall results set
    results$all <- rbind(results$all, result)

    # Exit status legend:
    # 1 (UNPROCESSED) NOT ENOUGH ENTRIES TO PROCESS PHRASE
    # 2 (PROCESSED)   IF CODE SELECTED AND CODE SUGGESTED ARE AVAILABLE
    # 3 (NOT A PRODUCT) IF MAJORITY OF CHECKS LABEL AS "NOT A PRODUCT"
    # 4 (NO CODES)    IF CHECKED ENOUGH TIMES BUT NO CODES SELECTED
    # 5 (ROUND LIMIT) IF NROUND >= 4
    exit.status <- c("unfinished",
                     "processed",
                     "service",
                     "no.codes",
                     "round.limit")

    # Columns retained for the per-status data frames.
    keep.cols <- c('job.id','hs.code.6','phrase.id','phrase')
    if (prob.return) {
      keep.cols <- c(keep.cols, 'probability')
    }

    # IDIOM FIX: the original built these assignments as strings and ran them
    # through eval(parse(text = ...)); direct list indexing is equivalent,
    # safer, and readable. (The redundant `exit.results <- list()` preceding
    # the re-initialization was dropped.)
    exit.results <- list("all" = result)
    for (es in seq_along(exit.status)) {
      # NOTE(review): `exit.status == es` inside subset() resolves to the
      # data frame's exit.status column when present (as in the query above),
      # otherwise to the local character vector -- same as the original code.
      exit.results[[exit.status[es]]] <- subset(result, exit.status == es)[, keep.cols]
    }

    results[[paste0("job.", j.id)]] <- exit.results
  }
  return(results)
}
|
a4c3d6639b6ff09c56b2d6156b8436eb5d0e414d
|
cb50c680f9174c369c0e24e01bcf56e871fc99fa
|
/man/modelDiagnostics.Rd
|
3b4c44bc40822abbcd8dde65320571e89df0d6ad
|
[] |
no_license
|
ashleych/OLSModelSelector
|
05c4f78664f8e91323f0e72d7da67e2344c45378
|
6d1aeb1d07e6ffb6e3c945991d2f8282aa5a4ebe
|
refs/heads/master
| 2022-11-23T06:22:17.949350
| 2022-11-12T10:30:37
| 2022-11-12T10:30:37
| 200,585,100
| 0
| 0
| null | 2021-08-24T07:13:11
| 2019-08-05T04:58:59
|
HTML
|
UTF-8
|
R
| false
| true
| 1,459
|
rd
|
modelDiagnostics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ModelDiagnoser.R
\name{modelDiagnostics}
\alias{modelDiagnostics}
\title{Provides detailed diagnostic tests including tests for stationarity, autocorrelation for each model}
\usage{
modelDiagnostics(
allModelObjects,
testData = test_df,
direction_config = macrometa
)
}
\arguments{
\item{allModelObjects}{model Objects as generated by the function modelDeveloper}
\item{testData}{Test data to be provided as data.frame. If blank, it will look for test_df created by ValidationSampler in the Global environment.}
\item{direction_config}{To capture the expected sign of the coefficients of regression. This is a two column dataframe or datatable. Column names must be mandatorily 'Variable' and 'Type'. Variable contains the names of independent variables. Type denotes the expected relationship with response variable.It can take on either 1 or -1. If 1, indicates positive correlation, and hence expected signs of the coefficient is positive. If -1, indicates negative correlation, and hence expected signs of coefficients for those variables is negative}
}
\value{
Results of various diagnostic tets of Stationarity, BP, DW, Tests of significance etc provided.
}
\description{
Provides detailed diagnostic tests including tests for stationarity, autocorrelation for each model
}
\examples{
modelDiagnostics(allModelObjects) # allModelObjects created by modelDeveloper()
}
|
e27ca763536078a61ba3686cd7a1dfb57f9d5bfd
|
2d2491b342b737d2801cb964b3e4ccf960ee5fed
|
/man/rp.grid.Rd
|
c117ae507907649e5f4152a49579c3958ad2216c
|
[] |
no_license
|
cran/rpanel
|
7f64c93985c228f909f313bdf283b816b084f3ec
|
b2d139fedc899592e8cd9132c4bf7f1c9b16bc0d
|
refs/heads/master
| 2023-02-21T17:57:55.911293
| 2023-02-07T07:21:06
| 2023-02-07T07:21:06
| 17,699,324
| 1
| 3
| null | 2015-12-05T19:08:29
| 2014-03-13T06:10:11
|
R
|
UTF-8
|
R
| false
| false
| 2,359
|
rd
|
rp.grid.Rd
|
\name{rp.grid}
\alias{rp.grid}
\title{Define a subsidiary grid within an rpanel}
\description{A subsidiary grid is defined at a specified location within an rpanel.}
\usage{
rp.grid(panel, name=paste("grid", .nc(), sep=""), pos=NULL, background=NULL,
parentname=deparse(substitute(panel)), ...)
}
\arguments{
\item{panel}{the panel to which the grid should be attached.}
\item{name}{a string defining the name of the grid. For use with \code{\link{rp.widget.dispose}}}
\item{pos}{See the help information on "grid" mode in \code{\link{rp.pos}}, for more information. }
\item{background}{a character variable defining a background colour. (This is not the same as colours in \R, but simple colours are available.)}
\item{parentname}{this specifies the widget inside which the grid should appear.}
\item{...}{...}
}
\note{
The former argument \code{parent} has been discontinued in version 1.1, while the argument \code{bg} has been renamed \code{background} for consistency with the other functions.
}
\details{
The role of this function is to specify a subsidiary grid at a particular row and column position of the parent grid. Nesting of grids within grids is permitted. See the help information on "grid" mode in \code{\link{rp.pos}} for a description of the settings of the \code{pos} argument.
}
\references{
rpanel: Simple interactive controls for \R functions using the tcltk package. Journal of Statistical Software, 17, issue 9.
}
\examples{
\dontrun{
panel <- rp.control()
rp.grid(panel, pos=list(row=0, column=0, sticky="news"),
background="red", name="g0")
rp.grid(panel, pos=list(row=1, column=1, sticky="news", width=100, height=100),
background="navy", name="g1")
rp.grid(panel, pos=list(row=2, column=2, sticky="news", width=150, height=200),
background="green", name="g2")
rp.button(panel, function(panel) { panel }, "press A",
pos=list(row=1, column=1, sticky=""), parentname="g1")
rp.button(panel, function(panel) { panel }, "press B",
pos=list(row=2, column=2, sticky="news"), parentname="g1")
rp.button(panel, function(panel) { panel }, "press C",
pos=list("left",width=50, height=150), parentname="g2")
rp.grid(panel, pos=list(row=0, column=0, sticky="", width=10, height=10),
background="yellow", parentname="g0")
}}
\keyword{iplot}
\keyword{dynamic}
|
30d2eb85310de8f464c60bd509654932bc11bd6e
|
831fa3a8f2352109d15adfdff54be86d590a4e20
|
/Map_of_Philippines.R
|
410ebd48c122e5c294edb11d9684274d7c5b6790
|
[] |
no_license
|
zoeruben/OmicsWorkshop2019
|
85e94d78bff7abff9af0fec624d5a750a95ddd92
|
61f7c641c993bd78352c6d444ba3a05e7834a88a
|
refs/heads/master
| 2020-06-02T18:50:56.181452
| 2019-06-11T04:28:15
| 2019-06-11T04:28:15
| 191,272,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 477
|
r
|
Map_of_Philippines.R
|
# Draw a coastline map of the Philippines with a scale bar and axes.
# NOTE(review): this script mutates global state on every run — setwd() to a
# relative path, rm(list = ls()), and install.packages() — acceptable for a
# one-off interactive session, but it should not be source()'d from other code.
getwd()
setwd("../../git/sandbox/map/")
rm(list=ls())
getwd()
####install packages####
install.packages("maps")
install.packages("mapdata")
install.packages("ggplot2")
install.packages("ggmap")
####load packages####
library(maps)
library(mapdata)
library(ggplot2)
library(ggmap)
# High-resolution world coastlines clipped to a Philippines bounding box
# (116-127 E, 4-21 N), filled in turquoise.
m<-map("worldHires", xlim=c(116,127), ylim=c(4,21),
       col="turquoise", fill=TRUE, lwd=0.5)
# Scale bar anchored at (122 E, 5 N), then lat/long axes.
map.scale(122,5, relwidth=0.30, metric=TRUE, ratio=FALSE, cex=0.6)
map.axes()
|
b2b0a28d8abbae9e7265cbe9e0a213794bdd4a93
|
9a7c8495c4dcd1c660ff1f3562dabed316be5049
|
/plot1.R
|
5974aa3233ad077756118e5c61655cfa70519c60
|
[] |
no_license
|
hwalseokchoi/ExploratoryDataAnalysis
|
3121b7fe454e26a7f5f6e2a8d8385067dd639a1c
|
bc7ea9d7597f3b4987cb647838e49cd873e9969a
|
refs/heads/master
| 2023-08-19T20:47:54.260605
| 2021-10-19T08:37:49
| 2021-10-19T08:37:49
| 418,834,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
r
|
plot1.R
|
#Read data from household_power_consumption.txt"
# Semicolon-separated; skip=1 drops the header row, so names are set manually.
EPC_data <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(EPC_data) <-c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
head(EPC_data)
# Keep only the two days of interest.
subEPC_data <- subset(EPC_data, EPC_data$Date == "1/2/2007" | EPC_data$Date == "2/2/2007")
#plot1
# BUG FIX: convert via as.character() first. If the column was read as a
# factor (pre-R-4.0 defaults, or presumably because of non-numeric
# missing-value markers in the file — confirm against the data),
# as.numeric(factor) returns the internal level codes, not the values.
hist(as.numeric(as.character(subEPC_data$Global_active_power)), col="red", main="Global Active Power", xlab = "Global Active Power (kilowatts)")
|
46129d93a643b625fce378abbed48f0f1d8f1e96
|
478a35388e92dbc99fdfef60d1497712fe2c5fa5
|
/Sun_Trading_test.R
|
cc0439ecd792930f656a6e014fc15426afc5d4ca
|
[] |
no_license
|
guxinhui1991/LeetCode
|
c1b8623b64afc2f8794e73acb592c6bf99bed4b4
|
aabf314cdb59aafe6f61eaa6a603a528eeb20392
|
refs/heads/master
| 2022-06-17T03:37:28.276053
| 2022-05-31T20:18:05
| 2022-05-31T20:18:05
| 138,253,138
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
Sun_Trading_test.R
|
# Xinhui Gu
# May 2nd, 2017
# Question 1
# To get the max drawdown
# NOTE(review): despite the name, this returns the most negative
# single-period change, min(diff(c)), not the true peak-to-trough drawdown
# of the cumulative series — behavior preserved as-is.
# (Parameter is named `c`, shadowing base::c inside the body.)
MaxDD = function(c){
  min(diff(c))
}
MaxDD(c(-1, 2, 3,10, 5))
# Question 2
# Black-Scholes gamma of a European option.
#
# S: spot price; K: strike; r: risk-free rate; sigma: volatility;
# time: time to expiry in years; dividend: continuous dividend yield.
# Returns the option gamma.
#
# BUG FIX: the original body used `T` (the logical constant TRUE, i.e. 1)
# instead of the `time` argument, and computed d2 (minus sigma^2/2) where
# the gamma formula requires d1 (plus sigma^2/2).
BlackScholes = function(S, K, r = 0.01, sigma = 0.15, time = 0.3, dividend = 0){
  q = dividend
  d1 = (log(S/K) + (r - q + sigma^2/2)*time)/(sigma*sqrt(time))
  gamma = dnorm(d1)*exp(-q*time)/(S*sigma*sqrt(time))
  return(gamma)
}
# Question 3
# Probability that at least two of N people share a birthday (365-day year).
#
# BUG FIX: the original computed factorial(365), which overflows to Inf in
# double precision, so the result was always NaN (and the denominator had an
# off-by-one). Use the running product of "all distinct so far" fractions,
# which stays in range, and return its complement per the stated intent.
BirthdayProb = function(N){
  if (N < 2) return(0)    # fewer than two people cannot share a birthday
  if (N > 365) return(1)  # pigeonhole: a collision is certain
  prob_all_distinct = prod((365 - 0:(N - 1))/365)
  return(1 - prob_all_distinct)
}
# Question 4
# R-squared of the simple linear regression y ~ x.
#
# BUG FIX: the original referenced an undefined variable `const` when
# building the design matrix, so the function always errored. The unused
# intermediates (transposed matrix, unreturned sums) were also removed.
NonLmR2 = function(x, y){
  len = length(x)
  # Design matrix: intercept column of ones plus the regressor.
  new_x = matrix(cbind(rep(1, len), x), len, 2)
  # Closed-form OLS estimates: slope b1, intercept b0.
  b1 = sum((x - mean(x))*(y - mean(y)))/(sum((x - mean(x))^2))
  b0 = mean(y) - b1*mean(x)
  beta_hat = c(b0, b1)
  y_hat = new_x %*% beta_hat
  y_bar = mean(y)
  # R^2 = explained sum of squares / total sum of squares.
  R2 = sum((y_hat - y_bar)^2)/sum((y - y_bar)^2)
  return(R2)
}
|
53b79ae8565a25a8777bb7d1fe5bda47b92096c6
|
1437ee9c859768a3d90f2e3b7c7effe7194be67b
|
/Plot 3.R
|
8344c8d6aa35a94182ac692b386b6f7aae6dbb19
|
[] |
no_license
|
DVJENS/Exploratory-Data-Analysis-Assignment-1
|
890517b96a96251bb08e2507be44c8ef180d2223
|
4f0d44c3710fead21daf7c2bcf6d07290eae4d33
|
refs/heads/master
| 2021-01-25T09:20:55.904951
| 2017-06-09T04:12:59
| 2017-06-09T04:12:59
| 93,817,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
Plot 3.R
|
# Read in household power consumption data and create subset of data
# NOTE(review): the input path is an absolute local Windows path — adjust
# before running elsewhere.
pow <- read.table("C:/Exploratory Data Analysis/Exploratory Analysis Week 1/household_power_consumption.txt",skip=1,sep=";")
names(pow) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
smallpower <- subset(pow,pow$Date=="1/2/2007" | pow$Date =="2/2/2007")
# Transform the Date and Time
smallpower$Date <- as.Date(smallpower$Date, format="%d/%m/%Y")
smallpower$Time <- strptime(smallpower$Time, format="%H:%M:%S")
# strptime() attaches the current date to every parsed time; rewrite the date
# component so the two days (1440 minutes each) plot as a continuous span.
smallpower[1:1440,"Time"] <- format(smallpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
smallpower[1441:2880,"Time"] <- format(smallpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# create the plot
# Empty frame first (type="n"), then one line per sub-meter.
# as.character() before as.numeric() guards against factor columns returning
# level codes instead of values.
plot(smallpower$Time,smallpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(smallpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(smallpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(smallpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
title(main="Energy sub-metering")
|
4b7f871027711f29c597b753204d1d27511cf1e0
|
b81ebffe56a46c0cb500f141ac2d8b9ee1a668a5
|
/vectorized_operation.r
|
239f24b414ecdcbb2a8ce68d8af2643efdc974be
|
[] |
no_license
|
atta49/R_programing
|
1b4faae45349666b79c32c92511bd0c3eb46e4ae
|
bfc5ecd79631059ff55e070a58494ec62d46157c
|
refs/heads/master
| 2023-06-11T07:51:56.203021
| 2021-07-06T09:47:47
| 2021-07-06T09:47:47
| 372,713,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
vectorized_operation.r
|
# Element-wise arithmetic and comparisons on vectors of equal length.
x<-1:4;y<-5:8
x+y
x>2
x>=2
y==8
x/y
#vectorized operations: rep() and matrices
rep(1,4)
x<-matrix(1:4,2,2);y<-matrix(rep(10,4),2,2)
x*y #element wise multiplication
x/y
x%*%y# matrix multiplication: row of 1st matrix times column of 2nd matrix
|
2cee869d1572099a4d4ad0f2eba49f9dac56d578
|
40bcca516b2e31e8bc28c6241085c0cf41c0c7f4
|
/plot1.R
|
5847b955613e543b3fc8835572c44d5abded7e77
|
[] |
no_license
|
christinch3n/ExpData_project2
|
7b125bc81f4bc291ec46612ea31f1805f95c9de2
|
3898fa9100208a63e930d08f9fbcbf31dddb0139
|
refs/heads/master
| 2021-01-21T12:43:31.866738
| 2014-10-25T16:03:49
| 2014-10-25T16:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
plot1.R
|
# Total PM2.5 emissions per year across the United States (all sources).
# Expects the two RDS inputs in the working directory.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Sum up emissions per year (Pollutant is "PM25-PRI" in all rows!)
emission_sums <- rowsum(NEI$Emissions, as.factor(NEI$year))
png("plot1.png", width=480, height=480)
# rowsum() keeps the grouping levels as row names, so they serve as x values.
plot(rownames(emission_sums), emission_sums, main="United States - All Sources", xlab="Year", ylab="Total Emission from PM25", pch=8)
dev.off()
## Emissions in the United States decreased significantly from 1999 - 2008.
|
571c687a78efebf0c5ed6bda6ea9f58b43280297
|
0ff06478c18026955ebf512cd3dcaef7293e1c30
|
/man/hanabiPlot.Rd
|
072f63d1c4957db06c6575949403488eead4062f
|
[
"CC0-1.0"
] |
permissive
|
charles-plessy/smallCAGEqc
|
83d19b21890eed9455eaca13c87455bd53f45950
|
e3642f25b43314779c33388129b5d47a5a1538ec
|
refs/heads/master
| 2021-03-13T01:36:47.956099
| 2018-01-25T04:27:20
| 2018-01-25T04:27:20
| 34,089,765
| 1
| 1
| null | 2017-03-22T05:47:31
| 2015-04-17T01:24:16
|
R
|
UTF-8
|
R
| false
| true
| 1,956
|
rd
|
hanabiPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hanabiPlot.R
\name{hanabiPlot}
\alias{hanabiPlot}
\title{hanabiPlot}
\usage{
hanabiPlot(RAR, S, GROUP = NULL, legend.pos = "topleft", pch = 1,
col = "black", ...)
}
\arguments{
\item{RAR}{A rarefaction table, or a hanabi object.}
\item{S}{A vector of subsample sizes.}
\item{GROUP}{A vector grouping the samples. Coerced to factor.}
\item{legend.pos}{Position of the legend, passed as "x" parameter to the
"legend" function.}
\item{pch}{Plot character at the tip of the lines.}
\item{col}{A vector of colors}
\item{...}{Further arguments to be passed to the first plot function,
that plots the empty frame.}
}
\description{
Plot feature discovery curves
}
\details{
Plots the number of features (genes, transcripts, ...) detected for a
given number of counts (reads, unique molecules, ...). Each library is
sub-sampled by rarefaction at various sample sizes, picked to provide
enough points so that the curves look smooth. The final point is plotted
as an open circle, hence the name "hanabi", which means fireworks in
Japanese.
The rarefactions take time to do, so this step is done by a separate
function, so that the result is easily cached.
}
\examples{
\dontrun{
hanabi(genes, npoints = 20, step = 0.8, from = 0) \%>\% hanabiPlot
hanabi(genes, npoints = 20, step = 0.9) \%>\% hanabiPlot
}
bedFiles <- system.file(package = "smallCAGEqc", "extdata") \%>\%
list.files("*BED", full.names = TRUE)
bed <- loadBED12(bedFiles)
rar <- tapply(bed$score, bed$library, hanabi, from = 0) \%>\%
structure(class = "hanabi") # tapply discards the class !
hanabiPlot(rar, GROUP = levels(bed$library))
hanabiPlot(rar, GROUP = levels(bed$library), col=c("red", "green", "blue"))
hanabiPlot(rar, col="purple")
}
\seealso{
vegan, plot.hanabi, hanabi
Other Hanabi functions: \code{\link{hanabi}},
\code{\link{plot.hanabi}}, \code{\link{points.hanabi}}
}
|
e62f785ea40cb21cf8c02640f35b3dd553d92d22
|
7d326bc0e0c3306cd9d32e51a8759c7913ec0e6f
|
/man/CrowdStat.ndd.Rd
|
c56b4a9e5dc4fc654e76a6b94e7b78b1aceab481
|
[
"MIT"
] |
permissive
|
marjoleinbruijning/FDPtools
|
2abaef7ccd53cc7b220816c7f9c8ea4232df9fd1
|
4ff2ed09cf2e5f40c72b5f3955a6243a16a35ff4
|
refs/heads/master
| 2021-01-20T16:35:37.561045
| 2016-03-17T09:45:22
| 2016-03-17T09:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
rd
|
CrowdStat.ndd.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/NddMeassuresFunctions.R
\name{CrowdStat.ndd}
\alias{CrowdStat.ndd}
\title{CrowdStat.ndd}
\usage{
\method{CrowdStat}{ndd}(FdpData, r)
}
\arguments{
\item{r}{a vector containing all distances at which the
below statistic must be calculated}
\item{distancemat}{a matrix containing the distances
between all individuals}
}
\description{
primitive function which counts points (trees)
surrounding a given focal point (tree) in the census data
}
\author{
Marco D. Visser
}
|
a1edc29925c8073f771308dd0269fba961cd61a0
|
0ca7633ec7944a84d61f231a8d04d670cc15d5d2
|
/man/predict.speedglm.Rd
|
206219a9b1bf712f8fd1457f6d0fb5b584f7f4cc
|
[] |
no_license
|
MarcoEnea/speedglm
|
54b255406f7aae5efd9e8a0f166f60579f3d0475
|
4a15e74ce2959b9962491457fbb0308e3f8fb45c
|
refs/heads/master
| 2022-02-21T01:26:48.737121
| 2022-02-19T18:39:41
| 2022-02-19T18:39:41
| 143,534,422
| 5
| 2
| null | 2022-02-19T17:02:59
| 2018-08-04T14:12:04
|
R
|
UTF-8
|
R
| false
| false
| 1,268
|
rd
|
predict.speedglm.Rd
|
\name{predict.speedglm}
\alias{predict.speedglm}
\title{Predict method for a speedglm object}
\description{
The \code{predict} method is currently under construction, but some functionality is available.
}
\usage{
\method{predict}{speedglm}(object, newdata, type = c("link", "response"),
na.action = na.pass, ...)
}
\arguments{
\item{object}{an object of class 'speedlgm'.}
\item{newdata}{An optional data frame with new data or the original data.}
\item{type}{Type of prediction.}
\item{na.action}{function determining what should be done with missing values in \code{newdata}.}
\item{\dots}{further optional arguments}
}
\details{If \code{newdata} is omitted prediction are based on the data used for the fit only if argument \code{fitted} was previously set to \code{TRUE} in the speedglm object.
Currently the method does not work for function \code{shglm}.}
\value{
\item{pred}{a vector of predictions.}
}
\author{ Tomer Kalimi and Marco Enea }
\seealso{ \link{speedglm} }
\examples{
set.seed(10)
y <- rgamma(20,1.5,1)
x <-round( matrix(rnorm(20*2),20,2),digits=3)
colnames(x) <-c("s1","s2")
da <- data.frame(y,x)
mod <- speedglm(y~s1+s2, data=da, family=Gamma(log), fitted=TRUE)
predict(mod)
}
\keyword{models}
|
fa4d35039bd98f3d9b6b745cae0537992c0d4ac5
|
c348e148840c1260985291d1adb8f7860fb6037f
|
/ex7-2.R
|
cb6beaf9f362be68f2980d148cd13f92042a0cba
|
[] |
no_license
|
harute931507/R_practice
|
8d478ad884bb8cd15c35b941499bc4f7c8c09dfe
|
aa882783d915a58048e7fbb061b3b7df87ec1f3e
|
refs/heads/master
| 2020-03-31T03:45:56.599245
| 2018-10-06T19:59:58
| 2018-10-06T19:59:58
| 151,876,825
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 3,040
|
r
|
ex7-2.R
|
# Plotting exercises on the built-in mtcars data set (questions + answers).
# NOTE(review): attach() puts mtcars columns on the search path for the
# whole session — generally discouraged outside interactive exercises.
attach(mtcars);
# Exercise 1
# Create a scatterplot of mpg (x-axis) against drat (y-axis) and add a label to the x-axis.
# Which of the following statements is correct:
# a. plot(mpg,drat,xaxis="Miles per gallon")
# b. plot(mpg,drat,xlab="Miles per gallon")
## ANS: b
# Exercise 2
# We just saw how to customize the label on the x-axis. For the next exercise:
# a. Customize the y-axis label like we customized the x-axis in the previous exercise..
# b. Produce a plot customizing x and y axis, range and colours.
# Choose a range for x axis from 0 to 40, for y axis from 0 to 7, red colour and highlighted points.
## ANS:
## a:
## plot(mpg,drat,xlab="Miles per gallon",ylab="Rear axle ratio");
## b:
## plot(mpg,drat,xlab="Miles per gallon",ylab="Rear axle ratio",xlim=c(0,40),col=2,lwd=2,ylim=c(0,7));
# Exercise 3
# We have to add a title to our plot. What command do we have to type?
# a. plot(mpg,drat,main="Miles vs Rar")
# b. plot(mpg,drat,title="Miles vs Rar")
## ANS: a
# Exercise 4
# We just saw how to add labels, titles and custom details such as colours and the size of points.
# Now we will have to construct our plot in different stages.
# Firstly we have to plot our data, secondly we will add axes, title and text.
# a. Plot our data, specifying that the axes are not to be plotted.
# b. Add axes, labels and text afterwards
## ANS:
## a: plot(mpg,drat,axes=F);
## b:
## add axes:
## axis(side=1,at=c(0,40)); axis(side=2,at=c(0,10));
## add labels:
## mtext("Miles",side=1); mtext("Miscellaneous",side=2);
## add title:
## mtext("Miles vs Rar",side=3);
# Exercise 5
# Now we want to add a legend to our plot. Which statement is correct?
# a. plot(mpg,drat,legend=1)
# b. plot(mpg,drat);legend()
## ANS:
## plot(mpg,drat);
## points(mpg,wt,col=2);
## legend("topright",legend=c("Rar","Wt"),col=1:2,ncol=2,pch=1);
# Exercise 6
# Customize our legend:
# a. Use different types of symbols, colours, background colours and position.
# b. Insert new variables to our plot and then customize the legend
## ANS:
## a:
## plot(mpg,drat,pch=2);
## points(mpg,wt,col=2);
## legend("bottomright",legend=c("Rar","Wt"),col=c(1,2),ncol=3,pch=c(2,1),bg="light blue");
## b:
## points(mpg,cyl,col=3,pch=3);
## legend("bottomright",legend=c("Rar","Cyl","Wt"),col=c(1:3),ncol=3,pch=c(2,1,3),bg="light blue");
# Exercise 7
# Finally, we will build a plot using four continuous variables in two stages:
# a. plot two variables at a time, eliminating axes.
# Introduce axes and labels afterwards.
# b. Insert a legend using different colours and a different symbol for every variable.
# Put the legend in a top right position using x and y coordinates.
## ANS:
## plot(mpg,drat,axes=F,xlab="",ylab="");
## points(mpg,wt,pch=2,col=2);
## points(mpg,cyl,pch=3,col=3);
## axis(side=1,at=c(0,40));
## axis(side=2,at=c(0,30));
## mtext("Miles",side=1);
## mtext("Miscellaneous",side=2);
## mtext("Miles vs Miscellaneous",side=3);
## legend("topright",legend=c("Rar","Weight","Cyl"),col=c(1:3),ncol=3,pch=1:3,bg="light blue");
|
841c2e095de8f926739fbfb0212f37fdefc147e4
|
e2cf95573135143abda85710adc33336bf41adef
|
/R/swap_headings.R
|
54b40232ee64a2bde77a30b6245ab5af556e1d54
|
[] |
no_license
|
kwojdalski/rpm2
|
41d3cac7f1ef08585ed5e5a847f86c665accf285
|
a6b0ea44832210b0601deed5691eaac664752d6c
|
refs/heads/master
| 2021-08-15T21:43:13.871194
| 2017-11-18T10:22:15
| 2017-11-18T10:22:15
| 110,359,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,733
|
r
|
swap_headings.R
|
# @knitr fun_swapHeadings
# Rmd <-> Rnw document conversion
# Conversion support functions
# called by .swap()
# Convert section-heading lines in-place: Rmd "#"-style headings become LaTeX
# \section-family commands, and vice versa. All non-heading lines pass through.
#
# from, to: accepted but not referenced in this body; presumably part of the
#   .swap() calling convention — TODO confirm.
# x: character vector of document lines.
# Returns x with heading lines rewritten.
.swapHeadings <- function(from, to, x){
  nc <- nchar(x)
  # Any line starting with a backslash => treat the input as Rnw/LaTeX.
  ind <- which(substr(x, 1, 1)=="\\")
  if(!length(ind)){ # assume Rmd file
    ind <- which(substr(x, 1, 1)=="#")
    # ind.n[i] ends up as the heading depth (number of leading '#').
    ind.n <- rep(1, length(ind))
    for(i in 2:6){
      ind.tmp <- which(substr(x[ind], 1, i)==substr("######", 1, i))
      if(length(ind.tmp)) ind.n[ind.tmp] <- ind.n[ind.tmp] + 1 else break
    }
    for(i in 1:length(ind)){
      n <- ind.n[i]
      input <- paste0(substr("######", 1, n), " ")
      h <- x[ind[i]]
      h <- gsub("\\*", "_", h) # Switch any markdown boldface asterisks in headings to double underscores
      # Heading text = everything after the '#'s and the following space.
      heading <- gsub("\n", "", substr(h, n+2, nc[ind[i]]))
      #h <- gsub(input, "", h)
      # Depth 1-2 -> section, 3 -> subsection, 4 -> subsubsection, 5+ deeper.
      # (Doubled backslashes survive gsub's replacement escaping as one '\'.)
      if(n <= 2) subs <- "\\" else if(n==3) subs <- "\\sub" else if(n==4) subs <- "\\subsub" else if(n >=5) subs <- "\\subsubsub"
      output <- paste0("\\", subs, "section{", heading, "}\n")
      # NOTE(review): `h` is used as a regex pattern; headings containing
      # regex metacharacters may fail to be replaced — confirm whether
      # fixed = TRUE (or plain assignment of `output`) was intended.
      x[ind[i]] <- gsub(h, output, h)
    }
  } else { # assume Rnw file
    ind <- which(substr(x, 1, 8)=="\\section")
    if(length(ind)){
      for(i in 1:length(ind)){
        h <- x[ind[i]]
        # Strip "\section{" (9 chars) and the trailing "}" + newline.
        heading <- paste0("## ", substr(h, 10, nchar(h)-2), "\n")
        x[ind[i]] <- heading
      }
    }
    ind <- which(substr(x, 1, 4)=="\\sub")
    if(length(ind)){
      for(i in 1:length(ind)){
        h <- x[ind[i]]
        z <- substr(h, 2, 10)
        # p = markdown prefix, n = offset past "\sub...section{".
        if(z=="subsubsub") {p <- "##### "; n <- 19 } else if(substr(z, 1, 6)=="subsub") { p <- "#### "; n <- 16 } else if(substr(z, 1, 3)=="sub") { p <- "### "; n <- 13 }
        heading <- paste0(p, substr(h, n, nchar(h)-2), "\n")
        x[ind[i]] <- heading
      }
    }
  }
  x
}
|
07b16b02fbd3df8ed93de5473559611d01c9b63d
|
566654448a180399905bf06a20125cfd529f4772
|
/tests/testthat/helper.R
|
46adf738e1090070406bf3195ae7a64ea57a401f
|
[] |
no_license
|
nealrichardson/elbr
|
a236f654aa6be3c4119dac687c9cdb7ea46575c6
|
6a9cc3205b1cea13e439b83ea28d0300d0529d58
|
refs/heads/master
| 2021-04-15T18:21:57.172514
| 2018-10-22T14:29:48
| 2018-10-22T14:29:48
| 126,269,430
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
helper.R
|
# testthat helper: make test runs deterministic and self-contained.
Sys.setlocale("LC_COLLATE", "C") ## What CRAN does
set.seed(999)
# Point the package's data directory option at the test directory.
options(elbr.dir=".")
# Evaluates its arguments with the global environment as data mask —
# presumably used to expose helper objects to all test files; TODO confirm
# against how the test suite calls public().
public <- function (...) with(globalenv(), ...)
|
c6eb8b846ff3943f0ed6d65727b1f218d3b46a75
|
680b301598673a0c823563238ec29b4bb3a87632
|
/jacques/broche_analysis_Rparams.R
|
a225637ab3d9e003df3a59a85c0ee607e53fa9d1
|
[] |
no_license
|
jmrinaldi/gene-regulation
|
fa2bdd4fe65de65840c2a688dd9d0c6422fb8913
|
d04bb28f19c2d7b86165c3371227fd161559f08a
|
refs/heads/master
| 2021-01-16T01:07:27.148832
| 2016-02-11T06:00:13
| 2016-02-11T06:00:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
broche_analysis_Rparams.R
|
## TEMPORARY FOR DEBUGGING:
# Analysis parameters for the E. coli K-12 MG1655 RNA-seq DEG pipeline.
# All file paths below are relative to dir.main.
dir.main <- "~/BeatriceRoche/"
setwd(dir.main)
# R parameter file produced by the alignment/counting step.
r.params.path <- "results/DEG/sickle_pe_q20_bowtie2_pe_sorted_name_params.R"
org.db <- "org.EcK12.eg.db" ## Should be added to parameters
# Gene annotation table for the reference genome assembly GCA_000005845.2.
gene.info.file <- "genome/Escherichia_coli_str_k_12_substr_mg1655_GCA_000005845.2_gene_info.tab"
# Organism identifiers for downstream tools (clusterProfiler id unknown/NA).
organism.names <- c("name" = "Escherichia coli",
                    "clusterProfiler" = NA,
                    "kegg"="eco")
gtf.file <- "genome/Escherichia_coli_str_k_12_substr_mg1655.GCA_000005845.2.28.gtf"
# Origin of the reference files (Ensembl Genomes bacteria release 28).
gtf.source <- "ftp://ftp.ensemblgenomes.org/pub/bacteria/release-28/fasta/bacteria_0_collection/escherichia_coli_str_k_12_substr_mg1655/"
# pet.gene <- "b2531"
genes.of.interest <- c("b2531")
# Gene -> GO term mapping and GO term descriptions.
go.map.file <- 'genome/Escherichia_coli_str_k_12_substr_mg1655_GCA_000005845.2_gene_GO.tab'
go.description.file <- "genome/GO_description.tab"
|
4a3ea834c121e8cbf46de7aa2c0bceaa5c4b7d7d
|
325d076c5fcdba87e8bad019a147b37eeb677e90
|
/man/emptyraster.Rd
|
ec4509856e5fc8c1048b96c1e7a10a7fb8beae50
|
[
"CC-BY-4.0"
] |
permissive
|
iiasa/ibis.iSDM
|
8491b587b6ccc849477febb4f164706b89c5fa3c
|
e910e26c3fdcc21c9e51476ad3ba8fffd672d95e
|
refs/heads/master
| 2023-08-26T12:38:35.848008
| 2023-08-19T21:21:27
| 2023-08-19T21:21:27
| 331,746,283
| 11
| 1
|
CC-BY-4.0
| 2023-08-22T15:09:37
| 2021-01-21T20:27:17
|
R
|
UTF-8
|
R
| false
| true
| 700
|
rd
|
emptyraster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-spatial.R
\name{emptyraster}
\alias{emptyraster}
\title{Create an empty \code{SpatRaster} based on a template}
\usage{
emptyraster(x, ...)
}
\arguments{
\item{x}{A \code{SpatRaster} object to be used as a template.}
\item{...}{other arguments that can be passed to \code{\link{terra}}}
}
\value{
an empty \code{\link{SpatRaster}}, i.e. all cells are \code{NA}.
}
\description{
This function creates an empty copy of a provided
\code{SpatRaster} object. It is primarily used in the package to create the
outputs for the predictions.
}
\examples{
require(terra)
r <- rast(matrix(1:100, 5, 20))
emptyraster(r)
}
\keyword{utils}
|
c3eee3c59614884612f14fcf2af8dce4e99dabe3
|
872315061dfdfb86864f85f27704f4b0bddb1be4
|
/scripts/new_figures.R
|
d51914cc5bb5b73b0b6bcb57733cb9ffff5f5032
|
[
"MIT"
] |
permissive
|
kwells4/mtec_analysis
|
2a2e01b54d1ded0bfa25c65d750ec381a6c846e2
|
f7a8ec78c21d33483f0ebed3e59f3bb0bd812b3d
|
refs/heads/master
| 2021-07-10T01:07:13.927793
| 2021-04-15T21:29:57
| 2021-04-15T21:29:57
| 183,113,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66,702
|
r
|
new_figures.R
|
###########
# This file has pseudotime calls added in.
library(mTEC.10x.pipeline)
library(mTEC.10x.data)
library(dplyr)
library(slingshot)
###############
# Move all these functions to the package
rename_stage <- function(seurat_object, set_ident = TRUE) {
  # Rename the original stage labels in @meta.data$stage to the publication
  # stage names and impose the canonical factor ordering.
  #
  # seurat_object: Seurat object whose @meta.data has a `stage` factor.
  # set_ident: when TRUE, also copy the renamed stages into the cell idents.
  # Returns the modified Seurat object.
  levels(seurat_object@meta.data$stage) <- c(levels(seurat_object@meta.data$stage),
                                             "Cortico_medullary", "Ccl21a_high",
                                             "Early_Aire",
                                             "Aire_positive", "Late_Aire")
  seurat_object@meta.data$stage[seurat_object@meta.data$stage ==
                                  "Immature"] <- "Ccl21a_high"
  seurat_object@meta.data$stage[seurat_object@meta.data$stage ==
                                  "Intermediate"] <- "Early_Aire"
  seurat_object@meta.data$stage[seurat_object@meta.data$stage ==
                                  "Mature"] <- "Aire_positive"
  seurat_object@meta.data$stage[seurat_object@meta.data$stage ==
                                  "Late_mature"] <- "Late_Aire"
  seurat_object@meta.data$stage[seurat_object@meta.data$stage ==
                                  "cTEC"] <- "Cortico_medullary"
  # Re-factor to drop the now-unused original levels and fix display order.
  seurat_object@meta.data$stage <- factor(seurat_object@meta.data$stage,
                                          levels = c("Cortico_medullary",
                                                     "Ccl21a_high",
                                                     "Early_Aire",
                                                     "Aire_positive",
                                                     "Late_Aire",
                                                     "Tuft",
                                                     "unknown"))
  if (set_ident){
    idents <- data.frame(seurat_object@ident)
    if (!identical(rownames(seurat_object@meta.data), rownames(idents))){
      # BUG FIX: the original indexed the Seurat object itself (not its
      # @meta.data slot, and without the row/column subscript) and matched
      # against the global `mtec` rather than the function argument.
      # Reorder the meta.data rows to the cell order of the idents.
      seurat_object@meta.data <- seurat_object@meta.data[
        match(rownames(idents), rownames(seurat_object@meta.data)), ]
    }
    seurat_object <- Seurat::SetAllIdent(seurat_object, id = "stage")
    seurat_object@ident <- factor(seurat_object@ident,
                                  levels = c("Cortico_medullary",
                                             "Ccl21a_high",
                                             "Early_Aire",
                                             "Aire_positive",
                                             "Late_Aire",
                                             "Tuft",
                                             "unknown"))
  }
  return(seurat_object)
}
get_avg_exp <- function(mtec_obj, avg_expr_id = "stage") {
  # log1p of the per-identity-class average expression.
  #
  # mtec_obj: Seurat object.
  # avg_expr_id: @meta.data column to use as the identity for averaging.
  # Returns the data frame produced by Seurat::AverageExpression, log1p'd.
  idents <- data.frame(mtec_obj@ident)
  if (!identical(rownames(mtec_obj@meta.data), names(mtec_obj@ident))){
    # BUG FIX: the original indexed the Seurat object itself (dropping the
    # @meta.data slot) and referenced the global `mtec` instead of the
    # `mtec_obj` argument. Reorder meta.data rows to the cell order of the
    # idents instead.
    mtec_obj@meta.data <- mtec_obj@meta.data[
      match(rownames(idents), rownames(mtec_obj@meta.data)), ]
  }
  mtec_obj <- Seurat::SetAllIdent(mtec_obj, id = avg_expr_id)
  avg.expression <- log1p(Seurat::AverageExpression(mtec_obj))
  return(avg.expression)
}
plot_corr <- function(avg_expression_1, avg_expression_2, name_1,
                      name_2, color_df, density = FALSE) {
  # Per-stage scatter plots of average expression from two datasets with a
  # Pearson correlation annotation, arranged in a 3-column grid.
  #
  # avg_expression_1/2: data frames of average expression (genes x stages),
  #   e.g. output of get_avg_exp().
  # name_1/name_2: axis labels (also used as internal column names).
  # color_df: list/data frame mapping stage name -> plot color.
  # density: when TRUE, color points by local 2-D density (get_density)
  #   instead of a flat per-stage color.
  # Returns a one-row data frame of correlations, one column per shared stage.
  # Only stages present in both datasets are compared.
  stages_1 <- colnames(avg_expression_1)
  stages_2 <- colnames(avg_expression_2)
  stages <- intersect(stages_1, stages_2)
  cor_df <- NULL
  plots_list <- c()
  for (i in stages) {
    print(i)
    df_1 <- data.frame(row.names = rownames(avg_expression_1),
                       avg_exp = avg_expression_1[[i]])
    colnames(df_1)[1] <- name_1
    df_2 <- data.frame(row.names = rownames(avg_expression_2),
                       avg_exp = avg_expression_2[[i]])
    colnames(df_2)[1] <- name_2
    # Merge by gene (row) names, so only genes shared by both datasets
    # contribute to the correlation.
    plot_df <- merge(df_1, df_2, by = "row.names")
    rownames(plot_df) <- plot_df$Row.names
    plot_df$Row.names <- NULL
    correlation <- cor(plot_df[[name_1]], plot_df[[name_2]])
    correlation_plot <- round(correlation, 2)
    correlation_plot <- paste0("r = ", correlation_plot)
    # Placement of the "r = ..." annotation inside the panel.
    text_x <- max(plot_df[[name_1]]) - 1
    text_y <- max(plot_df[[name_2]]) / 2
    color_scale <- toString(color_df[[i]])
    if (density) {
      plot_df$density <- get_density(plot_df[[name_1]], plot_df[[name_2]])
      plot_1 <- ggplot2::ggplot(data = plot_df, ggplot2::aes_string(name_1,
                                                                    name_2)) +
        ggplot2::geom_point(ggplot2::aes(color = density)) +
        ggplot2::ggtitle(i) +
        ggplot2::scale_color_gradient(low = color_scale, high = "#A9A9A9") +
        # ggplot2::theme_classic() +
        ggplot2::geom_text(x = text_x, y = text_y,
                           label = correlation_plot)
      print(plot_1)
    } else {
      plot_1 <- ggplot2::ggplot(data = plot_df, ggplot2::aes_string(name_1,
                                                                    name_2)) +
        ggplot2::geom_point(color = color_scale) + ggplot2::ggtitle(i) +
        #ggplot2::theme_classic() +
        ggplot2::geom_text(x = text_x, y = text_y,
                           label = correlation_plot)
      print(plot_1)
    }
    # The "unknown" stage is plotted individually but excluded from the
    # combined grid at the end.
    if (i != "unknown") {
      plots_list[[i]] <- plot_1
    }
    if (is.null(cor_df)) {
      cor_df <- data.frame(correlation)
      names(cor_df) <- i
    } else {
      cor_df[[i]] <- correlation
    }
  }
  nplots <- length(stages)
  if (nplots > 6) {
    print("Warning in plot_corr:")
    print(paste0("Works best if number of plots is less than 6. You have ",
                 nplots, " total plots"))
  }
  # Fixed 3-column layout for the combined figure.
  rows <- ceiling(nplots / 3)
  cols <- 3
  gridExtra::grid.arrange(grobs = plots_list, nrow = rows, ncol = cols)
  return(cor_df)
}
get_density <- function(x, y, n = 100) {
  # 2-D kernel density (MASS::kde2d on an n-by-n grid), evaluated at the
  # grid cell containing each input point. Returns one density value per
  # (x, y) pair; identical points get identical values.
  grid <- MASS::kde2d(x = x, y = y, n = n)
  cell <- cbind(findInterval(x, grid$x), findInterval(y, grid$y))
  grid$z[cell]
}
master_plot <- function(mtec_obj_1, name_1, mtec_obj_2,
                        name_2, stages_colors, density = FALSE) {
  # Compute per-stage average expression for both Seurat objects and plot
  # their stage-wise correlations; returns the correlation table from
  # plot_corr().
  # NOTE(review): `density` is accepted but never forwarded to plot_corr —
  # original behavior preserved as-is.
  plot_corr(get_avg_exp(mtec_obj_1),
            get_avg_exp(mtec_obj_2),
            name_1, name_2, stages_colors)
}
populations_dfs <- function(seurat_object, sample_name, stage_df_all){
  # Tally cells per stage for one sample and append the result to a running
  # data frame (stage / count / percent / sample). Pass stage_df_all = NULL
  # for the first sample.
  tally <- data.frame(table(seurat_object@meta.data$stage))
  colnames(tally) <- c("stage", "count")
  tally$percent <- 100 * tally$count / sum(tally$count)
  tally$sample <- sample_name
  if (is.null(stage_df_all)) {
    tally
  } else {
    rbind(stage_df_all, tally)
  }
}
populations_dfs_new <- function(seurat_object, sample_name, subsample = FALSE,
                                subsample_by = "exp", meta_data_col = "stage"){
  # Per-stage cell counts and percentages for one sample.
  # When subsample = TRUE, first restrict to cells whose `subsample_by`
  # meta.data value equals sample_name (via Seurat::SubsetData).
  if (subsample) {
    meta <- seurat_object@meta.data
    keep <- rownames(meta)[meta[[subsample_by]] == sample_name]
    seurat_object <- Seurat::SubsetData(seurat_object, cells.use = keep)
  }
  tally <- data.frame(table(seurat_object@meta.data[[meta_data_col]]))
  names(tally) <- c("stage", "count")
  tally$percent <- 100 * tally$count / sum(tally$count)
  tally$sample <- sample_name
  tally
}
population_plots <- function(stage_df_all, color, save_plot = NULL){
  # Stacked bar chart of per-stage percentages for each sample.
  #
  # stage_df_all: data frame with `sample`, `percent`, `stage` columns (as
  #   built by populations_dfs / populations_dfs_new).
  # color: fill colors passed to scale_fill_manual.
  # save_plot: optional output path ending in ".pdf" or ".png".
  # Returns the ggplot object; also writes it to save_plot when valid.
  #
  # BUG FIX: the original called print()/dev.off() whenever save_plot was
  # non-NULL, even when the extension was invalid and no graphics device had
  # been opened here — which would close an unrelated device. Track whether
  # a device was actually opened.
  device_opened <- FALSE
  if (!(is.null(save_plot))){
    extension <- substr(save_plot, nchar(save_plot)-2, nchar(save_plot))
    if (extension == "pdf"){
      pdf(save_plot)
      device_opened <- TRUE
    } else if (extension == "png") {
      png(save_plot)
      device_opened <- TRUE
    } else {
      print("save plot must be .png or .pdf")
    }
  }
  plot_base <- ggplot2::ggplot(data = stage_df_all, ggplot2::aes_(x = ~sample,
                                                                  y = ~percent,
                                                                  fill = ~stage)) +
    # ggplot2::theme_classic() +
    ggplot2::xlab("frequency") +
    ggplot2::geom_bar(stat = "identity") +
    ggplot2::scale_fill_manual(values = color, name = "stage")
  if (device_opened){
    print(plot_base)
    dev.off()
  }
  return(plot_base)
}
gene_corr <- function(seurat_obj, gene_1, gene_2, stage_color) {
  # Per-cell scatter plot of two genes' expression, colored by stage, with
  # marginal density plots arranged around it (ggplot2 + gridExtra).
  #
  # seurat_obj: Seurat object with expression in @data and a `stage` column
  #   in @meta.data.
  # gene_1, gene_2: gene names (rows of @data).
  # stage_color: color vector passed to scale_color_manual.
  # Draws the composed figure as a side effect; no meaningful return value.
  # Pull the two genes' expression, transposed so cells are rows.
  seurat_df <- as.data.frame(as.matrix(seurat_obj@data))
  seurat_df <- as.data.frame(t(seurat_df[c(gene_1, gene_2), ]))
  # Gene names may contain "-", which aes_string would parse as subtraction;
  # replace with "_" in both the lookup names and the data frame columns.
  genes <- sub("-", "_", c(gene_1, gene_2))
  gene_1 <- genes[1]
  gene_2 <- genes[2]
  seurat_obj@meta.data$stage <- factor (seurat_obj@meta.data$stage)
  names(seurat_df) <- sub("-", "_", names(seurat_df))
  # Align the expression rows with the meta.data cell order before attaching
  # the stage column.
  if(!identical(rownames(seurat_df), rownames(seurat_obj@meta.data))){
    seurat_df <- seurat_df[match(rownames(seurat_obj@meta.data),
                                 rownames(seurat_df)), ]
  }
  seurat_df$stage <- seurat_obj@meta.data$stage
  stages <- levels(seurat_df$stage)
  # Main panel: gene_1 vs gene_2 scatter, colored by stage.
  scatterPlot <- ggplot2::ggplot(data = seurat_df,
                                 ggplot2::aes_string(gene_1, gene_2)) +
    ggplot2::geom_point(ggplot2::aes(colour = stage)) +
    ggplot2::scale_color_manual(values = stage_color) +
    #ggplot2::theme_classic() +
    ggplot2::theme(legend.position= "none")
  # Marginal density of gene_1 (top edge).
  xdensity <- ggplot2::ggplot(data = seurat_df,
                              ggplot2::aes_string(gene_1)) +
    ggplot2::geom_density(ggplot2::aes(colour = stage)) +
    ggplot2::scale_color_manual(values = stage_color) +
    #ggplot2::theme_classic() +
    ggplot2::theme(legend.position = "none")
  # Marginal density of gene_2 (right edge, flipped to run vertically).
  ydensity <- ggplot2::ggplot(data = seurat_df,
                              ggplot2::aes_string(gene_2)) +
    ggplot2::geom_density(ggplot2::aes(color = stage)) +
    ggplot2::scale_color_manual(values = stage_color) +
    #ggplot2::theme_classic() +
    ggplot2::theme(legend.position = "none")
  ydensity <- ydensity + ggplot2::coord_flip()
  # Fully blank panel to fill the unused corner of the 2x2 grid.
  blankPlot <- ggplot2::ggplot() +
    ggplot2::geom_blank(ggplot2::aes(1,1))+
    ggplot2::theme(plot.background = ggplot2::element_blank(),
                   panel.grid.major = ggplot2::element_blank(),
                   panel.grid.minor = ggplot2::element_blank(),
                   panel.border = ggplot2::element_blank(),
                   panel.background = ggplot2::element_blank(),
                   axis.title.x = ggplot2::element_blank(),
                   axis.title.y = ggplot2::element_blank(),
                   axis.text.x = ggplot2::element_blank(),
                   axis.text.y = ggplot2::element_blank(),
                   axis.ticks = ggplot2::element_blank()
    )
  # Assemble: densities on the margins, scatter in the main panel.
  gridExtra::grid.arrange(xdensity, blankPlot, scatterPlot, ydensity,
                          ncol=2, nrow=2, widths=c(4, 1.4), heights=c(1.4, 5))
  #plot_1 <- ggplot2::ggplot(data = seurat_df, ggplot2::aes_string(gene_1,
  #                                                                gene_2)) +
  #  ggplot2::geom_point(ggplot2::aes(color = stage)) +
  #  ggplot2::scale_color_manual(values = stage_color) +
  #  ggplot2::theme_classic()
  #print(plot_1)
}
# Build a stages-x-genes data frame of either expression ranks or average
# expression for a Seurat subset.
#
# Args:
#   combSubset:  Seurat object; identities (@ident) define the stages.
#   experiment:  label written into the output `exp` column.
#   df_contents: "ranked" -> per-stage rank of each gene's average expression
#                (rank 1 = most highly expressed; ties share the lowest rank);
#                "expression" -> the log1p average expression itself.
#
# Returns a data frame with one row per stage plus `stage` and `exp` columns.
gene_exp_df <- function(combSubset, experiment, df_contents = "ranked") {
  # Find average expression of all genes at each stage
  avg.expression <- log1p(Seurat::AverageExpression(combSubset))
  if (df_contents == "ranked") {
    plot_df <- data.frame(genes = rownames(avg.expression))
    for (i in levels(combSubset@ident)) {
      print(i)
      # rank genes. Ties will all be given the lowest number. Ex.
      # c(1, 13, 5, 5, 7, 10) wil be 1 6 2 2 4 5
      rank_expression <- rank(-avg.expression[[i]], ties.method = "min")
      plot_df[[i]] <- rank_expression
    }
    rownames(plot_df) <- plot_df$genes
    plot_df$genes <- NULL
  } else if ( df_contents == "expression") {
    plot_df <- avg.expression
  } else {
    # NOTE(review): an invalid df_contents only prints a message here and
    # then fails below because plot_df was never assigned -- confirm intent.
    print("df_contents must be either 'ranked' or 'expression'")
  }
  # Change the plot_df to be stages as rows and genes as columns
  plot_df <- as.data.frame(t(plot_df))
  plot_df$stage <- rownames(plot_df)
  plot_df$exp <- experiment
  return(plot_df)
}
# Plot a gene's expression trajectory across experiments, one line per stage.
#
# Prints two plots: one with a fixed y range [low_lim, high_lim] (useful for
# comparing genes on a shared scale) and one autoscaled. `plot_df_all` must
# have columns `exp`, `stage`, and one column per gene (e.g. the output of
# gene_exp_df()).
#
# Args:
#   plot_df_all: long-format data frame described above.
#   gene_name:   column of plot_df_all plotted on the y axis.
#   low_lim, high_lim: y-axis limits for the fixed-range plot.
#   col: optional vector of stage colors; defaults to the Set1 palette.
plot_gene_exp <- function(plot_df_all, gene_name, low_lim = 0,
                          high_lim = 10000, col = NULL) {
  print(gene_name)
  stage <- "stage"
  experiment <- "exp"
  # Helper building the shared line+point plot; the original duplicated this
  # construction twice and also left a stray, unused scale_color_brewer()
  # expression between the two copies (dead code, now removed).
  build_plot <- function() {
    p <- ggplot2::ggplot(data = plot_df_all,
                         ggplot2::aes_string(experiment,
                                             gene_name,
                                             group = stage)) +
      ggplot2::geom_line(ggplot2::aes(color = stage)) +
      ggplot2::geom_point(ggplot2::aes(color = stage))
    if (is.null(col)) {
      p <- p + ggplot2::scale_color_brewer(palette = "Set1")
    } else {
      p <- p + ggplot2::scale_color_manual(values = col)
    }
    p
  }
  # Fixed-range version
  gene_plot <- build_plot() + ggplot2::ylim(low_lim, high_lim)
  # Autoscaled version
  gene_plot_2 <- build_plot()
  print(gene_plot)
  print(gene_plot_2)
}
# Add a mean-expression score for a gene set to @meta.data and optionally
# plot it on a dimensional reduction.
#
# Args:
#   seurat_obj:  Seurat object (expression read from @data).
#   gene_set:    character vector of genes; silently restricted to genes
#                present in the expression matrix.
#   plot_name:   name of the new @meta.data column (and plot title).
#   one_dataset: TRUE -> tSNE_PCA() on the whole object; FALSE -> full_umap()
#                highlighting `data_set`. Both helpers are defined elsewhere.
#   make_plot:   whether to draw the plot at all.
#   ...:         forwarded to the plotting helper.
#
# Returns the Seurat object with the new score column.
plot_gene_set <- function(seurat_obj, gene_set, plot_name,
                          one_dataset = TRUE, data_set = NULL,
                          make_plot = TRUE, ...){
  print(head(gene_set))
  # Keep only genes actually present in the expression matrix
  gene_set <- gene_set[gene_set %in%
                         rownames(seurat_obj@data)]
  mean_exp <- colMeans(as.matrix(seurat_obj@data[gene_set, ]), na.rm = TRUE)
  if (!all(names(mean_exp) == rownames(seurat_obj@meta.data))) {
    # Bug fix: a cell-order mismatch previously skipped adding the score
    # entirely, so the plot below failed on a missing column. Reorder the
    # score vector to the meta.data cell order instead.
    mean_exp <- mean_exp[rownames(seurat_obj@meta.data)]
  }
  seurat_obj@meta.data[[plot_name]] <- mean_exp
  if (make_plot){
    if (one_dataset){
      print(tSNE_PCA(seurat_obj, plot_name, ...))
    } else {
      print(full_umap(seurat_obj, data_set, plot_name, ...))
    }
  }
  return(seurat_obj)
}
# Average a gene-set score within groups of cells and plot it as a line.
#
# Adds a mean-expression score for `gene_set` to the object via
# plot_gene_set(), averages that score over the `group_by` meta.data column,
# optionally restricts to `plot_group`, and prints a line plot titled
# `plot_name`. (The "average_expresion" spelling is kept as-is because it is
# part of the printed output.)
genes_per_group <- function(seurat_obj, gene_set, plot_name, group_by,
                            one_dataset = TRUE, data_set = NULL,
                            make_plot = FALSE, plot_group = NULL) {
  seurat_obj <- plot_gene_set(seurat_obj = seurat_obj, gene_set = gene_set,
                              plot_name = plot_name, one_dataset = one_dataset,
                              data_set = data_set, make_plot = make_plot)
  # Mean score per group
  group_means <- aggregate(seurat_obj@meta.data[[plot_name]],
                           list(seurat_obj@meta.data[[group_by]]),
                           mean)
  print(group_means)
  names(group_means) <- c("group", "average_expresion")
  if (!is.null(plot_group)) {
    group_means <- group_means[group_means$group %in% plot_group, ]
  }
  print(group_means)
  line_plot <- ggplot2::ggplot(group_means,
                               ggplot2::aes(x = group,
                                            y = average_expresion,
                                            group = 1)) +
    ggplot2::geom_line() +
    ggplot2::ggtitle(plot_name)
  print(line_plot)
}
# Arrange one UMAP per sample in a two-column grid.
#
# Args:
#   mtec:        Seurat object.
#   sample_list: samples to plot (one panel each).
#   col_by:      "stage", or a gene / meta.data column name.
#
# full_stage_umap() / full_gene_umap() are defined elsewhere in the pipeline.
multiple_umap <- function(mtec, sample_list, col_by = "stage") {
  umap_list <- c()
  # Bug fixes: the original first ran an lapply whose result was immediately
  # discarded (duplicated work), and then looped over an undefined
  # `stage_list`; the loop now iterates the `sample_list` argument.
  for (i in sample_list) {
    if (col_by == "stage") {
      new_umap <- full_stage_umap(mtec, i)
    } else if (col_by %in% rownames(mtec@data) |
               col_by %in% colnames(mtec@meta.data)) {
      new_umap <- full_gene_umap(mtec, i, col_by)
    }
    umap_list[[i]] <- new_umap
  }
  nplots <- length(sample_list)
  if (nplots > 6) {
    print("Warning in multiple_umap:")
    print(paste0("Works best if number of plots is less than 6. You have ",
                 nplots, " total plots"))
  }
  rows <- ceiling(nplots / 2)
  cols <- 2
  gridExtra::grid.arrange(grobs = umap_list, nrow = rows, ncol = cols)
}
# Dimensional-reduction plot of all cells in which only the cells belonging
# to `data_set` are colored; all other cells are relabeled "all_samples"
# (drawn gray by the downstream plot helpers).
#
# Args:
#   mtec:          Seurat object.
#   data_set:      value(s) of `meta_data_col` identifying cells to highlight.
#   col_by:        gene name, meta.data column, or "cluster"/"Cluster".
#   plot_type:     name of a dimensional reduction in @dr (e.g. "umap").
#   dims_use:      two dimension indices to plot; defaults to c(1, 2).
#   meta_data_col: meta.data column matched against data_set.
#   ...:           forwarded to full_discrete_plots()/full_continuous_plots().
#
# Returns the ggplot object from the appropriate plot helper.
full_umap <- function(mtec, data_set, col_by, plot_type = "umap",
                      dims_use = NULL, meta_data_col = "exp", ...) {
  # Determine where in Seurat object to find variable to color by
  print(col_by)
  if (col_by %in% rownames(mtec@data)){
    col_by_data <- as.data.frame(mtec@data[col_by, ])
  }else if (col_by %in% colnames(mtec@meta.data)){
    col_by_data <- as.data.frame(mtec@meta.data[, col_by, drop = FALSE])
  }else if (col_by == "cluster" | col_by == "Cluster"){
    col_by_data <- as.data.frame(mtec@ident)
  }else {
    stop("col_by must be a gene, metric from meta data or 'cluster'")
  }
  # Make the name in the data frame the same regardless of what it was originally
  names(col_by_data) <- "colour_metric"
  col_by_data$all <- col_by_data$colour_metric
  if (is.null(dims_use)){
    dims_use <- c(1,2)
  }
  # Align the coloring data with the meta.data cell order
  if (!identical(rownames(mtec@meta.data), rownames(col_by_data))) {
    print("must reorder cells")
    col_by_data <- col_by_data[match(rownames(mtec@meta.data),
                                     rownames(col_by_data)), , drop = FALSE]
  }
  col_by_data[[meta_data_col]] <- mtec@meta.data[[meta_data_col]]
  # Add "all_samples" as a factor level before assigning it below
  if (is.factor(col_by_data$all)){
    col_by_data$all <- factor(col_by_data$all,
                              levels = c("all_samples", levels(col_by_data$all)))
  }
  # Every cell NOT in the highlighted dataset is relabeled "all_samples"
  col_by_data$all[!(col_by_data[[meta_data_col]] %in% data_set)] <- "all_samples"
  if (plot_type %in% names(mtec@dr)){
    plot_coord <- mtec@dr[[plot_type]]@cell.embeddings
    plot_names <- colnames(plot_coord)
    ndims <- length(plot_names)
    # Map requested dimension indices to embedding column names
    plot_cols <- lapply(dims_use, function(x){
      if (x > ndims) {
        stop("dims_use must be equal to or less than number of dimensions")
      } else {
        plot_col <- plot_names[x]
        return(plot_col)
      }
    })
    plot_cols <- unlist(plot_cols)
    # NOTE(review): this subsets ROWS with a condition over COLUMN names
    # (should arguably be plot_coord[, colnames(plot_coord) %in% plot_cols]).
    # It works by accident for a 2-D embedding with dims_use = c(1, 2)
    # because the length-2 logical recycles over rows -- confirm.
    plot_coord <- plot_coord[colnames(plot_coord) %in% plot_cols, ]
    axis_names <- colnames(plot_coord)
    colnames(plot_coord) <- c("dim1", "dim2")
    plot_df <- merge(plot_coord, col_by_data, by = "row.names")
  } else {
    stop("plot type must be a dimensional reduction in dr slot")
  }
  # Plot as discrete
  if (!is.numeric(col_by_data$colour_metric)){
    return_plot <- full_discrete_plots(data_set, plot_df, axis_names = axis_names,
                                       col_by = col_by, ...)
  # Plot as continuous
  }else{
    return_plot <- full_continuous_plots(data_set, plot_df, col_by = col_by, ...)
  }
  return(return_plot)
}
# Discrete-colored dim-reduction plot: cells labeled "all_samples" are drawn
# first in light gray, then the highlighted cells are drawn on top, colored
# by their value in the `all` column.
#
# Args:
#   data_set:    sample name(s); used only for the plot title.
#   plot_df:     data frame with dim1, dim2, and `all` (see full_umap()).
#   col_by:      legend title.
#   axis_names:  x / y axis labels.
#   color:       optional named color vector; a Set1 ramp otherwise.
#   save_plot:   optional .pdf/.png output path.
#   show_legend: whether to draw the legend.
#
# Returns the ggplot object.
full_discrete_plots <- function(data_set, plot_df, col_by, axis_names = c("dim1", "dim2"),
                                color = NULL, save_plot = NULL, show_legend = TRUE) {
  if (!(is.null(save_plot))){
    extension <- substr(save_plot, nchar(save_plot)-2, nchar(save_plot))
    if (extension == "pdf"){
      pdf(save_plot)
    } else if (extension == "png") {
      png(save_plot)
    } else {
      # NOTE(review): on a bad extension no device is opened, yet dev.off()
      # below still runs and may close an unrelated device -- confirm intent.
      print("save plot must be .png or .pdf")
    }
  }
  # Background (gray) layer vs highlighted (colored) layer
  plot1 <- plot_df[plot_df$all == "all_samples", ]
  plot2 <- plot_df[plot_df$all != "all_samples", ]
  base_plot <- ggplot2::ggplot(data = plot2, ggplot2::aes_(~dim1,
                                                           ~dim2))
  base_plot <- base_plot + ggplot2::geom_point(data = plot1,
                                               ggplot2::aes_(~dim1, ~dim2),
                                               color = "#DCDCDC",
                                               size = 1.5,
                                               show.legend = FALSE)
  base_plot <- base_plot + ggplot2::geom_point(data = plot2,
                                               ggplot2::aes_(~dim1, ~dim2,
                                                             color = ~all),
                                               size = 1.5,
                                               show.legend = show_legend)
  base_plot <- base_plot + #ggplot2::theme_classic() +
    ggplot2::ggtitle(paste(data_set, collapse = "_")) +
    ggplot2::xlab(axis_names[1]) +
    ggplot2::ylab(axis_names[2])
  if (is.null(color)) {
    # Interpolate as many colors as there are highlighted groups
    nColors <- length(levels(factor(plot2$all)))
    base_plot <- base_plot + ggplot2::scale_color_manual(
      values = grDevices::colorRampPalette(
        RColorBrewer::brewer.pal(9, "Set1"))(nColors), name = col_by)
  } else {
    base_plot <- base_plot +
      ggplot2::scale_color_manual(values = color, name = col_by)
  }
  if (!(is.null(save_plot))){
    print(base_plot)
    dev.off()
  }
  return(base_plot)
}
# Continuous-colored dim-reduction plot: "all_samples" cells drawn in gray,
# highlighted cells colored by `colour_metric` on a two-color gradient.
#
# Args:
#   data_set:    sample name(s); used only for the plot title.
#   plot_df:     data frame with dim1, dim2, colour_metric, and `all`
#                (see full_umap()).
#   col_by:      legend / title label.
#   color:       optional length-2 vector c(low, high) for the gradient;
#                defaults to teal -> orange.
#   limits:      optional gradient limits.
#   axis_names:  x / y axis labels.
#   save_plot:   optional .pdf/.png output path.
#   show_legend: whether to draw the legend.
#
# Returns the ggplot object.
full_continuous_plots <- function(data_set, plot_df, col_by, color = NULL,
                                  limits = NULL, axis_names = c("dim1", "dim2"),
                                  save_plot = NULL, show_legend = TRUE) {
  if (!(is.null(save_plot))){
    extension <- substr(save_plot, nchar(save_plot)-2, nchar(save_plot))
    if (extension == "pdf"){
      pdf(save_plot)
    } else if (extension == "png") {
      png(save_plot)
    } else {
      print("save plot must be .png or .pdf")
    }
  }
  plot_name_comb <- paste(data_set, collapse = "_")
  if (is.null(color)) {
    low <- "#00AFBB"
    high <- "#FC4E07"
  } else {
    # Bug fix: `low`/`high` were left undefined whenever a `color` was
    # supplied, making scale_color_gradient() error. Interpret `color`
    # as c(low, high).
    low <- color[1]
    high <- color[2]
  }
  # Background (gray) layer vs highlighted (gradient-colored) layer
  plot1 <- plot_df[plot_df$all == "all_samples", ]
  plot2 <- plot_df[plot_df$all != "all_samples", ]
  base_plot <- ggplot2::ggplot(data = plot2, ggplot2::aes_(~dim1, ~dim2))
  base_plot <- base_plot + ggplot2::geom_point(data = plot1,
                                               ggplot2::aes_(~dim1, ~dim2),
                                               color = "#DCDCDC",
                                               size = 1.5,
                                               show.legend = FALSE)
  base_plot <- base_plot + ggplot2::geom_point(data = plot2,
                                               ggplot2::aes_(~dim1, ~dim2,
                                                             color = ~colour_metric),
                                               size = 1.5,
                                               show.legend = show_legend)
  base_plot <- base_plot +
    ggplot2::ggtitle(paste0(plot_name_comb, " ", col_by)) +
    ggplot2::xlab(axis_names[1]) +
    ggplot2::ylab(axis_names[2])
  if(is.null(limits)){
    base_plot <- base_plot + ggplot2::scale_color_gradient(low = low, high = high,
                                                           name = col_by)
  } else {
    base_plot <- base_plot + ggplot2::scale_color_gradient(low = low, high = high,
                                                           name = col_by, limits = limits)
  }
  if (!(is.null(save_plot))){
    print(base_plot)
    dev.off()
  }
  return(base_plot)
}
# Plot the reduction with one group highlighted and all other cells gray.
#
# Args:
#   seurat_object: Seurat object.
#   meta_data_col: meta.data column holding the grouping.
#   group:         value of that column to highlight.
#   color_df:      optional named color vector (e.g. stage_color); the
#                  highlight color is looked up by `group`. Defaults to red.
#   ...:           forwarded to tSNE_PCA() (defined elsewhere).
highlight_one_group <- function(seurat_object, meta_data_col, group, color_df = NULL,
                                ...){
  # Label every cell as either the group of interest or "other_cells"
  seurat_object@meta.data$highlight_group <- "other_cells"
  seurat_object@meta.data$highlight_group[
    seurat_object@meta.data[[meta_data_col]] == group] <- group
  if (!(is.null(color_df))){
    color_df <- color_df[group]
    color_df <- c(color_df, other_cells = "#DCDCDC")
  } else {
    # Bug fix: `c(group = "#FF0000", ...)` created an element literally named
    # "group" instead of using the value of `group`, so the highlight color
    # never matched the group label. Build the names explicitly.
    color_df <- stats::setNames(c("#FF0000", "#DCDCDC"),
                                c(group, "other_cells"))
  }
  tSNE_PCA(seurat_object, "highlight_group", color = color_df, ...)
}
# Line plot of average expression across conditions for the given genes.
#
# Args:
#   seurat_object: Seurat object passed to get_avg_exp() (defined elsewhere).
#   gene_list:     character vector of gene names to plot (one line each).
#   save_plot:     optional .pdf/.png output path.
#   ...:           forwarded to get_avg_exp() (e.g. avg_expr_id).
plot_avg_exp_genes <- function(seurat_object, gene_list, save_plot = NULL, ...){
  if (!(is.null(save_plot))){
    extension <- substr(save_plot, nchar(save_plot)-2, nchar(save_plot))
    if (extension == "pdf"){
      pdf(save_plot)
    } else if (extension == "png") {
      png(save_plot)
    } else {
      print("save plot must be .png or .pdf")
    }
  }
  avg_expression <- get_avg_exp(seurat_object, ...)
  # Bug fix: subset by the `gene_list` argument; the function previously
  # referenced the global `average_gene_list` by accident, so the argument
  # was silently ignored.
  avg_expression <- avg_expression[rownames(avg_expression) %in% gene_list, ]
  avg_expression$gene <- rownames(avg_expression)
  avg_expression_melt <- reshape2::melt(avg_expression)
  base_plot <- ggplot2::ggplot(avg_expression_melt,
                               ggplot2::aes(x = variable, y = value,
                                            group = gene)) +
    ggplot2::geom_line(ggplot2::aes(linetype = gene))
  if (!(is.null(save_plot))){
    print(base_plot)
    dev.off()
  }
}
# Fraction of cells in a dataset that are cycling (phase S or G2M).
#
# Args:
#   seurat_object: Seurat object with a `cycle_phase` meta.data column.
#   data_set:      value of `meta_data_col` selecting the cells to use.
#   meta_data_col: meta.data column matched against data_set.
#
# Returns a named length-1 numeric (name = data_set).
percent_cycling_cells <- function(seurat_object, data_set, meta_data_col){
  cells_use <- rownames(seurat_object@meta.data)[
    seurat_object@meta.data[[meta_data_col]] == data_set]
  new_seurat <- Seurat::SubsetData(seurat_object, cells.use = cells_use)
  cycling_cells <- table(new_seurat@meta.data$cycle_phase)
  # Bug fix: `"S" %in% cycling_cells` tested the table's *counts*, not the
  # phase names, so a missing S phase was only detected by coincidence.
  # Check the names, and guard G2M the same way.
  if (!("S" %in% names(cycling_cells))) {
    cycling_cells["S"] <- 0
  }
  if (!("G2M" %in% names(cycling_cells))) {
    cycling_cells["G2M"] <- 0
  }
  cycling_percent <- (cycling_cells["G2M"] +
                        cycling_cells["S"])/nrow(new_seurat@meta.data)
  names(cycling_percent) <- data_set
  return(cycling_percent)
}
# Median UMI count per cell, optionally computed on a single batch.
# For this function, `subset_by` is best used with a batch column (e.g. "exp").
#
# Args:
#   seurat_obj:    Seurat object (raw counts read from @raw.data).
#   subset_seurat: if TRUE, restrict to cells with identity `subset_val`
#                  after setting identities from the `subset_by` column.
#   subset_by:     meta.data column used to set identities.
#   subset_val:    identity value to keep.
#
# Returns the median of the per-cell column sums of the raw count matrix.
get_umi <- function(seurat_obj, subset_seurat = FALSE, subset_by = "exp",
                    subset_val = "isoControlBeg"){
  if (subset_seurat){
    # Guard: ident and meta.data must describe cells in the same order
    if (!identical(names(seurat_obj@ident), rownames(seurat_obj@meta.data))){
      stop("ident and meta.data slots not in the same order")
    }
    seurat_obj <- Seurat::SetAllIdent(seurat_obj, id = subset_by)
    seurat_obj <- Seurat::SubsetData(seurat_obj, ident.use = subset_val,
                                     subset.raw = TRUE)
  }
  raw_counts <- as.matrix(seurat_obj@raw.data)
  median(colSums(raw_counts))
}
# Per-batch gene counts and gene-list detection percentages.
#
# Relies on globals: `gene_lists` (named list of gene-name vectors) and,
# when downsample_UMI is TRUE, `lowest_UMI` (target median UMI; see
# get_umi()). Uses the helpers gene_count_function() and percent_list()
# defined below.
#
# Args:
#   seurat_obj:     Seurat object (raw counts read from @raw.data).
#   downsample_UMI: downsample counts so batches are UMI-comparable
#                   (deterministic via set.seed(0)).
#   one_batch:      restrict to cells with identity `batch_name` (identities
#                   taken from the `batch` meta.data column) first.
#
# Returns a one-element named list:
#   list(<batch_name> = list(counts = <per-cell data.frame>,
#                            percents = <per-gene-list vector>)).
percents_and_counts <- function(seurat_obj, downsample_UMI = FALSE,
      one_batch = FALSE, batch = "exp", batch_name = "isoControlBeg"){
  if (one_batch){
    if (identical(names(seurat_obj@ident), rownames(seurat_obj@meta.data))){
      seurat_obj <- Seurat::SetAllIdent(seurat_obj, id = batch)
      seurat_obj <- Seurat::SubsetData(seurat_obj, ident.use = batch_name,
                                      subset.raw = TRUE)
    } else {
      stop("ident and meta.data slots not in the same order")
    }
  }
  cell_matrix <- as.matrix(seurat_obj@raw.data)
  if (downsample_UMI){
    # Downsample all cells so the median UMI matches `lowest_UMI` (global)
    data_umi <- median(colSums(cell_matrix))
    factor <- lowest_UMI/data_umi
    set.seed(0)
    cell_matrix <- DropletUtils::downsampleMatrix(cell_matrix, prop = factor)
  }
  #  if (one_population) {
  #   if (identical(names(seurat_obj@ident), rownames(seurat_obj@meta.data))){
  #     seurat_obj <- Seurat::SetAllIdent(seurat_obj, id = population)
  #     seurat_obj <- Seurat::SubsetData(seurat_obj, ident.use = population_name)
  #   } else {
  #     stop("ident and meta.data slots not in the same order")
  #   }
  #   cell_matrix <- cell_matrix[ , colnames(cell_matrix) %in% colnames(seurat_obj@data)]
  # }
  # Per-cell count of detected genes for each gene list (one column each)
  gene_count_list <- lapply(names(gene_lists), function(x)
    gene_count_function(cell_matrix, gene_lists[[x]], x))
  gene_count_df <- do.call(cbind, gene_count_list)
  gene_count_df$exp <- batch_name
  # Fraction of each gene list detected in at least one cell
  gene_percent_list <- sapply(names(gene_lists), function(x)
    percent_list(cell_matrix, gene_lists[[x]], x))
  return_list <- list(list(counts = gene_count_df, percents = gene_percent_list))
  names(return_list) <- batch_name
  return(return_list)
}
# percents_and_counts <- function(seurat_obj, downsample_UMI = FALSE,
# one_batch = FALSE, batch = "exp", batch_name = "isoControlBeg",
# one_population = TRUE, population = "stage", population_name = "Aire_positive"){
# if (one_batch){
# if (identical(names(seurat_obj@ident), rownames(seurat_obj@meta.data))){
# seurat_obj <- Seurat::SetAllIdent(seurat_obj, id = batch)
# seurat_obj <- Seurat::SubsetData(seurat_obj, ident.use = batch_name,
# subset.raw = TRUE)
# } else {
# stop("ident and meta.data slots not in the same order")
# }
# }
# cell_matrix <- as.matrix(seurat_obj@raw.data)
# if (downsample_UMI){
# data_umi <- median(colSums(cell_matrix))
# factor <- lowest_UMI/data_umi
# set.seed(0)
# cell_matrix <- DropletUtils::downsampleMatrix(cell_matrix, prop = factor)
# }
# if (one_population) {
# if (identical(names(seurat_obj@ident), rownames(seurat_obj@meta.data))){
# seurat_obj <- Seurat::SetAllIdent(seurat_obj, id = population)
# seurat_obj <- Seurat::SubsetData(seurat_obj, ident.use = population_name)
# } else {
# stop("ident and meta.data slots not in the same order")
# }
# cell_matrix <- cell_matrix[ , colnames(cell_matrix) %in% colnames(seurat_obj@data)]
# }
# gene_count_list <- lapply(names(gene_lists), function(x)
# gene_count_function(cell_matrix, gene_lists[[x]], x))
# gene_count_df <- do.call(cbind, gene_count_list)
# gene_count_df$exp <- batch_name
# gene_percent_list <- sapply(names(gene_lists), function(x)
# percent_list(cell_matrix, gene_lists[[x]], x))
# return_list <- list(list(counts = gene_count_df, percents = gene_percent_list))
# names(return_list) <- batch_name
# return(return_list)
# }
# Per-cell count of genes from `gene_list` with nonzero expression.
#
# Args:
#   cell_matrix: genes-x-cells numeric matrix with gene rownames.
#   gene_list:   gene names to restrict to (missing names are ignored).
#   list_name:   name given to the single output column.
#
# Returns a one-column data.frame (rows = cells, column = `list_name`).
gene_count_function <- function(cell_matrix, gene_list, list_name){
  keep <- rownames(cell_matrix) %in% gene_list
  subset_mat <- cell_matrix[keep, ]
  per_cell <- apply(subset_mat, 2, function(col) sum(col > 0))
  result <- data.frame(per_cell)
  names(result) <- list_name
  result
}
# Per-cell total UMI count summed over genes in `gene_list`.
#
# Args:
#   cell_matrix: genes-x-cells numeric matrix with gene rownames.
#   gene_list:   gene names to restrict to (missing names are ignored).
#   list_name:   name given to the single output column.
#
# Returns a one-column data.frame (rows = cells, column = `list_name`).
umi_count_function <- function(cell_matrix, gene_list, list_name){
  subset_mat <- cell_matrix[rownames(cell_matrix) %in% gene_list, ]
  totals <- apply(subset_mat, 2, sum)
  result <- data.frame(totals)
  names(result) <- list_name
  result
}
# Fraction of `gene_list` detected (expressed in at least one cell).
#
# Args:
#   cell_matrix:    genes-x-cells numeric matrix with gene rownames.
#   gene_list:      gene names to test; genes absent from the matrix count
#                   as undetected (they stay in the denominator).
#   gene_list_name: kept for interface compatibility; not used.
#
# Returns a single numeric in [0, 1].
percent_list <- function(cell_matrix, gene_list, gene_list_name){
  # Restrict to the genes of interest that are present in the matrix
  subset_mat <- cell_matrix[rownames(cell_matrix) %in% gene_list, ]
  # Number of cells expressing each gene
  cells_per_gene <- apply(subset_mat, 1, function(row) sum(row > 0))
  # A gene counts as detected when >= 1 cell expresses it
  n_detected <- sum(cells_per_gene > 0)
  n_detected / length(gene_list)
}
# Extract either the percents or the counts entry for one batch from the
# list produced by percents_and_counts().
#
# Args:
#   percent_counts_list: named list; each element is list(counts=, percents=).
#   list_slot: name (or index) of the batch to pull out.
#   percent / count: exactly one must be TRUE, selecting what to return.
get_perc_count <- function(percent_counts_list, list_slot, percent = FALSE,
                           count = FALSE){
  percent_counts_one <- percent_counts_list[[list_slot]]
  # Scalar flags: use && (short-circuit, scalar) rather than elementwise &
  if (percent && count) {
    stop("can only return percent or count")
  } else if (!percent && !count){
    # (error-message typo "must bet" fixed)
    stop("must be either percent or count")
  } else if (percent) {
    return_val <- percent_counts_one$percents
  } else {
    return_val <- percent_counts_one$counts
  }
  return(return_val)
}
# Scatter/trend plot of a value along slingshot pseudotime.
#
# Args:
#   seurat_object:    object passed to make_plot_df() (defined elsewhere).
#   sling_object:     fitted slingshot object (curves + pseudotime).
#   y_val:            gene/metric plotted on the y axis.
#   col_by:           variable used to color points.
#   pseudotime_curve: name of the pseudotime column (e.g. "curve1").
#   color:            optional named color vector; Set1 ramp otherwise.
#   save_plot:        optional output path (written via ggsave).
#   range:            optional x-axis (pseudotime) limits.
#
# Returns the ggplot object.
plot_sling_pseudotime <- function(seurat_object, sling_object, y_val, col_by,
                                  pseudotime_curve, color = NULL,
                                  save_plot = NULL, range = NULL) {
  print(save_plot)
  pseudotime <- data.frame(slingshot::slingPseudotime(sling_object))
  pseudotime_df <- data.frame(pseudotime = pseudotime[[pseudotime_curve]],
                              row.names = rownames(pseudotime))
  # Drop cells not assigned to this curve (NA pseudotime)
  pseudotime_df <- pseudotime_df[!is.na(pseudotime_df$pseudotime), , drop = FALSE]
  plot_data <- make_plot_df(seurat_object = seurat_object, y_val = y_val,
                            x_val = col_by, col_by = col_by)
  plot_data <- merge(pseudotime_df, plot_data, by = "row.names", all = FALSE)
  # NOTE(review): aes(color = col_by) relies on plot_data carrying columns
  # literally named "y_value" and "col_by" (built by make_plot_df) -- confirm
  # against that helper.
  base_plot <- ggplot2::ggplot(plot_data, ggplot2::aes(x = pseudotime,
                                                       y = y_value,
                                                       color = col_by)) +
    ggplot2::geom_point() +
    ggplot2::geom_smooth(se = FALSE, color = "black")
  if (is.null(color)){
    # Bug fixes: the data frame here is `plot_data` (the original referenced
    # an undefined `plot_df`), and the RColorBrewer function is `brewer.pal`
    # (`RColorBrewer::.pal` does not exist).
    nColors <- length(levels(factor(plot_data$col_by)))
    base_plot <- base_plot +
      ggplot2::scale_color_manual(values =
        (grDevices::colorRampPalette(RColorBrewer::brewer.pal(9, "Set1")))(nColors),
        name = col_by)
  } else {
    base_plot <- base_plot +
      ggplot2::scale_color_manual(values = color)
  }
  if (!(is.null(range))) {
    base_plot <- base_plot + ggplot2::xlim(range)
  }
  base_plot <- base_plot + ggplot2::ylab(y_val)
  if (!(is.null(save_plot))){
    ggplot2::ggsave(save_plot, plot = base_plot, height = 3, width = 7)
  }
  return(base_plot)
}
##################################################################################
# Get files from Snakemake
aireTrace <- snakemake@input[[1]]
controls <- snakemake@input[[2]]
allSamples <- snakemake@input[[3]]
controls_slingshot <- snakemake@input[[4]]
allSamples_slingshot <- snakemake@input[[5]]
early_aire_mtec <- snakemake@input[[6]]
# Bug fix: this was assigned to `sav_dir`, but every downstream call uses
# `save_dir`, which would otherwise be an undefined object.
save_dir <- snakemake@output[[1]]
data_directory <- snakemake@params[[1]]
# Colors for plotting (one hex color per mTEC stage)
stage_color_df <- data.frame("Cortico_medullary" = "#CC6600", "Ccl21a_high" = "#009933",
                             "Early_Aire" = "#0066CC", "Aire_positive" = "#660099",
                             "Late_Aire" = "#FF0000", "Tuft" = "#990000",
                             "unknown" = "#FDFBFB")
stage_color <- t(stage_color_df)[ , 1]
# Load in data (each .rda holds one saved object, retrieved via get())
mtec <- get(load(aireTrace))
mtec_wt <- get(load(controls))
mtecCombined <- get(load(allSamples))
wt_slingshot <- get(load(controls_slingshot))
all_slingshot <- get(load(allSamples_slingshot))
progenitor_mtec <- get(load(early_aire_mtec))
# NOTE(review): `TFs` is not defined in this file; presumably it comes from
# one of the loaded .rda files or an attached package -- confirm.
TFs_all <- c("H2afz", "Top2a", "Hmgb1", "Hmgn1", "H2afx", as.character(TFs))
# Set theme
ggplot2::theme_set(ggplot2::theme_classic(base_size = 18))
############
# Figure 1 #
############
print("Figure 1")
# Make plots from aire trace
# Figure 1b
# Dim-reduction plot colored by stage (tSNE_PCA is defined elsewhere).
# NOTE(review): `save_dir` is assigned as `sav_dir` above -- confirm.
tSNE_PCA(mtec, "stage", color = stage_color, show_legend = FALSE,
         save_plot = paste0(save_dir, "figure_1b.pdf"))
# Figure 1c
# Violin plots of marker genes, pick a few more here
trio_plots(mtec, geneset = c("Ackr4", "Ccl21a", "Aire"),
           cell_cycle = FALSE, plot_violin = TRUE, jitter_and_violin = FALSE,
           plot_jitter = FALSE, color = stage_color, sep_by = "cluster",
           save_plot = paste0(save_dir, "figure_1cI.pdf"))
trio_plots(mtec, geneset = c("Krt10", "Trpm5", "GFP"),
           cell_cycle = FALSE, plot_violin = TRUE, jitter_and_violin = FALSE,
           plot_jitter = FALSE, color = stage_color, sep_by = "cluster",
           save_plot = paste0(save_dir, "figure_1cII.pdf"))
# Figure 1d
# Pseudotime of Aire and GFP
############
# Figure 2 #
############
print("Figure 2")
# Bug fix: this load() call was missing its closing parenthesis, which made
# the whole script a parse error. Loads the `gene_lists` object.
load(paste0(data_directory, "gene_lists.rda"))
# Figure 2a
# Dot plot of all markers Change this to be most interesting markers
markers_to_plot_full <- c("Krt5", "Ccl21a", "Ascl1", "Hes1", "Hmgb2", "Hmgn2",
  "Hmgb1", "H2afx", "Stmn1", "Tubb5", "Mki67", "Ptma", "Aire", "Utf1", "Fezf2",
  "Krt10", "Nupr1", "Cebpb", "Trpm5", "Pou2f3", "Dclk1")
# Dot plot without legend (figure panel). T/F replaced with TRUE/FALSE.
pdf(paste0(save_dir, "figure_2aI.pdf"))
dot_plot <- Seurat::DotPlot(mtec, genes.plot = rev(markers_to_plot_full),
                            cols.use = c("blue", "red"), x.lab.rot = TRUE,
                            plot.legend = FALSE, dot.scale = 8, do.return = TRUE)
dev.off()
# Same dot plot with the legend (used to extract the key)
pdf(paste0(save_dir, "figure_2aII.pdf"))
dot_plot <- Seurat::DotPlot(mtec, genes.plot = rev(markers_to_plot_full),
                            cols.use = c("blue", "red"), x.lab.rot = TRUE,
                            plot.legend = TRUE, dot.scale = 8, do.return = TRUE)
dev.off()
# Figure 2b
# Jitter plots of chromatin modifiers overlayed with cell cycle state
trio_plots(mtec, geneset = c("Hmgb2", "Tubb5", "Stmn1"),
           cell_cycle = TRUE, jitter_and_violin = FALSE, plot_jitter = TRUE,
           color = stage_color, sep_by = "cluster",
           save_plot = paste0(save_dir, "figure_2bI.pdf"), group_color = FALSE)
# Same layout for the Aire-stage marker genes
trio_plots(mtec, geneset = c("Aire", "Ccl21a", "Fezf2"),
           cell_cycle = TRUE, jitter_and_violin = FALSE, plot_jitter = TRUE,
           color = stage_color, sep_by = "cluster",
           save_plot = paste0(save_dir, "figure_2bII.pdf"), group_color = FALSE)
# Figure 2c
# Flow of Mki76/Aire
# Figure 2d
# SC velocity
# Figure 2e
# UMAP of cell stage
# Change this to be slingshot data
##########
# This is my crummy workaround until I get my new package running. If plotDimRed
# is returning a ggplot object, I'm golden
umap_coords <- data.frame(mtec_wt@dr$umap@cell.embeddings)
umap_coords$stage <- mtec_wt@meta.data$stage
base_plot <- ggplot2::ggplot(umap_coords,
                             ggplot2::aes(x = UMAP1, y = UMAP2, color = stage)) +
  ggplot2::geom_point() +
  ggplot2::scale_color_manual(values = stage_color) +
  ggplot2::theme(legend.position = "none")
##############
# Keep this
# Overlay the first slingshot principal curve on the WT UMAP.
# Bug fixes: slingCurves() needs its namespace (slingshot is never attached),
# `dims` was undefined (the first two embedding dimensions are plotted), and
# the local was renamed from `c`, which shadowed base::c().
dims <- c(1, 2)
sling_curve <- slingshot::slingCurves(wt_slingshot)[[1]]
curve1_coord <- data.frame(sling_curve$s[sling_curve$ord, dims])
curve1_coord$stage <- "line"
# This line cuts off the long tail... Probably a better option is
# to just remove unknown before running slingshot.
curve1_coord <- curve1_coord[curve1_coord$UMAP2 > -5, ]
base_plot <- base_plot + ggplot2::geom_path(data = curve1_coord,
                                            ggplot2::aes(UMAP1, UMAP2),
                                            color = "black", size = 1)
ggplot2::ggsave(paste0(save_dir, "figure_2e.pdf"), plot = base_plot)
# Load gene lists (defines `gene_lists`). A duplicate load from a hard-coded
# absolute path ("/home/kwells4/...") was removed for portability; the
# Snakemake-provided data_directory copy is the one that matters.
load(paste0(data_directory, "gene_lists.rda"))
gene_names <- c("Aire", "Fezf2")
plot_sets <- c("tra_fantom", "aire_genes", "fezf2_genes")
# Figure 2f
# Pseudotime of genes: map each gene / gene set to its output file
plot_names <- list(protein_coding = paste0(save_dir, "figure_2fI.pdf"),
                   tra_fantom = paste0(save_dir, "figure_2fII.pdf"),
                   aire_genes = paste0(save_dir, "figure_2fIV.pdf"),
                   Aire = paste0(save_dir, "figure_2fIII.pdf"),
                   fezf2_genes = paste0(save_dir, "figure_2fVI.pdf"),
                   Fezf2 = paste0(save_dir, "figure_2fV.pdf"))
# Add a mean-expression score column for every gene set to the WT object
for (gene_set in names(gene_lists)) {
  mtec_wt <- plot_gene_set(mtec_wt,
                           gene_lists[[gene_set]], gene_set, make_plot = FALSE)
}
# Remove unknown cells
mtec_wt_plot <- mtec_wt
mtec_wt_plot@assay$DE <- NULL
mtec_wt_plot <- Seurat::SubsetData(mtec_wt_plot, ident.remove = "unknown")
# Plot each of the genes and gene sets in pseudotime, end at 16 because that
# is where the "unknown" cells are
plot_list <- lapply(names(plot_names), function(x) plot_sling_pseudotime(
  mtec_wt_plot, wt_slingshot, x, "stage", "curve1", color = stage_color,
  range = c(0, 16), save_plot = plot_names[[x]]))
# ############
# # Figure 3 #
# ############
print("Figure 3")
# Map internal experiment ids to publication-facing labels
new_exp_names <- c(aireTrace = "Trace exp",
                   isoControlBeg = "Ctl wk 2",
                   isoControlEnd = "Ctl wk 10",
                   timepoint1 = "wk 2",
                   timepoint2 = "wk 4",
                   timepoint3 = "wk 6",
                   timepoint5 = "wk 10")
mtecCombined@meta.data$pub_exp <- new_exp_names[mtecCombined@meta.data$exp]
mtecCombined@meta.data$pub_exp <- factor(mtecCombined@meta.data$pub_exp,
                                         levels = unname(new_exp_names))
# All datasets except the lineage-trace experiment
data_sets <- unique(mtecCombined@meta.data$pub_exp)
data_sets <- data_sets[data_sets != new_exp_names['aireTrace']]
# stage_color_df_2 <- data.frame("Cortico_medullary" = "#CC6600",
#                            "Ccl21a_high" = "#009933",
#                            "Early_Aire" = "#0066CC",
#                            "Aire_positive" = "#660099",
#                            "Late_Aire" = "#FF0000",
#                            "Tuft" = "#990000",
#                            "unknown" = "#D3D3D3")
# stage_color2 <- t(stage_color_df_2)[ , 1]
# Same palette keyed by the camel-case stage labels used in the combined data
stage_color_df_3 <- data.frame("CorticoMedullary" = "#CC6600",
                               "Ccl21aHigh" = "#009933",
                               "EarlyAire" = "#0066CC",
                               "AirePositive" = "#660099",
                               "LateAire" = "#FF0000",
                               "Tuft" = "#990000",
                               "unknown" = "#D3D3D3")
stage_color3 <- t(stage_color_df_3)[ , 1]
# Figure 3B
# UMAP of all cells Either put the key on both or remove the key from both
tSNE_PCA(mtecCombined, "stage", save_plot = paste0(save_dir, "figure_3bI.pdf"),
         color = stage_color, show_legend = FALSE)
# UMAP highlighting aire_trace cells colored by aire_trace labels
full_umap(mtecCombined, "aireTrace", col_by = "at_stage", color = stage_color,
          save_plot = paste0(save_dir, "figure_3bII.pdf"), show_legend = FALSE)
# Figure 3C
# Barplots of recovery: stage composition per sample
mtecCombSub <- mtecCombined
mtecCombSub@assay$ablation_DE <- NULL
stage_list_all <- lapply(data_sets, function(x) populations_dfs_new(mtecCombSub,
                          x, subsample = TRUE, subsample_by = "pub_exp"))
stage_df_all <- do.call("rbind", stage_list_all)
stage_df_all$sample <- factor(stage_df_all$sample, levels = unname(new_exp_names))
population_plots(stage_df_all, color = stage_color,
                 save_plot = paste0(save_dir, "figure_3c.pdf"))
# Figure 3D
# Umap with slingshot overlay
##########
# This is my crummy workaround until I get my new package running. If plotDimRed
# is returning a ggplot object, I'm golden
umap_coords <- data.frame(mtecCombined@dr$umap@cell.embeddings)
umap_coords$stage <- mtecCombined@meta.data$stage
base_plot <- ggplot2::ggplot(umap_coords,
                             ggplot2::aes(x = UMAP1, y = UMAP2, color = stage)) +
  ggplot2::geom_point() +
  ggplot2::scale_color_manual(values = stage_color) +
  ggplot2::theme(legend.position = "none")
##############
# Keep this
# Overlay the third slingshot curve on the all-samples UMAP.
# Bug fixes: slingCurves() needs its namespace, the loaded slingshot object
# is `all_slingshot` (`allSamples_slingshot` is the input *file path*
# string), and `dims` was undefined -- the first two dimensions are plotted.
dims <- c(1, 2)
sling_curve <- slingshot::slingCurves(all_slingshot)[[3]]
curve1_coord <- data.frame(sling_curve$s[sling_curve$ord, dims])
curve1_coord$stage <- "line"
# This line cuts off the long tail... Probably a better option is
# to just remove unknown before running slingshot.
curve1_coord <- curve1_coord[curve1_coord$UMAP2 > -5, ]
base_plot <- base_plot + ggplot2::geom_path(data = curve1_coord,
                                            ggplot2::aes(UMAP1, UMAP2),
                                            color = "black", size = 1)
ggplot2::ggsave(paste0(save_dir, "figure_3d.pdf"), plot = base_plot)
# ###############################################################################
# ############
# # Figure 4 #
# ############
print("Figure 4")
# Six-color palette for the timecourse samples (Set1 minus two entries)
timecourse_color <- RColorBrewer::brewer.pal(8, "Set1")
timecourse_color <- c(timecourse_color[2:5], timecourse_color[7:8])
lowest_UMI_exp <- "timepoint2"
downsample_UMI <- TRUE
# Target median UMI used by percents_and_counts() below (global lowest_UMI).
# NOTE(review): assumes a "stage_exp" meta.data column combining stage and
# experiment -- confirm it exists on mtecCombSub.
if (downsample_UMI) {
  lowest_UMI <- get_umi(mtecCombSub, subset_seurat = TRUE, subset_by = "stage_exp",
                        subset_val = paste0("AirePositive_", lowest_UMI_exp))
}
# Figure 4a
# Start with full tSNE only coloring the Aire positive cluster
highlight_one_group(mtecCombined, meta_data_col = "stage", group = "Aire_positive",
                    color_df = stage_color, show_legend = FALSE,
                    save_plot = paste0(save_dir, "figure_4a.pdf"))
# Figure 4b
average_gene_list <- c("Aire", "Fezf2", "Gapdh", "Emc7")
# Aire-positive cells only, excluding the lineage-trace experiment
mtec_aire_positive <- Seurat::SubsetData(mtecCombSub, ident.use = "AirePositive")
cells_use <- rownames(mtec_aire_positive@meta.data)[mtec_aire_positive@meta.data$exp !=
                                                      "aireTrace"]
no_at_mtec_aire <- Seurat::SubsetData(mtec_aire_positive, cells.use = cells_use)
# Make the lines thicker here
plot_avg_exp_genes(no_at_mtec_aire, average_gene_list,
                   save_plot = paste0(save_dir, "figure_4b.pdf"),
                   avg_expr_id = "pub_exp")
mtecCombined_all <- no_at_mtec_aire
# This is an okay place for a for loop (recursion)
# http://adv-r.had.co.nz/Functionals.html
# Add a mean-expression score column for every gene set
for (gene_set in names(gene_lists)) {
  mtecCombined_all <- plot_gene_set(mtecCombined_all,
                                    gene_lists[[gene_set]], gene_set, make_plot = FALSE)
}
# Figure 4c
gene_sets <- c("all_other_genes", "tra_fantom", "aire_genes")
trio_plots(mtecCombined_all, geneset = gene_sets,
           cell_cycle = FALSE, plot_violin = TRUE, jitter_and_violin = FALSE,
           plot_jitter = FALSE, color = timecourse_color, sep_by = "pub_exp",
           save_plot = paste0(save_dir, "figure_4c.pdf"))
# This could probably be made into it's own thing
# percents_counts_all <- sapply(data_sets, function(x) percents_and_counts(mtecCombSub,
#  batch_name = x, downsample_UMI = TRUE, one_batch = TRUE, batch = "pub_exp",
#  one_population = TRUE, population = "stage", "population_name" = "Aire_positive"))
# Internal (non-publication) experiment ids, excluding the trace experiment
old_data_sets <- unique(mtecCombined@meta.data$exp)
old_data_sets <- old_data_sets[old_data_sets != "aireTrace"]
# I may have fixed it, run tomorrow
# Gene counts + detection percentages per AirePositive_<exp> batch
percents_counts_all <- sapply(old_data_sets, function(x) percents_and_counts(mtecCombSub,
  batch_name = paste0("AirePositive_", x), downsample_UMI = TRUE, one_batch = TRUE,
  batch = "stage_exp"))
# percents_counts_all <- sapply(old_data_sets, function(x) percents_and_counts(mtecCombSub,
#   batch_name = x, downsample_UMI = TRUE, one_batch = TRUE,
#   batch = "exp", one_population = TRUE, population_name = "Aire_positive",
#   population = "stage"))
percents <- sapply(names(percents_counts_all), function(x)
  get_perc_count(percents_counts_all, x, percent = TRUE), USE.NAMES = TRUE)
counts <- lapply(names(percents_counts_all), function(x)
  get_perc_count(percents_counts_all, x, count = TRUE))
counts_df <- do.call(rbind, counts)
# Strip the stage prefix back off so exp matches the id -> label map
counts_df$exp <- sub("AirePositive_", "", counts_df$exp)
counts_df$pub_exp <- new_exp_names[counts_df$exp]
counts_df$pub_exp <- factor(counts_df$pub_exp,
                            levels = unname(new_exp_names))
counts_df_m <- reshape2::melt(counts_df, variable.name = "gene_list",
                              value.name = "gene_count")
to_plot <- c("tra_fantom", "all_other_genes", "aire_genes", "fezf2_genes")
short_list <- c("tra_fantom", "aire_genes", "fezf2_genes")
counts_df_plot <- counts_df_m[counts_df_m$gene_list %in% to_plot, ]
counts_df_short <- counts_df_m[counts_df_m$gene_list %in% short_list, ]
percents_m <- reshape2::melt(percents)
names(percents_m) <- c("gene_list", "exp", "percent_of_genes")
percents_m$exp <- sub("AirePositive_", "", percents_m$exp)
percents_m$pub_exp <- new_exp_names[percents_m$exp]
percents_m$pub_exp <- factor(percents_m$pub_exp,
                             levels = unname(new_exp_names))
percents_plot <- percents_m[percents_m$gene_list %in% to_plot, ]
# Figure 4d
# Percent of gene lists
# NOTE(review): this relies on top-level auto-printing (Rscript); if the file
# is source()d the ggplot object is never printed and the PDF is blank.
pdf(paste0(save_dir, "figure_4d.pdf"))
ggplot2::ggplot(percents_plot, ggplot2::aes(x = pub_exp, y = percent_of_genes,
                                            group = gene_list, color = gene_list)) +
  ggplot2::geom_line() +
  ggplot2::ylim(0,1)
#ggplot2::theme_classic()
dev.off()
# Figure 4e
# Number of genes per cell: boxplots per gene list, filled by timepoint,
# with a zoomed inset for the smaller gene lists
full_plot <- ggplot2::ggplot(counts_df_plot, ggplot2::aes(x = gene_list,
                                                          y = gene_count,
                                                          fill = pub_exp)) +
  ggplot2::geom_boxplot() +
  ggplot2::scale_fill_manual(values = timecourse_color) +
  #ggplot2::theme_classic() +
  ggpubr::stat_compare_means(method = "anova", size = 2, label.y = 6150)
# Zoomed version (no legend) used as the inset
zoom_plot <- ggplot2::ggplot(counts_df_short, ggplot2::aes(x = gene_list,
                                                           y = gene_count,
                                                           fill = pub_exp)) +
  ggplot2::geom_boxplot(show.legend = FALSE) +
  ggplot2::scale_fill_manual(values = timecourse_color) +
  #ggplot2::theme_classic() +
  ggplot2::theme(panel.background = ggplot2::element_blank(),
                 axis.title.x = ggplot2::element_blank(),
                 axis.title.y = ggplot2::element_blank(),
                 panel.border = ggplot2::element_rect(color = "black",
                                                      fill = NA,
                                                      size = 1)) +
  ggpubr::stat_compare_means(method = "anova", size = 2, label.y = 300)
# Embed the zoomed plot in the top-right of the full plot, and draw a
# rectangle around the source region it magnifies
zoom_plot_g <- ggplot2::ggplotGrob(zoom_plot)
all_plots <- full_plot + ggplot2::annotation_custom(grob = zoom_plot_g,
                                                    xmin = 1.5,
                                                    xmax = Inf,
                                                    ymin = 1000,
                                                    ymax = Inf) +
  ggplot2::annotation_custom(grob = grid::rectGrob(gp = grid::gpar(fill = NA)),
                             xmin = 1.5,
                             xmax = Inf,
                             ymin = -Inf,
                             ymax = 500)
# NOTE(review): `all_plots` relies on top-level auto-printing (Rscript);
# if this file is source()d the PDF would be blank.
pdf(paste0(save_dir, "figure_4e.pdf"))
all_plots
dev.off()
# ############
# # Figure 5 #
# ############
print("Figure 5")
# Palette for the reanalysis clusters (four clusters in figures 5d-5g).
reanalysis_colors <- c("#603E95", "#009DA1", "#FAC22B", "#D7255D")
# Add publication-facing experiment labels to the progenitor object, in the
# same fixed order used throughout the script.
progenitor_mtec@meta.data$pub_exp <- new_exp_names[progenitor_mtec@meta.data$exp]
progenitor_mtec@meta.data$pub_exp <- factor(progenitor_mtec@meta.data$pub_exp,
                                            levels = unname(new_exp_names))
# Drop the aireTrace experiment; downstream panels use only the timecourse.
cells_use <- rownames(progenitor_mtec@meta.data)[progenitor_mtec@meta.data$exp !=
                                                   "aireTrace"]
no_at_mtec <- Seurat::SubsetData(progenitor_mtec, cells.use = cells_use)
# Figure 5a
# Highlight just the early aire cells
highlight_one_group(mtecCombined, meta_data_col = "stage", group = "Early_Aire",
                    color_df = stage_color, show_legend = FALSE,
                    save_plot = paste0(save_dir, "figure_5a.pdf"))
# Figure 5b
# % of cells in G2: bar plot of the cycling fraction per experiment.
# Carry the cycle_phase annotation over from the combined object, restricted
# to the cells present in the no-aireTrace subset.
cell_cycle <- mtecCombined@meta.data[rownames(mtecCombined@meta.data) %in%
                                       rownames(no_at_mtec@meta.data), ]
# Row order of the two meta.data frames can differ; align before copying.
if (!identical(rownames(cell_cycle), rownames(no_at_mtec@meta.data))) {
  print("must reorder cells")
  cell_cycle <- cell_cycle[match(rownames(no_at_mtec@meta.data),
                                 rownames(cell_cycle)), , drop = FALSE]
}
no_at_mtec@meta.data$cycle_phase <- cell_cycle$cycle_phase
percent_cycling <- sapply(data_sets, USE.NAMES = TRUE,
                          function(x) percent_cycling_cells(no_at_mtec,
                          data_set = x, meta_data_col = "pub_exp"))
percent_cycling <- data.frame(percent_cycling)
percent_cycling$experiment <- rownames(percent_cycling)
percent_cycling$experiment <- factor(percent_cycling$experiment,
                                     levels = unname(new_exp_names))
pdf(paste0(save_dir, "figure_5b.pdf"))
# Bug fix: explicit print() so the plot reaches the PDF device even when the
# script is run via source() (ggplot objects are not auto-printed there).
print(
  ggplot2::ggplot(percent_cycling, ggplot2::aes(x = experiment,
                                                y = percent_cycling)) +
    ggplot2::geom_bar(ggplot2::aes(fill = experiment),
                      stat = "identity") +
    ggplot2::scale_fill_manual(values = timecourse_color)
)
dev.off()
# Figure 5c
# Violin plots of proliferation markers (I) and stage markers (II) split by
# experiment, saved directly by trio_plots().
trio_plots(no_at_mtec, geneset = c("Hmgb2", "Tubb5", "Stmn1"),
           cell_cycle = FALSE, plot_violin = TRUE, jitter_and_violin = FALSE,
           plot_jitter = FALSE, color = timecourse_color, sep_by = "pub_exp",
           save_plot = paste0(save_dir, "figure_5cI.pdf"))
trio_plots(no_at_mtec, geneset = c("Aire", "Ccl21a", "Fezf2"),
           cell_cycle = FALSE, plot_violin = TRUE, jitter_and_violin = FALSE,
           plot_jitter = FALSE, color = timecourse_color, sep_by = "pub_exp",
           save_plot = paste0(save_dir, "figure_5cII.pdf"))
# Figure 5d
# UMAP of reanalysis of early aire cells
tSNE_PCA(no_at_mtec, "cluster", color = reanalysis_colors,
         save_plot = paste0(save_dir, "figure_5d.pdf"))
# Figure 5e
# Same two marker trios as 5c, for the reanalysis object.
# NOTE(review): 5eI uses Stmn2 where 5cI used Stmn1 — confirm intentional.
trio_plots(no_at_mtec, geneset = c("Hmgb2", "Tubb5", "Stmn2"), cell_cycle = FALSE,
           plot_violin = TRUE, jitter_and_violin = FALSE, plot_jitter = FALSE,
           sep_by = "pub_exp", color = timecourse_color,
           save_plot = paste0(save_dir, "figure_5eI.pdf"))
# Figure 5e
trio_plots(no_at_mtec, geneset = c("Aire", "Ccl21a", "Fezf2"), cell_cycle = FALSE,
           plot_violin = TRUE, jitter_and_violin = FALSE, plot_jitter = FALSE,
           sep_by = "pub_exp", color = timecourse_color,
           save_plot = paste0(save_dir, "figure_5eII.pdf"))
# Figure 5f
# Cycling fraction per reanalysis cluster (res.0.6) as a bar plot.
clusters <- unique(no_at_mtec@meta.data$res.0.6)
percent_cycling <- sapply(clusters, USE.NAMES = TRUE,
                          function(x) percent_cycling_cells(no_at_mtec,
                          data_set = x, meta_data_col = "res.0.6"))
percent_cycling <- data.frame(percent_cycling)
# sapply names come back as "<digit>.<cluster>"; strip the numeric prefix.
percent_cycling$cluster <- sub("\\d\\.", "", rownames(percent_cycling))
pdf(paste0(save_dir, "figure_5f.pdf"))
# Bug fix: explicit print() so the plot reaches the PDF device even when the
# script is run via source() (ggplot objects are not auto-printed there).
print(
  ggplot2::ggplot(percent_cycling, ggplot2::aes(x = cluster,
                                                y = percent_cycling)) +
    ggplot2::geom_bar(ggplot2::aes(fill = cluster),
                      stat = "identity") +
    ggplot2::scale_fill_manual(values = reanalysis_colors)
)
dev.off()
# Figure 5g
# Population proportions per reanalysis cluster, subsampled so each
# experiment contributes equally, then plotted by population_plots().
stage_list_all <- lapply(data_sets, function(x) populations_dfs_new(no_at_mtec,
                         x, subsample = TRUE, subsample_by = "pub_exp",
                         meta_data_col = "res.0.6"))
stage_df_all <- do.call("rbind", stage_list_all)
# Keep experiments in timecourse order in the plot.
stage_df_all$sample <- factor(stage_df_all$sample, levels = unname(new_exp_names))
population_plots(stage_df_all, color = reanalysis_colors,
                 save_plot = paste0(save_dir, "figure_5g.pdf"))
############################################################################
########################
# Supplemental Figures #
########################
#########################
# Supplemental Figure 1 #
#########################
# S1a
# Heatmap of all TFs with interesting TFs highlighted
# S1b
# Marker genes on UMAP
#########################
# Supplemental Figure 2 #
#########################
# S2a
# Violin plots of marker genes for WT
# S2b
# Dotplot of WT
# S2c
# Jitter plots of cycling with markers from AT
# S2d
# Correlation of WT with Aire Trace
# S2e
# UMAP of genes
# Gene-set and single-gene UMAPs for supplemental figure S2e.
gene_names <- c("Aire", "Fezf2")
plot_sets <- c("tra_fantom", "aire_genes", "fezf2_genes")
plot_names <- list(tra_fantom = paste0(save_dir, "figure_s2eI.pdf"),
                   aire_genes = paste0(save_dir, "figure_s2eIII.pdf"),
                   Aire = paste0(save_dir, "figure_s2eII.pdf"),
                   fezf2_genes = paste0(save_dir, "figure_s2eV.pdf"),
                   Fezf2 = paste0(save_dir, "figure_s2eIV.pdf"))
# Bug fix: `plot_sets` is an unnamed character vector, so the original
# lapply(names(plot_sets), ...) iterated over NULL and silently produced no
# gene-set plots (panels s2eI/III/V were never written). Iterate over the
# values themselves; they index both `gene_lists` and `plot_names`.
lapply(plot_sets, function(x) plot_gene_set(mtec_wt, gene_lists[[x]], x,
                                            save_plot = plot_names[[x]]))
lapply(gene_names, function(x) tSNE_PCA(mtec_wt, x, save_plot = plot_names[[x]]))
#########################
# Supplemental Figure 3 #
#########################
# S3a
# Umap of stage recovery
# S3b
# Violin markers for all as in S2
# S3c Correlation of TP5 with WT
#########################
# Supplemental Figure 4 #
#########################
# S4a
# TRA recovery UMAPs
# TRAs in recovery
# Color-scale limits per gene list so panels are comparable across timepoints.
limit_list <- list(tra_fantom = c(0, 0.100),
                   aire_genes = c(0, 0.100),
                   fezf2_genes = c(0, 0.300))
# One output PDF per experiment for panel S4a.
plot_names_fig4 <- list(isoControlBeg = paste0(save_dir, "figure_s4aI.pdf"),
                        isoControlEnd = paste0(save_dir, "figure_s4aII.pdf"),
                        timepoint1 = paste0(save_dir, "figure_s4aIII.pdf"),
                        timepoint2 = paste0(save_dir, "figure_s4aIV.pdf"),
                        timepoint3 = paste0(save_dir, "figure_s4aV.pdf"),
                        timepoint5 = paste0(save_dir, "figure_s4aVI.pdf"))
# Re-key by publication names because plot_gene_set() is called with
# meta_data_col = "pub_exp" below.
names(plot_names_fig4) <- new_exp_names[names(plot_names_fig4)]
# Plot the TRA (fantom) gene-set signal on the combined UMAP, one experiment
# per file, with a shared color-scale limit.
lapply(names(plot_names_fig4), function(x) plot_gene_set(mtecCombined,
                                           gene_set = gene_lists[["tra_fantom"]],
                                           plot_name = "tra_fantom",
                                           one_dataset = FALSE,
                                           data_set = x,
                                           meta_data_col = "pub_exp",
                                           limits = limit_list[["tra_fantom"]],
                                           save_plot = plot_names_fig4[[x]]))
# S4b
# Bootstrap downsample plots
# S4c
# Number of protein coding genes seen in WT not downsampled
#########################
# Supplemental Figure 5 #
#########################
# S5a
# Ngene and nUMI before correction
# S5b
# nGene and nUMI after correction
# S5c
# Dropouts of different house keeping genes before and after
#########################
# Supplemental Figure 6 #
#########################
# Currently don't know
##################################################################################
# stage_to_plot <- c("CorticoMedullary", "Ccl21aHigh", "EarlyAire", "AirePositive",
# "LateAire", "Tuft")
# markers_to_plot <- c("Krt5", "Ccl21a", "Ascl1", "Hes1", "Hmgb2", "Hmgn2",
# "Hmgb1", "H2afx", "Ptma", "Aire", "Utf1",
# "Fezf2", "Krt10", "Trpm5", "Pou2f3", "Dclk1")
# sdp <- Seurat::SplitDotPlotGG(mtecCombined, genes.plot = rev(markers_to_plot),
# cols.use = RColorBrewer::brewer.pal(7, "Set1"),
# x.lab.rot = T, plot.legend = T, dot.scale = 8,
# do.return = T, grouping.var = "exp")
# for (i in stage_to_plot) {
# mtecSubset <- Seurat::SubsetData(mtecCombined, ident.use = i)
# sdp <- Seurat::SplitDotPlotGG(mtecSubset, genes.plot = rev(markers_to_plot),
# cols.use = RColorBrewer::brewer.pal(7, "Set1"),
# x.lab.rot = T, plot.legend = T, dot.scale = 8,
# do.return = T, grouping.var = "exp")
# }
# # TRA in recovery
# my_pal <- RColorBrewer::brewer.pal(8, "Set1")
# my_pal <- c(my_pal[2:5], my_pal[7:8])
# avg_plots <- c("ranked", "expression")
# ranked_df_all <- NULL
# exp_df_all <- NULL
# mtecCombined@meta.data$exp <- factor(mtecCombined@meta.data$exp)
# # Some genes in recovery
# for (i in levels(mtecCombined@meta.data$exp)) {
# print(i)
# if (i != "aireTrace") {
# cells_use <- rownames(mtecCombined@meta.data)[mtecCombined@meta.data$exp == i]
# # Make a seurat object with only the data set cells. This object
# # will maintain the tSNE locations from the combined analysis.
# combSubset <- Seurat::SubsetData(mtecCombined, cells.use = cells_use)
# if ("ranked" %in% avg_plots) {
# ranked_df_one <- gene_exp_df(combSubset, i, df_contents = "ranked")
# ranked_df_all <- rbind(ranked_df_all, ranked_df_one)
# }
# if ("expression" %in% avg_plots) {
# exp_df_one <- gene_exp_df(combSubset, i, df_contents = "expression")
# exp_df_all <- rbind(exp_df_all, exp_df_one)
# }
# new_umap <- full_stage_umap(mtecCombined, i)
# print(new_umap)
# }
# }
# gene_list <- c("Calcb", "Csn2", "Ubd", "H2afx", "Hes1", "Pou2f3", "Aire",
# "Utf1", "Srgn", "Fezf2", "Anxa2", "Cyba", "Cxcl10",
# "Lgals1", "Fgf21", "Plb1", "Skint10", "Rgs5", "Hagh", "Tmsb10",
# "Fabp5", "Spib", "Hmgb2", "Hmgb1", "Hmgn2", "Ascl1", "Cldn7",
# "Grap", "Cystm1", "Nos2", "Nupr1", "Cebpb", "Trpm5", "Dclk1",
# "Cdkn1a", "Selm", "Cyp2a5", "Rptn", "Ahcyl2")
# for (gene in gene_list) {
# if ("ranked" %in% avg_plots) {
# ranked_df_all$stage <- factor(ranked_df_all$stage,
# levels = stage_to_plot)
# plot_gene_exp(ranked_df_all, gene, col = stage_color3)
# }
# if ("expression" %in% avg_plots) {
# exp_df_all$stage <- factor(exp_df_all$stage,
# levels = stage_to_plot)
# plot_gene_exp(exp_df_all, gene, low_lim = 0, high_lim = 4, col = stage_color3)
# }
# }
# # Maybe recovery algorithm here...
# ############
# # Figure 6 #
# ############
# print("Figure 6")
# # Density plot of Ccl21a here
# meta_data <- mtecCombined@meta.data
# mtec_data <- t(as.matrix(mtecCombined@data))
# ccl21a_df <- as.data.frame(mtec_data[ , "Ccl21a"])
# aire_df <- as.data.frame(mtec_data[ , "Aire"])
# hmgb2_df <- as.data.frame(mtec_data[ , "Hmgb2"])
# names(ccl21a_df) <- "Ccl21a"
# names(aire_df) <- "Aire"
# names(hmgb2_df) <- "Hmgb2"
# meta_data <- merge(meta_data, ccl21a_df, by = "row.names")
# rownames(meta_data) <- meta_data$Row.names
# meta_data$Row.names <- NULL
# meta_data <- merge(meta_data, aire_df, by = "row.names")
# rownames(meta_data) <- meta_data$Row.names
# meta_data$Row.names <- NULL
# meta_data <- merge(meta_data, hmgb2_df, by = "row.names")
# rownames(meta_data) <- meta_data$Row.names
# meta_data$Row.names <- NULL
# meta_data <- meta_data[meta_data$exp != "aireTrace", ]
# meta_data$exp <- factor(meta_data$exp)
# # Finish making the bar plot!
# meta_data$Ccl21a_high_expr <- meta_data$Ccl21a > 3 & meta_data$stage == "Early_Aire"
# meta_data$True_aire <- meta_data$stage == "Aire_positive"
# meta_data$True_progenitor <- meta_data$stage == "Early_Aire"
# percents_df <- data.frame(Timepoint = character(),
# Ccl21a_whole = double(),
# Progenitor_whole = double(),
# Aire_whole = double(),
# post_aire_whole = double())
# for (i in levels(meta_data$exp)) {
# new_meta_data <- meta_data[meta_data$exp == i, ]
# df_to_add <- data.frame(Timepoint = i,
# Ccl21a_whole = sum(new_meta_data$Ccl21a_high_expr) /
# nrow(new_meta_data) * 100,
# Progenitor_whole = sum(new_meta_data$True_progenitor) /
# nrow(new_meta_data) * 100,
# Aire_whole = sum(new_meta_data$True_aire) /
# nrow(new_meta_data) * 100,
# Post_aire_whole = sum(new_meta_data$stage ==
# "Late_Aire") /
# nrow(new_meta_data) * 100)
# percents_df <- rbind(percents_df, df_to_add)
# }
# meta_data <- meta_data[meta_data$stage == "Early_Aire", ]
# p_density <- ggplot2::ggplot(meta_data, ggplot2::aes(Ccl21a,
# color = exp)) + ggplot2::geom_density() +
# ggplot2::scale_color_manual(values = my_pal, name = "timepoint") +
# ggplot2::theme_classic() + ggplot2::xlab("Ccl21a")
# print(p_density)
# p_barplot_ccl21a <- ggplot2::ggplot(percents_df,
# ggplot2::aes(Timepoint, Ccl21a_whole,
# fill = Timepoint)) +
# ggplot2::geom_bar(stat = "identity") +
# ggplot2::scale_fill_manual(values = my_pal, name = "timepoint") +
# ggplot2::theme_classic() + ggplot2::ylab("Ccl21a percent of whole")
# print(p_barplot_ccl21a)
# p_barplot_progenitor <- ggplot2::ggplot(percents_df,
# ggplot2::aes(Timepoint, Progenitor_whole,
# fill = Timepoint)) +
# ggplot2::geom_bar(stat = "identity") +
# ggplot2::scale_fill_manual(values = my_pal, name = "timepoint") +
# ggplot2::theme_classic() + ggplot2::ylab("Early Aire percent of whole")
# print(p_barplot_progenitor)
# p_barplot_aire <- ggplot2::ggplot(percents_df,
# ggplot2::aes(Timepoint, Aire_whole,
# fill = Timepoint)) +
# ggplot2::geom_bar(stat = "identity") +
# ggplot2::scale_fill_manual(values = my_pal, name = "timepoint") +
# ggplot2::theme_classic() + ggplot2::ylab("Aire Positive percent of whole")
# print(p_barplot_aire)
# p_barplot_post_aire <- ggplot2::ggplot(percents_df,
# ggplot2::aes(Timepoint, Post_aire_whole,
# fill = Timepoint)) +
# ggplot2::geom_bar(stat = "identity") +
# ggplot2::scale_fill_manual(values = my_pal, name = "timepoint") +
# ggplot2::theme_classic() + ggplot2::ylab("Late Aire percent of whole")
# print(p_barplot_post_aire)
# # Focus on progenitor population
# ###############################################################################
# # Supplement
# #########################
# # Supplemental Figure 1 #
# #########################
# # UMAP of Aire trace
# tSNE_PCA(mtec, "stage", color = stage_color,
# save_plot = paste0(save_dir, "figure_1b.pdf"))
# # Marker genes mapped on UMAP
# tSNE_PCA(mtec, "Ccl21a")
# tSNE_PCA(mtec, "Krt5")
# tSNE_PCA(mtec, "Trpm5")
# tSNE_PCA(mtec, "Krt10")
# tSNE_PCA(mtec, "Aire")
# tSNE_PCA(mtec, "GFP")
# # Put plots of UMI and gene counts here
# # Jitter and violin plots of genes that should be consistent
# trio_plots(mtec_wt, geneset = c("Actb", "Gapdh", "B2m"), cell_cycle = FALSE,
# jitter_and_violin = TRUE, plot_jitter = FALSE, color = stage_color,
# sep_by = "cluster")
# #########################
# # Supplemental Figure 2 #
# #########################
# # Heatmap of DE genes labeled by genes already known in Aire trace cells
# plot_heatmap(mtec, subset_list = TFs_all,
# color_list = c("Cdx1", "Utf1", "Tcf7", "Spib", "Cdk4", "Ptma",
# "H2afx", "Hmgb1"),
# color_list2 = c("Aire", "Irf7", "Cited2", "Spib", "Hes1", "Pax1",
# "Relb", "Lmo4", "Pou2f3"),
# cell_color = stage_color)
# #########################
# # Supplemental Figure 3 #
# #########################
# # Violin plots of wt markers
# trio_plots(mtec_wt, geneset = c("Ackr4", "Ccl21a", "Aire"), cell_cycle = FALSE,
# plot_violin = TRUE, jitter_and_violin = FALSE, plot_jitter = FALSE,
# color = stage_color, sep_by = "cluster")
# trio_plots(mtec_wt, geneset = c("Krt10", "Trpm5", "Mki67"), cell_cycle = FALSE,
# plot_violin = TRUE, jitter_and_violin = FALSE, plot_jitter = FALSE,
# color = stage_color, sep_by = "cluster")
# trio_plots(mtec_wt, geneset = c("Hmgb2", "H2afx", "Hmgb1"), cell_cycle = TRUE,
# jitter_and_violin = FALSE, plot_jitter = TRUE, plot_violin = FALSE,
# color = stage_color, sep_by = "cluster")
# # Correlation plot
# master_plot(mtec, "aire_trace", mtec_wt, "wt", stage_color_df)
# # Dot plot of all markers
# markers_to_plot_full <- c("Krt5", "Ccl21a", "Ascl1", "Hes1", "Hmgb2", "Hmgn2",
# "Hmgb1", "H2afx", "Ptma", "Aire", "Utf1",
# "Fezf2", "Krt10", "Trpm5", "Pou2f3", "Dclk1")
# dot_plot <- Seurat::DotPlot(mtec_wt, genes.plot = rev(markers_to_plot_full),
# cols.use = c("blue", "red"), x.lab.rot = T,
# plot.legend = T, dot.scale = 8, do.return = T)
# # Dot plot of gene in WT population
# dot_plot <- Seurat::DotPlot(mtec_wt, genes.plot = rev(markers_to_plot),
# cols.use = c("blue", "red"), x.lab.rot = T,
# plot.legend = T, dot.scale = 8, do.return = T)
# # Heatmap of DE genes labeled by genes already known in WT cells
# plot_heatmap(mtec_wt, subset_list = TFs_all,
# color_list = c("Cdx1", "Utf1", "Tcf7", "Spib", "Cdk4", "Ptma",
# "H2afx", "Hmgb1"),
# color_list2 = c("Aire", "Irf7", "Cited2", "Spib", "Hes1", "Pax1",
# "Relb", "Lmo4", "Pou2f3"),
# cell_color = stage_color)
# # Marker genes mapped on UMAP for wt population
# tSNE_PCA(mtec_wt, "Ccl21a")
# tSNE_PCA(mtec_wt, "Krt5")
# tSNE_PCA(mtec_wt, "Trpm5")
# tSNE_PCA(mtec_wt, "Krt10")
# tSNE_PCA(mtec_wt, "Aire")
# ########################
# # Supplemntal Figure 4 #
# ########################
# # UMAPs of gene expression at each stage, not including Aire trace
# dev.off()
|
82b7fefa08404c6ea7fd603b7bc083a5f8aab14e
|
0b5bed218b4cbe30646c3c918c9fe73047a5889e
|
/simulations/sims_comp_run.R
|
1dc7ef92cf4fd28f8ca7ad9a1fd7a5dbc5cd5247
|
[] |
no_license
|
paul-buerkner/monotonic-effects-paper
|
463b1de922100c493f56187ffa25a854112e8b00
|
1e295745a4b5566bce8c575b3885ab1ff51f0cb2
|
refs/heads/master
| 2020-03-22T00:45:42.594738
| 2019-12-08T21:26:16
| 2019-12-08T21:26:16
| 139,263,725
| 7
| 0
| null | 2018-10-22T11:28:02
| 2018-06-30T16:33:12
|
TeX
|
UTF-8
|
R
| false
| false
| 10,134
|
r
|
sims_comp_run.R
|
mo_trans <- function(x, zeta) {
  # Monotonic transform: for each element of `x`, returns
  # length(zeta) * sum(zeta[1:x]), and 0 when the element is 0.
  # Vectorized over `x`; `zeta` is a simplex of per-step increments.
  transform_one <- function(xi, zeta) {
    if (xi == 0) {
      return(0)
    }
    length(zeta) * sum(zeta[seq_len(xi)])
  }
  vapply(x, transform_one, numeric(1), zeta = zeta)
}
# invert the sign of half of the elements of x
# Flip the sign of half the elements of `x` (rounded down), with the flipped
# positions chosen by a random permutation. The values themselves keep their
# original order and magnitude; only signs change.
invert_half <- function(x) {
  n <- length(x)
  n_keep <- ceiling(n / 2)
  sign_vec <- c(rep(1, n_keep), rep(-1, n - n_keep))
  # Same sample() call as before so RNG consumption is unchanged.
  x * sign_vec[sample(seq_len(n))]
}
# sequential difference coding of factors
# Coerce `x` to a factor and attach sequential ("backward") difference
# contrasts, so regression coefficients compare each level to the previous.
sdif_coding <- function(x) {
  fac <- as.factor(x)
  sdif_contrasts <- MASS::contr.sdif(levels(fac))
  contrasts(fac) <- sdif_contrasts
  fac
}
# compile a dummy brms model to avoid recompliation in each trial
# Compile a dummy brms model for one simulation condition so that later
# trials can update() it without recompiling Stan code. `chains = 0`
# compiles the model but draws no samples. `effect` is one of
# "lin" (linear), "mo" (monotonic), or "cat" (categorical).
get_dummy_model <- function(cond, effect) {
  require(brms)
  effect <- match.arg(effect, c("lin", "mo", "cat"))
  if (cond$pred == "main") {
    # Dummy predictor/response just to fix the model's data shape.
    x <- sample(0:cond$D, size = cond$nobs, TRUE)
    xf <- sdif_coding(x)
    df <- data.frame(x, xf, y = 1)
    bform <- switch(effect,
                    lin = bf(y ~ x),
                    mo = bf(y ~ mo(x)),
                    cat = bf(y ~ xf))
  } else if (cond$pred == "interaction") {
    x <- sample(0:cond$D, cond$nobs, TRUE)
    z <- sample(0:cond$D, cond$nobs, TRUE)
    xf <- sdif_coding(x)
    zf <- sdif_coding(z)
    df <- data.frame(x, xf, z, zf, y = 1)
    bform <- switch(effect,
                    lin = bf(y ~ x * z),
                    mo = bf(y ~ mo(x) * mo(z)),
                    cat = bf(y ~ xf * zf))
  }
  if (cond$likelihood == "gaussian") {
    bfamily <- brmsfamily("gaussian")
  }
  # very wide priors considering the scale of the data
  bprior <- prior(normal(0, 10), class = "b")
  brm(bform, data = df, family = bfamily,
      prior = bprior, chains = 0)
}
run_trial <- function(cond, dummy_models, seed = NULL) {
  # Run a single simulation trial for condition `cond` (one row with columns
  # ntrials, D, ndraws, nobs, likelihood, effect, pred): draw true parameter
  # values, simulate data, and fit the competing estimators — linear model,
  # categorical model, several bigsplines smoothers (ordinal/linear/cubic),
  # isotonic regression + monotone ordinal spline (main effects only), and a
  # Bayesian monotonic model updated from `dummy_models[["mo"]]`.
  # Returns a list with `truth` (drawn parameters) and `data` (simulated
  # data plus one Estimate_* column per estimator).
  # NOTE(review): the exact order of rnorm/sample/rdirichlet calls below
  # determines reproducibility for a given `seed`; do not reorder.
  require(brms)
  require(bigsplines)
  if (!is.null(seed)) {
    # message("Using seed ", seed)
    set.seed(seed)
  }
  out <- list(truth = list())
  # Uniform Dirichlet concentration over the D simplex increments.
  alpha <- rep(1, cond$D)
  # --- Draw true parameters -------------------------------------------------
  # Effects are scaled by 1/D (1/D^2 for the interaction) so the overall
  # effect size is comparable across values of D. For "lin" the simplex is
  # uniform (equal steps = linear); "mo" draws a random monotone simplex;
  # "cat" flips half the signs so the truth is non-monotone.
  if (cond$pred == "main") {
    out$truth$b_Intercept <- rnorm(1, 0, 1)
    out$truth$bsp_mox <- rnorm(1, 0, 1) / cond$D
    if (cond$effect == "lin") {
      out$truth$simo_mox1 <- alpha / sum(alpha)
    } else if (cond$effect == "mo") {
      out$truth$simo_mox1 <- as.vector(rdirichlet(1, alpha))
    } else if (cond$effect == "cat") {
      out$truth$simo_mox1 <- invert_half(as.vector(rdirichlet(1, alpha)))
    }
  } else if (cond$pred == "interaction") {
    out$truth$b_Intercept <- rnorm(1, 0, 1)
    out$truth$bsp_mox <- rnorm(1, 0, 1) / cond$D
    out$truth$bsp_moz <- rnorm(1, 0, 1) / cond$D
    out$truth$`bsp_mox:moz` <- rnorm(1, 0, 1) / cond$D^2
    if (cond$effect == "lin") {
      out$truth$simo_mox1 <- alpha / sum(alpha)
      out$truth$simo_moz1 <- alpha / sum(alpha)
      out$truth$`simo_mox:moz1` <- alpha / sum(alpha)
      out$truth$`simo_mox:moz2` <- alpha / sum(alpha)
    } else if (cond$effect == "mo") {
      out$truth$simo_mox1 <- as.vector(rdirichlet(1, alpha))
      out$truth$simo_moz1 <- as.vector(rdirichlet(1, alpha))
      out$truth$`simo_mox:moz1` <- as.vector(rdirichlet(1, alpha))
      out$truth$`simo_mox:moz2` <- as.vector(rdirichlet(1, alpha))
    } else if (cond$effect == "cat") {
      out$truth$simo_mox1 <- invert_half(as.vector(rdirichlet(1, alpha)))
      out$truth$simo_moz1 <- invert_half(as.vector(rdirichlet(1, alpha)))
      out$truth$`simo_mox:moz1` <- invert_half(as.vector(rdirichlet(1, alpha)))
      out$truth$`simo_mox:moz2` <- invert_half(as.vector(rdirichlet(1, alpha)))
    }
  }
  if (cond$likelihood == "gaussian") {
    out$truth$sigma <- abs(rnorm(1, 0, 1))
  }
  # --- Simulate predictors and the linear predictor -------------------------
  if (cond$pred == "main") {
    x <- sample(0:cond$D, cond$nobs, TRUE)
    xf <- sdif_coding(x)
    eta <- out$truth$b_Intercept +
      out$truth$bsp_mox * mo_trans(x, out$truth$simo_mox1)
    out$data <- data.frame(x, xf, eta)
  } else if (cond$pred == "interaction") {
    x <- sample(0:cond$D, size = cond$nobs, TRUE)
    z <- sample(0:cond$D, size = cond$nobs, TRUE)
    xf <- sdif_coding(x)
    zf <- sdif_coding(z)
    eta <- out$truth$b_Intercept +
      out$truth$bsp_mox * mo_trans(x, out$truth$simo_mox1) +
      out$truth$bsp_moz * mo_trans(z, out$truth$simo_moz1) +
      out$truth$`bsp_mox:moz` *
      mo_trans(x, out$truth$`simo_mox:moz1`) *
      mo_trans(z, out$truth$`simo_mox:moz2`)
    out$data <- data.frame(x, xf, z, zf, eta)
  }
  # --- Simulate the response ------------------------------------------------
  # NOTE(review): `y` is only defined for the gaussian likelihood — the only
  # one in the simulation grid; other likelihoods would error at out$data$y.
  if (cond$likelihood == "gaussian") {
    y <- rnorm(cond$nobs, eta, out$truth$sigma)
  }
  out$data$y <- y
  # otherwise computing time explodes or errors occur for some trials
  skip.iter <- TRUE
  # --- Frequentist / spline competitors -------------------------------------
  if (cond$pred == "main") {
    fit_lin <- lm(y ~ x, data = out$data)
    eta_lin <- fitted(fit_lin)
    fit_cat <- lm(y ~ xf, data = out$data)
    eta_cat <- fitted(fit_cat)
    # set knots for spline models
    nxlev <- length(unique(out$data$x))
    knotid <- binsamp(out$data$x, nmbin = nxlev)
    fit_os <- bigssp(
      y ~ x, data = out$data, type = c(x = "ord"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_os <- as.vector(predict(fit_os))
    fit_ls <- bigssp(
      y ~ x, data = out$data, type = c(x = "lin"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_ls <- as.vector(predict(fit_ls))
    fit_cs <- bigssp(
      y ~ x, data = out$data, type = c(x = "cub"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_cs <- as.vector(predict(fit_cs))
    # isoreg always assumes the relationship to be monotonically increasing,
    # so flip the response when it appears decreasing and flip fits back.
    x <- out$data$x
    y <- out$data$y
    is_neg <- mean(y[x == min(x)]) > mean(y[x == max(x)])
    sign <- if (is_neg) -1 else 1
    fit_iso <- isoreg(x = x, y = sign * y)
    # NOTE(review): isoreg orders its fitted values by sorted x; confirm the
    # returned fits align row-wise with out$data.
    eta_iso <- sign * fitted(fit_iso)
    fit_osmo <- ordspline(
      x = x, y = sign * y, monotone = TRUE,
      knots = unique(x)
    )
    eta_osmo <- sign * as.vector(predict(fit_osmo))
    out$data <- cbind(out$data,
                      Estimate_lin = eta_lin,
                      Estimate_cat = eta_cat,
                      Estimate_os = eta_os,
                      Estimate_ls = eta_ls,
                      Estimate_cs = eta_cs,
                      Estimate_iso = eta_iso,
                      Estimate_osmo = eta_osmo
    )
  } else if (cond$pred == "interaction") {
    fit_lin <- lm(y ~ x * z, data = out$data)
    eta_lin <- fitted(fit_lin)
    fit_cat <- lm(y ~ xf * zf, data = out$data)
    eta_cat <- fitted(fit_cat)
    # set knots for ordinal spline models
    nxlev <- length(unique(out$data$x))
    nzlev <- length(unique(out$data$z))
    pred_mat <- cbind(out$data$x, out$data$z)
    knotid <- binsamp(pred_mat, nmbin = c(nxlev, nzlev))
    fit_os <- bigssp(
      y ~ x * z, data = out$data, type = c(x = "ord", z = "ord"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_os <- as.vector(predict(fit_os))
    fit_ls <- bigssp(
      y ~ x * z, data = out$data, type = c(x = "lin", z = "lin"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_ls <- as.vector(predict(fit_ls))
    fit_cs <- bigssp(
      y ~ x * z, data = out$data, type = c(x = "cub", z = "cub"),
      nknots = knotid, skip.iter = skip.iter, rseed = NULL
    )
    eta_cs <- as.vector(predict(fit_cs))
    out$data <- cbind(out$data,
                      Estimate_lin = eta_lin,
                      Estimate_cat = eta_cat,
                      Estimate_os = eta_os,
                      Estimate_ls = eta_ls,
                      Estimate_cs = eta_cs,
                      # no version available for interactions
                      Estimate_iso = NA,
                      Estimate_osmo = NA
    )
  }
  # --- Bayesian monotonic fit via brms --------------------------------------
  # fit Bayesian models via brms
  # fit_lin <- suppressMessages(update(
  #   dummy_models[["lin"]], newdata = out$data, recompile = FALSE,
  #   warmup = cond$ndraws, iter = 2 * cond$ndraws, chains = 1,
  #   refresh = 0
  # ))
  # eta_lin <- as.data.frame(fitted(fit_lin))
  # names(eta_lin) <- paste0(names(eta_lin), "_lin")
  # Update the precompiled dummy model with the simulated data so no Stan
  # recompilation happens inside the trial loop.
  fit_mo <- suppressMessages(update(
    dummy_models[["mo"]], newdata = out$data, recompile = FALSE,
    warmup = cond$ndraws, iter = 2 * cond$ndraws, chains = 1,
    refresh = 0
  ))
  eta_mo <- as.data.frame(fitted(fit_mo))
  names(eta_mo) <- paste0(names(eta_mo), "_mo")
  out$data <- cbind(out$data, eta_mo)
  # if (cond$D == 50 && cond$pred == "interaction" && cond$nobs <= 200) {
  #   # too many parameters for too few observations
  #   # which the unpenalized categorical model cannot handle
  #   eta_cat <- NA
  # } else {
  #   fit_cat <- suppressMessages(update(
  #     dummy_models[["cat"]], newdata = out$data, recompile = FALSE,
  #     warmup = cond$ndraws, iter = 2 * cond$ndraws, chains = 1,
  #     refresh = 0
  #   ))
  #   eta_cat <- as.data.frame(fitted(fit_cat))
  #   names(eta_cat) <- paste0(names(eta_cat), "_cat")
  # }
  # out$data <- cbind(out$data, eta_lin, eta_mo, eta_cat)
  out
}
# --- Simulation driver -------------------------------------------------------
# Resumable runner: results are stored per condition in an RDS file, and only
# conditions with no stored results are (re)run.
file <- "simulations/comp_results.rds"
# Make the path work both from the project root and from its parent directory.
if (!grepl("2018_monotonic_effects$", getwd())) {
  file <- paste0("2018_monotonic_effects/", file)
}
if (file.exists(file)) {
  comp_results <- readRDS(file)
} else {
  # Full factorial design over number of categories (D), sample size, true
  # effect type, and predictor structure; one row per condition.
  comp_results <- expand.grid(
    ntrials = 1000,
    D = c(4, 10, 50),
    ndraws = 500,
    nobs = c(50, 200, 1000),
    likelihood = "gaussian",
    effect = c("lin", "mo", "cat"),
    pred = c("main", "interaction"),
    stringsAsFactors = FALSE
  )
  comp_results$cond <- seq_len(nrow(comp_results))
  comp_results$res <- list(list())
}
I <- seq_len(nrow(comp_results))
# only run conditions for which no results exist yet
I <- intersect(I, which(!lengths(comp_results$res)))
library(foreach)
library(doParallel)
cl <- makeCluster(detectCores())
registerDoParallel(cl)
for (i in I) {
  message("Simulating condition ", i)
  cond <- comp_results[i, ]
  dummy_models <- list(lin = NULL, mo = NULL, cat = NULL)
  # for (eff in names(dummy_models)) {
  #   dummy_models[[eff]] <- get_dummy_model(cond, eff)
  # }
  # Compile the monotonic brms model once per condition; trials update() it.
  dummy_models[["mo"]] <- get_dummy_model(cond, "mo")
  J <- seq_len(cond$ntrials)
  # Trials are independent; seed = trial index makes each reproducible.
  comp_results$res[[i]] <-
    foreach(j = J, .packages = "brms") %dopar%
    run_trial(cond, dummy_models, seed = j)
  print(warnings())
  # Checkpoint after every condition so an interrupted run can resume.
  saveRDS(comp_results, file = file)
}
stopCluster(cl)
saveRDS(comp_results, file = file)
|
76e8f02e24b38a7ae7fc4585ad1a3fd0b3830405
|
19e6185664bfc3cfbcc9ae56fdd762826df21058
|
/cachematrix.R
|
659ba828388ab22a84ce762985f9d00a4fe828a0
|
[] |
no_license
|
praveengarimel/ProgrammingAssignment2
|
08bfafbea4a09860b0cc17d6077f8b46572e28ca
|
dd914a1237fb139def9b47f9cad60e5905f25f91
|
refs/heads/master
| 2021-01-16T17:40:55.827389
| 2015-07-25T09:51:08
| 2015-07-25T09:51:08
| 37,777,854
| 0
| 0
| null | 2015-06-20T17:01:08
| 2015-06-20T17:01:07
| null |
UTF-8
|
R
| false
| false
| 998
|
r
|
cachematrix.R
|
##The below code consists of two functions, makeCacheMatrix and cachesolve
##
## (1) MakeCacheMatrix is a function that returns a list of 4 functions(setmatrix,getmatrix,setinverse and getinverse)
## Create a wrapper around a matrix that can cache its inverse.
## Returns a list of four accessor closures:
##   setmatrix(y)   - replace the stored matrix and invalidate the cache
##   getmatrix()    - return the stored matrix
##   setinverse(v)  - store a computed inverse in the cache
##   getinverse()   - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  setmatrix <- function(y) {
    x <<- y
    # Invalidate the cached inverse whenever the matrix changes.
    inverse <<- NULL
  }
  getmatrix <- function() x
  setinverse <- function(inv2) {
    inverse <<- inv2
  }
  getinverse <- function() inverse
  # Bug fix: the original returned `setmean`/`getmean`, names that are not
  # defined in this function, so every call errored. Expose the inverse
  # accessors that cacheSolve() actually needs.
  list(setmatrix = setmatrix, getmatrix = getmatrix,
       setinverse = setinverse, getinverse = getinverse)
}
## (2) cachesolve is a function that first checks if the inverse of a matrix is present in the cache.
##If yes, the inverse is fetched from the cache using the getinverse function.
##If not, the new inverse is computed and the result is stored in the cache.
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix list),
## computing it with solve() on the first call and serving the cached value
## on subsequent calls. Extra arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$getmatrix()
  m <- solve(data, ...)
  # Bug fix: the original called x$setmatrix(m), which overwrote the stored
  # matrix with its inverse AND reset the cache to NULL, so nothing was ever
  # cached and later calls inverted the inverse. Store via setinverse().
  x$setinverse(m)
  m ## Return a matrix that is the inverse of 'x'
}
|
5249952c6a04e8905aa028ef5b31a006511dbbf5
|
6393fba5e6a812fe66ec154d890d82be1a7eb85a
|
/analysis/2016-03-04 create global, king, singleton per sample/global_ancestry.R
|
7e879c62b96e8473448c2e6bb2a5ecf7c583b521
|
[] |
no_license
|
gtlntw/topmed
|
363d21f3b22c32c50805b11e5bd97879b90c9e05
|
9b527b826afa652581779360a13e8b77c04b8ed8
|
refs/heads/master
| 2021-01-10T11:03:40.352791
| 2016-12-13T02:59:07
| 2016-12-13T02:59:07
| 51,096,038
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,352
|
r
|
global_ancestry.R
|
##parse parameters from command
# Parse name/value pairs following "--args" on the command line.
# Values are coerced to numeric when possible; strings starting with "c("
# are evaluated as R vectors; everything else stays character. When
# `evaluate` is TRUE each name is also assigned in the calling scope.
# Returns the named list of parsed values, or invisible() when no "--args"
# marker is present.
parseCommandArgs <- function(evaluate = TRUE) {
  raw_args <- commandArgs()
  marker <- which(raw_args == "--args")
  if (length(marker) == 0 || length(raw_args) < 1) {
    return(invisible())
  }
  pairs <- raw_args[(marker + 1):length(raw_args)]
  parsed <- list()
  for (k in seq(1, length(pairs), by = 2)) {
    arg_name <- pairs[k]
    arg_value <- NA
    # as.double() warns on non-numeric input; the empty handler leaves
    # arg_value as NA so the character branch below takes over.
    tryCatch(arg_value <- as.double(pairs[k + 1]),
             warning = function(w) {})
    if (is.na(arg_value)) {
      arg_value <- pairs[k + 1]
      if (substr(arg_value, 1, 2) == "c(") {
        arg_value <- eval(parse(text = pairs[k + 1]))
      }
    }
    if (evaluate) {
      assign(arg_name, arg_value, inherits = TRUE)
    }
    parsed[[length(parsed) + 1]] <- arg_value
    names(parsed)[length(parsed)] <- arg_name
  }
  return(parsed)
}
## Read `id` (the sample identifier) from the command line; parseCommandArgs
## assigns it into the calling scope.
parseCommandArgs()
##print parameter
print(id)
## Compute global ancestry proportions from the per-chromosome local
## ancestry inference (LAI) Viterbi calls for this sample.
lai.list <- list()
for(chr in 1:22) {
  lai.list[[chr]]<- read.table(paste("/net/topmed2/working/khlin/output/LAI/",id,"/",id,"_HGDP_chr",chr,".0.Viterbi.txt", sep=""))
}
# Pool all calls genome-wide; levels 1-7 are the seven reference populations
# (kept as explicit levels so absent populations still get a zero column).
lai <- factor(unlist(lai.list), levels=c("1", "2","3","4","5","6","7"))
# One-row matrix of per-population proportions, rounded to 4 decimals;
# the outer parentheses also print it to the log.
(p <- round(prop.table(matrix(table(lai), 1), 1), 4))
#save the result
write.table(p, file = paste0("/net/topmed2/working/khlin/output/LAI/",id,"/",id,"_global_lai.txt"), quote = F, col.names = paste0("pop.", 1:7))
|
4d614428866c123576aeca2f98e2c3419cf9960d
|
9839def077d342c67157110e7cfa58d868296137
|
/R/Rrefer/china/China.R
|
deaf99441550754dbfc1119dd993bfaceb55722e
|
[] |
no_license
|
redshim/TM1
|
221f9fa96863cf10f879e3e476535195edf7502c
|
5f6b49655ac012d2e638eca4857a0c19cb6903c9
|
refs/heads/master
| 2021-01-12T04:18:57.971923
| 2017-01-02T05:33:52
| 2017-01-02T05:33:52
| 77,583,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,484
|
r
|
China.R
|
# Scratch script for diagnosing locale/encoding problems when importing
# Chinese-language CSV files (apparently on Windows, given the paths).
# NOTE(review): these locale calls are sequential experiments — only the
# last successful Sys.setlocale() remains in effect afterwards.
Sys.getlocale()
Sys.setenv(LANG = "en")
options(encoding="utf-8")
Sys.setenv(LANG = "en_US.UTF-8")
Sys.setlocale("LC_ALL","UTF-8")
Sys.setlocale("LC_ALL", "C")
Sys.setlocale("LC_ALL","English")
sessionInfo()
LANGUAGE="en_US.utf8"
Sys.setenv(LANG = "en")
Sys.getlocale()
setwd("C://R//china")
# 1. With header=TRUE, an "invalid multibyte string" error occurs:
x2 <- read.csv("ff_UTF8.txt", header=T ,sep = ",", quote = '"', encoding="UTF-8")
#--> Error in type.convert(data[[i]], as.is = as.is[i], dec = dec, numerals = numerals,  :
#invalid multibyte string at '<e5><85><b6>餓뽩첅鵝<93>'
#In addition: Warning message: In scan(file, what, nmax, sep, dec, quote, skip, nlines, na.strings,  :
#EOF within quoted string
# 2. With header=FALSE, the columns come out shifted:
x1 <- read.csv("ff_UTF8.csv", header=F ,sep = ",", quote = '"', encoding="UTF-8")
x1[2,]
x2 <- read.csv("ff_UTF8.csv", header=T ,sep = ",", quote = '"', encoding="UNICODE")
x1 <- read.csv("china_ExcelImport_Gen.csv", header=T ,sep = ",", quote = '"', encoding="UTF-8")
################## R code ##############################
# 2. Script that misbehaves
######################################################
setwd("C://R//china")
# 1. With header=TRUE, an "invalid multibyte string" error occurs:
x2 <- read.csv("CN_UTF8.txt", header=T ,sep = ",", quote = '"', encoding="UTF-8")
#--> Error in type.convert(data[[i]], as.is = as.is[i], dec = dec, numerals = numerals,  :
#invalid multibyte string at '<e5><85><b6>餓뽩첅鵝<93>'
#In addition: Warning message: In scan(file, what, nmax, sep, dec, quote, skip, nlines, na.strings,  :
#EOF within quoted string
# 2. With header=FALSE, the columns come out shifted:
x1 <- read.csv("CN_UTF8.txt", header=F ,sep = ",", quote = '"', encoding="UTF-8")
# Earlier attempts kept for reference:
#x <- read.csv2("china_sns.csv", header=T , quote = '"', sep = "," ,fileEncoding="UTF-8", fill = TRUE)
#x <- read.table("china_k3_hive_utf8.csv", header=TRUE, fill = TRUE, sep=",",fileEncoding="UNICODE")
#x <-read.table('china_sns.csv',sep=",")
#read.table("china_k3_utf8.txt", sep=",", header=TRUE,fileEncoding="UTF-8")
#read.table("china_k3_utf8.txt", header=T , quote = '"', sep = "," ,fileEncoding="UTF-8")
#read.csv("china_k3_utf8.txt", header=T , quote = '"', sep = "," ,fileEncoding="UTF-8")
#read.table("china_sns.csv", fileEncoding="UTF-8", sep = ",", header = TRUE)
#hive -e 'select * from dura_master.china' > /home/yourfile.csv
|
af8750125c7c5cf437c424878391559d9d7d8e87
|
4e7a425672eeab6ba7ca94208d9c31e9571881a9
|
/R/ignore/building_pkgdown.R
|
7c7a1366a90363432abd3fd1181bab682e925677
|
[] |
no_license
|
cran/usdarnass
|
857710417992d5c3050d46515bd4ddade2f496f6
|
29d906589ef5c3bbfd8b6a40828d77ac0e8ff106
|
refs/heads/master
| 2020-12-22T23:03:46.276208
| 2019-06-21T07:50:03
| 2019-06-21T07:50:03
| 236,956,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
building_pkgdown.R
|
# One-off maintenance script: build the pkgdown website for the usdarnass
# package and record its authors in DESCRIPTION.
# https://lbusettspatialr.blogspot.com/2017/08/building-website-with-pkgdown-short.html
# One-time package setup steps (already run, kept for reference):
# require("devtools")
# use_readme_rmd()
# use_news_md()
# use_vignette("usdarnass") #substitute with the name of your package
#
# use_github_links()
# use_travis()
# use_cran_badge()
# Install the development version of pkgdown from GitHub.
devtools::install_github("hadley/pkgdown")
library("desc")
library("pkgdown")
# Render the static documentation site (writes to docs/ by default).
build_site()
# Record package authors in DESCRIPTION.
# NOTE(review): the same person is added twice, once as maintainer ("cre")
# and once as author ("aut") -- confirm this duplication is intentional.
desc_add_author("Robert", "Dinterman", "robert.dinterman@gmail.com",
role = "cre", comment = c(ORCID = "0000-0002-9055-6082"))
desc_add_author("Robert", "Dinterman", "robert.dinterman@gmail.com",
role = "aut", comment = c(ORCID = "0000-0002-9055-6082"))
desc_add_author("Jonathan", "Eyer", "jeyer@usc.edu", role = "aut")
|
364f751007afae496cb047f2ef67e5ad6a58b754
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/soilDB/examples/fetchOSD.Rd.R
|
a545e28390fd322ceac3c635f4f6d565d6e8ada7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
fetchOSD.Rd.R
|
# Auto-extracted example script for soilDB::fetchOSD, generated from the
# package's Rd documentation. The only executable statement is the library()
# call; the "##D" lines are the original \dontrun{} example, intentionally
# left commented out because they require network access to fetch OSD data.
library(soilDB)
### Name: fetchOSD
### Title: Fetch Data by Soil Series Name
### Aliases: fetchOSD
### Keywords: manip
### ** Examples
## Not run:
##D # soils of interest
##D s.list <- c('musick', 'cecil', 'drummer', 'amador', 'pentz',
##D 'reiff', 'san joaquin', 'montpellier', 'grangeville', 'pollasky', 'ramona')
##D
##D # fetch and convert data into an SPC
##D s.moist <- fetchOSD(s.list, colorState='moist')
##D s.dry <- fetchOSD(s.list, colorState='dry')
##D
##D # plot profiles
##D # moist soil colors
##D par(mar=c(0,0,0,0), mfrow=c(2,1))
##D plot(s.moist, name='hzname', cex.names=0.85, axis.line.offset=-4)
##D plot(s.dry, name='hzname', cex.names=0.85, axis.line.offset=-4)
##D
##D # extended mode: return a list with SPC + summary tables
##D x <- fetchOSD(s.list, extended = TRUE, colorState = 'dry')
##D
##D par(mar=c(0,0,1,1))
##D plot(x$SPC)
##D
##D str(x, 1)
##D
## End(Not run)
|
b9fb96d463b0a12d43f90d4d7294acba4d031d88
|
3fbd8c5078d5ebb28e23558b158ab74ec0f2ed6b
|
/man/lutUpdate.Rd
|
bdd70ce4d9fda7d99e97d63644a4b23f6f1ea975
|
[
"MIT"
] |
permissive
|
envima/envimaR
|
161cf2e8a0569292ae18b0edfbb0f99900f97bd4
|
c8854cd06203a12cf923818728f9cff9a2e41a3d
|
refs/heads/master
| 2021-07-23T23:29:35.948757
| 2021-07-13T22:40:40
| 2021-07-13T22:40:40
| 156,347,896
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 467
|
rd
|
lutUpdate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpMa.R
\name{lutUpdate}
\alias{lutUpdate}
\title{Update values of default environment to internal look-up table (deprecated)}
\usage{
lutUpdate()
}
\value{
List containing lut content.
}
\description{
Update values of default environment to internal look-up table.
Run it after updating \code{\link[=pckgDefaults]{pckgDefaults()}}.
}
\details{
None
}
\examples{
\dontrun{
lutInfo()
}
}
|
c09043656f97c5622f7083cce877fea6e59c6e0d
|
5072893034c9d61b7cf970c316d7300b31df352c
|
/Assignment5_Ornek.R
|
c57efcc4843426abbeff24fc41407a2e174e68a5
|
[] |
no_license
|
medtraf/R_Studio_Code
|
85a57f3f2854fa1539abf39675246e671bcd428d
|
83b97b12f31bf190686c659300168e778ccd0631
|
refs/heads/master
| 2022-12-11T08:42:17.449932
| 2020-09-08T00:41:32
| 2020-09-08T00:41:32
| 293,657,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,703
|
r
|
Assignment5_Ornek.R
|
# Assignment 5 Ertan Ornek
# Maps Medicare-exclusion ("fraud") counts from the Office of Inspector
# General onto US county/state choropleths with the usmap package.
# NOTE(review): this_dir is defined but never called in this script.
this_dir <- function(directory)
setwd( file.path(getwd(), directory) )
setwd("D:\\Backup\\Dropbox\\Canvas\\455\\Week9\\Assignment5_Ornek")
# https://cran.r-project.org/web/packages/usmap/vignettes/mapping.html
library(usmap)
library(ggplot2)
# Our client is interested in learning about the fraudulent activities recorded by Office of Inspector General
# The fraudulent activities mainly take place in Medicare by individuals or organizations
# The main business questions are:
# Are the fraudulent activities concentrated on certain areas (counties)?
# If the fraudulent activities were normalized by population, does the map significantly change?
all_data<-read.csv("UPDATED.csv",header=TRUE,sep=",")
all_city_county<-read.csv("uscities.csv")
all_dataDF<-data.frame(all_data)
all_city_countyDF<-data.frame(all_city_county)
# Upper-case city names so the merge keys match the exclusion data's CITY column.
all_city_countyDF$city<-toupper(all_city_countyDF$city)
str(all_dataDF)
#Now we attach county names and other fields to our fraud dataset
all_data_withCountynames<-merge(all_dataDF,all_city_countyDF,by.x = c("CITY","STATE"),by.y = c("city","state_id"))
#let's summarize the data by county
library(dplyr)
# Date range of exclusions; 41 below is the span in years (2018 - 1977).
print (min(all_data_withCountynames$EXCLDATE))
print (max(all_data_withCountynames$EXCLDATE))
print (2018-1977)
faudcases<- group_by(all_data_withCountynames, county_name,STATE,county_fips,population)
percounty<-summarise(faudcases,numberofcases=n())
percounty$state<-percounty$STATE
percounty$county<-paste("County", percounty$county_name, sep=" ")
# usmap expects a 5-digit, zero-padded FIPS code.
percounty$fips<-formatC(percounty$county_fips, width=5, flag="0")
percounty$abbr<-percounty$STATE
# Annualized rate per 100K residents (41-year window); capped at 100 so a
# few extreme counties don't flatten the color scale.
percounty$froudper100K<-(percounty$numberofcases/(41*percounty$population))*100000
percounty$froudper100K<- ifelse(percounty$froudper100K>100 , 100, percounty$froudper100K)
plot_usmap(regions = "counties",data = percounty,values = "froudper100K")+
  scale_fill_continuous(
    low = "white", high = "red", name = "No of Frauders per 100K pop.", label = scales::comma
  )+
  labs(title = "Office of Inspector General", subtitle = "Frauders Excluded from Federal and State Health Care Programs") +
  theme(panel.background = element_rect(colour = "black", fill = "lightblue"))
# Last 10 years of fraud reporting
# EXCLDATE is stored as a yyyymmdd integer, so numeric comparison works here.
all_data_withCountynames2<-all_data_withCountynames[which(all_data_withCountynames$EXCLDATE>20071231 & all_data_withCountynames$EXCLDATE<20181231),]
faudcases<- group_by(all_data_withCountynames2,STATE)
perstate<-summarise(faudcases,numberofcases=n())
perstate$state<-perstate$STATE
# statepop ships with usmap and provides pop_2015 per state.
perstate<-merge(perstate, statepop,by.x='state',by.y='abbr')
#perstate$county<-paste("County", perstate$county_name, sep=" ")
perstate$abbr<-perstate$STATE
perstate$froudper100K<-((perstate$numberofcases/10.0)/(perstate$pop_2015))*100000
perstate$froudper100K<- ifelse(perstate$froudper100K>100 , 100, perstate$froudper100K)
plot_usmap(include = c("CA", "NV", "ID", "OR", "WA"),labels=TRUE) +
  labs(title = "Western States")
plot_usmap(regions = "states",data = perstate,values = "froudper100K",labels=TRUE)+
  scale_fill_continuous(
    low = "white", high = "red", name = "No of Cases per 100K pop.", label = scales::comma
  )+
  #labs(title = "Office of Inspector General", subtitle = "Fraud Excluded from Federal and State Health Care Programs") +
  theme(panel.background = element_rect(colour = "transparent", fill = "lightblue"),legend.position = "right")
ggsave("Last10YearsPerState.svg", width=12, height=10)
### Vermont, Mississippi, Alaska, West Virginia
# Rebuild the per-county summary, this time using raw counts (capped at 200).
faudcases<- group_by(all_data_withCountynames, county_name,STATE,county_fips,population)
percounty<-summarise(faudcases,numberofcases=n())
percounty$state<-percounty$STATE
percounty$county<-paste("County", percounty$county_name, sep=" ")
percounty$fips<-formatC(percounty$county_fips, width=5, flag="0")
percounty$abbr<-percounty$STATE
percounty$froudper100K<-(percounty$numberofcases)
percounty$froudper100K<- ifelse(percounty$froudper100K>200 , 200, percounty$froudper100K)
plot_usmap(regions = "counties",data = percounty,values = "numberofcases",include = c("VT","MS","AK","WV"))+
  scale_fill_continuous(
    low = "white", high = "red", name = "No of Cases", label = scales::comma
  )+
  #labs(title = "Office of Inspector General", subtitle = "Frauders Excluded from Federal and State Health Care Programs") +
  theme(panel.background = element_rect(colour = "transparent", fill = "lightblue"),legend.position = "right")
ggsave("States.svg", width=39, height=20)
# Per-state extracts for the four focus states.
percounty$STATE=="VT"
vermontdata<-percounty[percounty$STATE=="VT",]
mississippidata<-percounty[percounty$STATE=="MS",]
alaskadata<-percounty[percounty$STATE=="AK",]
westvirginiadata<-percounty[percounty$STATE=="WV",]
|
d0a480f8bdcdeb8936dc5b20a5c5482b99ff8e0f
|
0fae864eec0b15404603f4d72b0f6579978375d5
|
/StudentenTrendShiny/screens/StudentenPerSector.R
|
4701655e4b3240b48e02011c0f93aba228d41130
|
[] |
no_license
|
TeamIntelligence/StudentenTrend
|
6a38c5deaaabd1496d4ecf305c2bf2b6d274a0e2
|
3d3757e78c9391cc5ab766aa6d814903e188e543
|
refs/heads/master
| 2021-04-26T16:44:34.044468
| 2016-01-29T09:10:41
| 2016-01-29T09:10:41
| 43,003,810
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,386
|
r
|
StudentenPerSector.R
|
# The UI function for the StudentenPerSector page.
# Builds a shinydashboard tabItem with: an explanatory box, a sector
# multi-select plus "all studies" checkbox, two total-line checkboxes, and a
# tabBox holding the current-data plots and the forecast plot.
# Depends on the global data frame `studievoortgang` for the sector choices.
# UI labels are intentionally in Dutch.
StudentenPerSectorUI <- function(PageName) {
  return(
    tabItem(tabName = PageName,
            fluidRow(
              box(width = 12, title = "Eerstejaarsstudenten",
                  p("Op deze pagina vindt u het aantal eerstejaarsstudenten per studiesector over de periode 1995 tot en met 2012. HBO en WO is samengenomen. U kunt zelf kiezen welke studiesectoren u wilt weergeven. Daarnaast kunt u ook een totaallijn weergeven van alle studies of een totaallijn van de studies die u geselecteerd hebt."),
                  p("De grafiek biedt inzicht hoeveel studenten elk jaar starten binnen een bepaalde studie sector. Er kan vervolgens uit opgemaakt worden of studies binnen een bepaalde studiesector groeien of afnemen."),
                  collapsible = T
              ),
              # Sector selector; choices come from the unique ISCED names.
              box(width=6, height = 170,
                  selectInput("StudentenPerSector_selectStudyImp",
                              "Selecteer een of meerdere studiesectoren om weer te geven:",
                              choices = list(Studiesectoren = unique(studievoortgang$iscedCode.iscedNaam), "Selecteer een studiesector" = ""),
                              multiple = TRUE,
                              selectize = TRUE),
                  checkboxInput("StudentenPerSector_AlleStudies",
                                "Geef alle studies weer"
                  )
              ),
              # Toggles for total lines (selected studies only / all studies).
              box(width = 6, height = 170,
                  checkboxInput("StudentenEerstejaars_Totaalselect",
                                "Totaal lijn weergeven van de geselecteerde studies"
                  ),
                  checkboxInput("StudentenEerstejaars_Totaal",
                                "Totaal lijn weergeven"
                  )
              )
              # Show a plot of the generated distribution
              ,
              tabBox( width=12, height=550,
                      tabPanel("Huidig",
                               box(width=5, plotlyOutput("StudentenPerSector_aantalStudentenPlot", height=450)),
                               box(width=7, plotOutput("StudentenPerSector_aantalStudentenBarPlot", height=450))
                      ),
                      tabPanel("Voorspelling",
                               box(width=12,plotOutput("StudentenPerSector_aantalStudentenVoorspellingPlot", height = 450))
                      )
              )
            )
    )
  )
}
# The Server function for the StudentenPerSector page.
# Renders three outputs (plotly line chart, bar chart, forecast plot) from
# the global `studievoortgang` data, filtered by the user's sector selection.
# Relies on helpers defined elsewhere in the app: TotaalAantalSelect,
# TotaalAantal, GetColors, PrintGGPlotly, AddTotaalLines, createForecastSub.
# reac$selections is a debounced copy of the select input (see the
# invalidateLater observer at the bottom); reac$redraw gates that copy.
StudentenPerSectorServer <- function(input, output, session) {
  reac <- reactiveValues(redraw = TRUE, selections = isolate(input$StudentenPerSector_selectStudyImp))
  # Line chart (plotly) of first-year students per selected sector.
  output$StudentenPerSector_aantalStudentenPlot <- renderPlotly({
    svSub <- studievoortgang[studievoortgang$iscedCode.iscedNaam %in% reac$selections,]
    PlotTitle <- "Aantal eerstejaarsstudenten per jaar verdeeld per studie"
    colnames(svSub)[colnames(svSub)=="iscedCode.iscedNaam"] <- "soort"
    # Total line for the selected studies only
    if (input$StudentenEerstejaars_Totaalselect == TRUE && length(reac$selections) != 0){
      svSub <- TotaalAantalSelect(data = svSub, filterParams = c("jaartal"))
    }
    # Total line over all studies
    if(input$StudentenEerstejaars_Totaal == TRUE) {
      svSub <- TotaalAantal(data = studievoortgang,
                            subSet = svSub,
                            filterParams = c("jaartal"))
    }
    plot <- ggplot(svSub, aes(x=jaartal)) +
      xlab("Jaar") +
      ylab("Aantal studenten") +
      ggtitle(PlotTitle) +
      theme(legend.position="none")+
      geom_line(data=svSub, aes(y=aantal, # one line per study
                                group=soort,
                                color=soort), size = -1) +
      geom_point(data=svSub,aes(y=aantal,
                                group=soort,
                                color=soort))+
      scale_color_manual(values=GetColors(svSub$soort), labels=unique(svSub$soort))
    # Render the plot
    # NOTE(review): the trailing PrintGGPlotly(plot) after the if/else is
    # unreachable because both branches return -- confirm and remove upstream.
    if(length(reac$selections) != 0 || input$StudentenEerstejaars_Totaal == TRUE) {
      PrintGGPlotly(plot)
    } else {
      return(plot)
    }
    PrintGGPlotly(plot)
  })
  # Stacked bar chart of the same subset.
  output$StudentenPerSector_aantalStudentenBarPlot <- renderPlot({
    svSub <- studievoortgang[studievoortgang$iscedCode.iscedNaam %in% reac$selections,]
    PlotTitle <- "Aantal eerstejaarsstudenten per jaar verdeeld per studie"
    # Total line for the selected studies only
    if(input$StudentenEerstejaars_Totaalselect == TRUE && length(reac$selections) != 0) {
      svSub <- TotaalAantalSelect(data = svSub, filterParams= c("jaartal"))
    }
    # Total line over all studies
    if (input$StudentenEerstejaars_Totaal == TRUE){
      svSub <- TotaalAantal(data = studievoortgang, subSet = svSub, filterParams= c("jaartal"))
    }
    plot <- ggplot(svSub, aes(x=jaartal)) +
      xlab("Jaar") +
      ylab("Aantal studenten") +
      ggtitle(PlotTitle)+
      geom_bar(stat = "identity", aes(y=aantal,fill=iscedCode.iscedNaam)) +
      scale_fill_manual(values=GetColors(svSub$iscedCode.iscedNaam), name="Studiesector")
    AddTotaalLines(plot=plot, data=svSub)
  })
  #########################
  ## FORECAST PLOT ##
  #########################
  output$StudentenPerSector_aantalStudentenVoorspellingPlot <- renderPlot({
    svSub <- studievoortgang[studievoortgang$iscedCode.iscedNaam %in% reac$selections,]
    colnames(svSub)[colnames(svSub)=="iscedCode.iscedNaam"] <- "soort"
    if (input$StudentenEerstejaars_Totaalselect == TRUE && length(reac$selections) != 0){
      # selected studies only
      svSub <- TotaalAantalSelect(data =svSub,
                                  filterParams= c("jaartal"))
    }
    if (input$StudentenEerstejaars_Totaal == TRUE ){
      # total line
      svSub <- TotaalAantal(data =studievoortgang,
                            subSet = svSub,
                            filterParams= c("jaartal"))
    }
    # Forecast is fit on the observed 1995-2012 window.
    StudentenEerstejaars_forecastSub <- createForecastSub(svSub, "aantal", "soort", 1995, 2012,"")
    plot <- ggplot(StudentenEerstejaars_forecastSub, aes(x=jaartal)) +
      xlab("Jaar") +
      ylab("Aantal eerstejaars studenten") +
      ggtitle("Aantal eerstejaars studenten per studiesector")
    AddTotaalLines(plot = plot, data = StudentenEerstejaars_forecastSub, forecast = T, name = "Studiesector")
  })
  # Keep the "all studies" checkbox in sync with whether every sector is selected.
  observe({
    trueFalse = length(input$StudentenPerSector_selectStudyImp) == length(unique(studievoortgang$iscedCode.iscedNaam))
    updateCheckboxInput(session, "StudentenPerSector_AlleStudies", value = trueFalse)
  })
  # When the checkbox is ticked (and not everything is selected yet), select all sectors.
  observeEvent(input$StudentenPerSector_AlleStudies, {
    trueFalse = length(input$StudentenPerSector_selectStudyImp) == length(unique(studievoortgang$iscedCode.iscedNaam))
    if(input$StudentenPerSector_AlleStudies == T && !trueFalse){
      updateSelectInput(session, "StudentenPerSector_selectStudyImp",
                        selected = studievoortgang$iscedCode.iscedNaam
      )
    }
  })
  # Any change to the select input suppresses the next debounce tick...
  observe({
    input$StudentenPerSector_selectStudyImp
    reac$redraw <- FALSE
  })
  # ...and this 500 ms timer copies the selection into reac$selections only
  # once the input has been stable for a full tick (simple debounce).
  observe({
    invalidateLater(500, session)
    input$StudentenPerSector_selectStudyImp
    input$redraw
    if (isolate(reac$redraw)) {
      reac$selections <- input$StudentenPerSector_selectStudyImp
    } else {
      isolate(reac$redraw <- TRUE)
    }
  })
}
|
c3ce330a3095209f87e2b2c3cf96b53a3f763b1e
|
129408919e4fcde9818bef047f6e9b2a74d23c8a
|
/man/get_meta_indicator.Rd
|
aee9f6afd04a57561ef4ba01008a269490dc564d
|
[
"MIT"
] |
permissive
|
mrc-ide/naomi
|
93decfb73624de911f298aadcc0e0d02b8d7d5e5
|
94d34246144e4dfcb86161258faf213a7db03268
|
refs/heads/master
| 2023-06-14T06:37:36.343882
| 2023-05-05T11:08:33
| 2023-05-05T11:08:33
| 204,965,083
| 7
| 6
|
NOASSERTION
| 2023-09-12T12:54:48
| 2019-08-28T15:32:00
|
R
|
UTF-8
|
R
| false
| true
| 360
|
rd
|
get_meta_indicator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outputs.R
\name{get_meta_indicator}
\alias{get_meta_indicator}
\title{Get indicator metadata}
\usage{
get_meta_indicator()
}
\value{
data.frame of indicator ids, labels, descriptions, and parameter mapping.
}
\description{
Get indicator metadata
}
\examples{
get_meta_indicator()
}
|
0290fb15b43b772810de5ae3d1a55963c6457ac9
|
543bf9f77229b2f15db421e19d2289ebd362fbbc
|
/static/files/gathering-tidy-2.R
|
d42ced5a14687340057eb0ab5195944438b7fa58
|
[] |
no_license
|
danilofreire/poli-301
|
4607eb8e1cf3315758b02869d307ad81624e7cd0
|
bbcf8a383634815e85cc3fb3d12ad3c10f193ee4
|
refs/heads/master
| 2022-03-13T17:23:19.294094
| 2019-12-06T13:59:22
| 2019-12-06T13:59:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,982
|
r
|
gathering-tidy-2.R
|
# Teaching script: tidying wide data with tidyr::gather() and working with
# factors (forcats), using fivethirtyeight/billboard example datasets.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr;
# kept as-is because the lesson predates that API.
library(tidyverse)
library(fivethirtyeight)
library(ggthemes)
# let's tidy drinks
drinks
# too many countries, let's look at a few
# %in% is a logical operator: returns observations that match one of the strings
drinks_subset =
  drinks %>%
  filter(country %in% c("USA", "China", "Italy", "Saudi Arabia"))
# let's gather the three alcohol variables into two: type and serving
tidy_drinks = drinks_subset %>%
  gather(key = "type", value = "serving", c(beer_servings, spirit_servings, wine_servings))
tidy_drinks
# let's put position = dodge in geom_col, which will place bars side by side
ggplot(tidy_drinks, aes(x = country, y = serving, fill = type)) +
  geom_col(position = "dodge")
# masculinity survey
masculinity_survey
?masculinity_survey
## let's look only at pressure to be manly
manly_pressure =
  masculinity_survey %>%
  filter(question == "Do you think that society puts pressure on men in a way that is unhealthy or bad for them?")
manly_pressure
## let's gather the age categories
manly_age = manly_pressure %>%
  select(response, age_18_34, age_35_64, age_65_over) %>%
  gather(key = "age_category", value = "proportion",
         c(age_18_34, age_35_64, age_65_over))
ggplot(manly_age, aes(x = response, y = proportion, fill = age_category)) +
  geom_col(position = "dodge") +
  theme_minimal() +
  labs(title = "Do you think that society puts pressure on men in a way that is unhealthy or bad for them?")
## pick a new question, and make a bar plot comparing people with and without children
masculine_children = masculinity_survey %>%
  filter(question == "How important is it to you that others see you as masculine?") %>%
  select(response, children_yes, children_no)
masc_child_tidy = masculine_children %>%
  gather(key = "has_children", value = "proportion", c(children_yes, children_no))
ggplot(masc_child_tidy, aes(x = response, y = proportion, fill = has_children)) +
  geom_col(position = "dodge")
## we can also gather everything
manly_tidy_all =
  manly_pressure %>%
  # -c(question, response) = everything EXCEPT question and response
  gather(key = "subset", value = "proportion", -c(question, response))
ggplot(manly_tidy_all, aes(x = response, y = proportion, fill = subset)) +
  geom_col(position = "dodge")
# Factors -----------------------------------------------------------------
## say I have data on weather, like so
weather = tibble(temp = c(80, 23, 14, 25, 83),
                 month = c("June", "September", "October", "February", "December"))
weather
ggplot(weather, aes(x = month, y = temp)) + geom_col()
## I want to display in month order; how?
## I can use factors
weather = weather %>%
  mutate(month = factor(month, levels = c("February",
                                          "June",
                                          "September",
                                          "October",
                                          "December")))
ggplot(weather, aes(x = month, y = temp)) + geom_col()
## say I want to see how common different eye colors are and plot them by frequency
starwars
star_eyes = starwars %>%
  group_by(eye_color) %>%
  summarise(n = n())
## I want to plot by how common they are
ggplot(star_eyes, aes(x = eye_color, y = n)) +
  geom_col() +
  coord_flip()
## reorder the factor by frequency (forcats)
star_eyes = star_eyes %>%
  mutate(eye_color = fct_reorder(eye_color, n))
star_eyes$eye_color
# plot in order
ggplot(star_eyes, aes(x = eye_color, y = n)) +
  geom_col() +
  coord_flip()
## use case_when to make a new variable classifying height into short medium tall
## then turn into a factor that makes sense
## plot using geom_col
starwars_height = starwars %>%
  mutate(star_heights = case_when(height < 100 ~ "short",
                                  height >= 100 & height < 200 ~ "medium",
                                  height >= 200 ~ "tall"))
starwars_height$star_heights
starwars_height =
  starwars_height %>%
  mutate(star_heights = factor(star_heights, levels = c("tall", "medium", "short")))
starwars_height$star_heights
## recreate Bechdel 538 infographic:
## break up year into 4 year blocks
## count how many categories in clean-test, then calculate percent
## make a barplot with year block on x-axis, y = percent of movies
bechdel_percent = bechdel %>%
  select(year, clean_test) %>%
  mutate(year_cat = cut_width(year, width = 4)) %>%
  group_by(year_cat, clean_test) %>%
  summarise(count_movies = n()) %>%
  mutate(total_movies = sum(count_movies)) %>%
  mutate(percent = count_movies/total_movies * 100)
ggplot(bechdel_percent, aes(x = year_cat, y = percent, fill = clean_test)) +
  geom_col() + theme_fivethirtyeight()
# gss survey data
gss_cat
?gss_cat
## what is average age, tv watched, and number of respondents in each religion
religion_tv = gss_cat %>%
  group_by(relig) %>%
  summarise(age = mean(age, na.rm = TRUE),
            tvhours = mean(tvhours, na.rm = TRUE),
            n = n())
religion_tv
ggplot(religion_tv, aes(x = tvhours, y = relig)) + geom_point()
## make religion a factor, ordered in terms of average tv
religion_tv =
  religion_tv %>%
  mutate(relig = fct_reorder(relig, tvhours))
religion_tv$relig
ggplot(religion_tv, aes(x = tvhours, y = relig)) + geom_point()
## create a new variable that breaks up year into two-year intervals
## turn into a factor so it retains its order
## calculate mean() and median() tv_hours for each year category
## gather the stats and plot, filling by stat
yearly_tv = gss_cat %>%
  mutate(year_int = case_when(year >= 2000 & year <= 2002 ~ "2000-2002",
                              year >= 2004 & year <= 2006 ~ "2004-2006",
                              year >= 2008 & year <= 2010 ~ "2008-2010",
                              year >= 2012 & year <= 2014 ~ "2012-2014")) %>%
  mutate(year_int = factor(year_int, levels = c("2000-2002",
                                                "2004-2006",
                                                "2008-2010",
                                                "2012-2014"))) %>%
  group_by(year_int) %>%
  summarise(avg_tv = mean(tvhours, na.rm = TRUE),
            median_tv = median(tvhours, na.rm = TRUE))
ggplot(yearly_tv, aes(x = year_int, y = avg_tv)) + geom_col()
## drug use
drug_use
?drug_use
## let's tidy the data
## notice that there are TWO variables here in wide format: use, and frequency
use = drug_use %>%
  select(age, contains("_use")) %>%
  gather(key = "drug", value = "percent", -c(age))
## then plot each
ggplot(use, aes(x = age, y = percent, color = drug)) + geom_point()
## billboard
library(billboard)
spotify_track_data
?spotify_track_data
## scatterplot of the different song characteristics over time
## you will need to tidy the data using gather
## you will facet over the characteristic
spotify_track_data %>%
  select(year, danceability, energy, loudness, speechiness, valence, tempo)
|
9be7de1e6b381b6a1aebe5d7a1bc0e9c5af60d3c
|
36241b0d8d9a2f63b1d1503a89b96212e9242cc2
|
/dz/6-monty.R
|
c461cad9e3e1cc64b34d9c1839412be622f99857
|
[] |
no_license
|
luciantin/faks-MIS
|
1818a578aa0d3be7d36665e5926ce895446985b5
|
14b57cd79a9c923a87fc5faa22f8d443ace06ba4
|
refs/heads/main
| 2023-05-11T18:06:37.216876
| 2021-05-31T06:23:22
| 2021-05-31T06:23:22
| 345,627,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
6-monty.R
|
# We roll two dice. What is the probability that their sum is >= 7?
# Analytically: 21 of the 36 equally likely outcomes give a sum >= 7,
# so P(sum >= 7) = 21/36 ~= 0.5833. Below we estimate it by Monte Carlo.
outcomes <- c(0, 1)                       # 0 = sum < 7, 1 = sum >= 7
probabilities <- c(15/36, 21/36)          # exact outcome probabilities
set.seed(2)                               # fixed seed for reproducibility
smpl_size <- 100000                       # number of simulated rolls
# Draw the indicator for each simulated pair of dice.
out <- sample(outcomes, size = smpl_size, replace = TRUE, prob = probabilities)
sum(out)                                  # count of "sum >= 7" events
out.table <- table(out)
out.table
# Theoretical value: 21/36 = 0.58333...
100 * out.table['1'] / smpl_size          # empirical percentage
# Simulated result: 0.58083...
|
a16b0f614b74ed75c480abcafb1df19bee12d77c
|
9d72922f56b6367d445516229ae1ee55c0eab1b2
|
/R/bad.R
|
6c4770a7695de4ed14a0e14327c3150927b96b31
|
[] |
no_license
|
mdodrill-usgs/fishR
|
088c91e7a6615a444bd0cadd6b07e91255adacc4
|
2a1172286933fd9954fd2bfe21aeee2d2c49b284
|
refs/heads/master
| 2020-03-15T01:20:17.505255
| 2019-10-28T18:47:13
| 2019-10-28T18:47:13
| 131,891,525
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
bad.R
|
#' @title Returns parameters from a Stan object which are not converged
#'
#' @description Returns the parameters from a Stan object which are not
#'   converged, based on Rhat (potential scale reduction factor).
#'
#' @param fit Model object from Stan (class \code{stanfit}).
#' @param r.hat.level Threshold for Rhat; parameters with Rhat above this
#'   value are reported. Defaults to 1.2.
#' @param plot Display trace plots of these parameters. (not working)
#'
#' @return Invisibly \code{NULL} when every parameter is below the threshold
#'   (or when \code{fit} is not a \code{stanfit} object); otherwise a named
#'   numeric vector of the offending Rhat values.
#'
#' @examples
#' \dontrun{
#' library(rstan)
#' scode <- "
#' parameters {
#'   real y[2];
#' }
#' model {
#'   y[1] ~ normal(0, 1);
#'   y[2] ~ double_exponential(0, 2);
#' }
#' "
#' fit <- stan(model_code = scode, iter = 10, verbose = FALSE)
#' bad(fit)
#' }
#' @author Michael J. Dodrill, \email{mdodrill@usgs.gov}
#' @export
bad = function(fit, r.hat.level = 1.2, plot = FALSE){
  # inherits() is the idiomatic class test (class(fit) may have length > 1).
  if (!inherits(fit, "stanfit")) {
    message("This only works for Stan objects!")
    return(invisible(NULL))
  }
  Rhat <- rstan::summary(fit)$summary[, "Rhat"]
  # Keep the Rhat values (with parameter names) that exceed the threshold.
  flagged <- Rhat[Rhat > r.hat.level]
  n.bad <- length(flagged)
  # Report as an actual percentage; the original printed a raw proportion
  # labelled "%", and printed n.bad twice via print() inside message().
  bad.pct <- round(100 * n.bad / length(Rhat), 1)
  message(n.bad, " parameters with Rhat > ", r.hat.level,
          " (", bad.pct, "% overall)")
  # if(plot == TRUE){
  #   stan_trace(fit = fit, par.name = )
  # }
  if (n.bad == 0) {
    invisible(NULL)
  } else {
    flagged
  }
}
|
b77298ea679d64b5306effd9c71fc1121aa1f5fb
|
67e58cd3324dbdb981002e680488e5aa88266111
|
/STAT_604/Homework/jblubau1_hw04_script.r
|
a80a12b70ec8b1c5fd8cea01fc734ff41b637122
|
[] |
no_license
|
mauliasavana/Statistics-Masters
|
b8a152870e528cb653dfb921cf1fd53202ecfe78
|
c89c2f36d05d5936404d5f460b1a2bdb01a93f3a
|
refs/heads/master
| 2021-08-27T17:18:16.994407
| 2017-05-13T12:09:28
| 2017-05-13T12:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,750
|
r
|
jblubau1_hw04_script.r
|
# name: jblubau1_hw04_script.r
# path: ~/Projects/learning/Statistics/STAT_604/Homework
# created by: Joseph Blubaugh
# created on: 6 Sept 2016
# purpose: Homework 04
# last ran:
Sys.time()
# 1) House Keeping
# NOTE(review): rm(list = ls()) wipes the whole workspace -- intentional for
# a homework script, but avoid in reusable code.
ls()
rm(list = ls())
# 2) Direct output to a file
# `file <- ...` inside the call both assigns the path to a global variable
# and passes it to sink(); split = TRUE echoes output to the console too.
sink(file <- "/home/jeston/Projects/learning/Statistics/STAT_604/Homework/jblubau1_hw04_output.txt", split = TRUE)
# 3) Sequence 4 - 100 by 4 (wrapping in () prints the assigned value)
(x1 <- seq(4, 100, 4))
mode(x1)
# 4) Create numeric vector .8 - 40 by .8
(x2 <- seq(.8, 40, .8))
mode(x2)
# 5) Create a 5 column matrix
matrix(x2, ncol = 5, byrow = FALSE)
# 6) combine two vectors: since x1 and x2 are multiples of each other
# x1 is recycled to fill in the empty spaces next to x2
(x3 <- cbind(x1, x2))
mode(x3)
# 7) combine vectors as rows
(x4 <- rbind(x1, x2))
# 8)
## a) show contents
ls()
## b) load data set (brings the `Oklahoma` data frame into the workspace)
load("/home/jeston/Projects/learning/Statistics/STAT_604/Data/HW04.RData")
## c) show contents
ls()
# 9) Display structure of loaded object
str(Oklahoma)
# 10) Display summary of object
summary(Oklahoma)
# 11) Compute average of HSTotal
mean(na.omit(Oklahoma$HSTotal))
# 12) Perform logical test: non-missing AND below the mean
is.na(Oklahoma$HSTotal) == FALSE &
Oklahoma$HSTotal < mean(na.omit(Oklahoma$HSTotal))
# 13) Return only School, City, HSTotal (columns 1, 2, 15)
Oklahoma[is.na(Oklahoma$HSTotal) == FALSE &
Oklahoma$HSTotal < mean(na.omit(Oklahoma$HSTotal)),
c(1,2,15)]
# 14) Use apply to summarise average class size for grade 7 - 12 (column-wise)
apply(X = Oklahoma[, 6:11], MARGIN = 2, FUN = "mean", na.rm = TRUE)
# 15) Use apply to create average class size (row-wise mean over the grades)
Oklahoma$AvgClassSize <- apply(Oklahoma[, 6:11], MARGIN = 1, FUN = "mean", na.rm = TRUE)
# 16) Display first 25 rows
head(Oklahoma, 25)
# 17) Stop output
sink()
|
331d63b6beb7ce406b26ae4665396bc33da5c48c
|
b4d9e8cdef90f26ead6ee0d3d4ce7dad1070687a
|
/plot1.R
|
c255b4bc986ee325b0bacc11cbf049acc3368425
|
[] |
no_license
|
animusmoth/ExData_Plotting1
|
fbb4eef4a3ce10bfe97708599529eeb361954ed2
|
4ff5c823d8e2fe8cc2e99ccd5372e7516da8488b
|
refs/heads/master
| 2021-01-18T07:57:11.896534
| 2015-02-08T23:55:36
| 2015-02-08T23:55:36
| 30,510,638
| 0
| 0
| null | 2015-02-08T23:54:35
| 2015-02-08T23:54:35
| null |
UTF-8
|
R
| false
| false
| 713
|
r
|
plot1.R
|
# Exploratory Data Analysis project, plot 1: histogram of Global Active Power
# for 1-2 Feb 2007 from the UCI household power consumption dataset.
fileName = "./datafile.zip"
# Download the zipped dataset only if it is not already cached locally.
if (!file.exists(fileName)) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = fileName, method="curl")
}
# unzip the file
unzip("datafile.zip")
# Load the file; missing values are coded as "?" in this dataset.
fl <- read.table(file="household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
# subset for Date -- dates are stored as d/m/yyyy strings, so these two
# values are 1 and 2 February 2007.
sub <- fl[fl$Date=="1/2/2007"|fl$Date=="2/2/2007",]
# remove original (the full table is large; free it once subset)
rm(fl)
# convert to Double
sub$Global_active_power <- as.double(sub$Global_active_power)
with(sub,hist(sub$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red"))
# Copy the on-screen plot to a PNG (default 480x480 device) and close it.
dev.copy(png, file="plot1.png")
dev.off()
|
3e064870a45c0d8d7202aad5895bf7d30a96e72a
|
514a5ea21c7744c1bb92ee95a797b4bad403de1a
|
/package/patientProfilesVis/man/patientProfilesVis-palette.Rd
|
360a18b2d30abb3cc00e2c969637d984c2a5f664
|
[] |
no_license
|
Lion666/patientProfilesVis
|
b99d49cb4d2ce92026169e9a3bdadba90902c12c
|
798609c581a88703a43769cb972a66d3138cdae4
|
refs/heads/master
| 2023-08-03T22:02:57.033220
| 2021-09-28T09:59:55
| 2021-09-28T10:00:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
patientProfilesVis-palette.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palettes.R
\name{patientProfilesVis-palette}
\alias{patientProfilesVis-palette}
\title{Parameters for all patient profiles visualization palette functions.}
\arguments{
\item{includeNA}{Logical (TRUE by default),
should NA elements be retained in the palette in case
\code{x} is specified?}
}
\value{
No return value, used for the documentation of
the palette functions of the package.
}
\description{
Parameters for all patient profiles visualization palette functions.
}
|
76e325156278f1194848fa19562cb26f3d155667
|
bdda050a3713f1ac5b996c2f5c600daecbad920b
|
/man/getExperimentSampleFeatures.Rd
|
4831b0b2497faee272e686f6f033db8a959aed82
|
[
"MIT"
] |
permissive
|
astrolabediagnostics/orloj
|
dd466acb293447b33426a22766770323b50fee99
|
f24b7ef7710e4ba3adaf0d615238bfcd8fe46380
|
refs/heads/master
| 2021-06-04T16:20:04.772433
| 2021-05-19T14:14:23
| 2021-05-19T14:14:23
| 110,556,030
| 5
| 3
|
MIT
| 2020-08-09T14:17:33
| 2017-11-13T14:07:22
|
R
|
UTF-8
|
R
| false
| true
| 502
|
rd
|
getExperimentSampleFeatures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experiment.R
\name{getExperimentSampleFeatures}
\alias{getExperimentSampleFeatures}
\title{Get experiment sample features.}
\usage{
getExperimentSampleFeatures(experiment)
}
\arguments{
\item{experiment}{An Astrolabe experiment.}
}
\value{
Experiment sample features.
}
\description{
Get the sample features for a given Astrolabe experiment. Feature names are
converted from internal Astrolabe IDs to user-readable names.
}
|
3ef2ed11a03637fe935ac82151859d2e1927cffa
|
9800746efbf5779c5178e122a97caedad2157263
|
/amps02dataIn.R
|
58a5be4723339ec880d0cb93009ef42cf59e06ea
|
[] |
no_license
|
hanspeter6/amps_2002
|
63518f7efcc9fc570478437ab423f0432d6219bd
|
b6ca9481cf253aa70c81b1b486075afd408c85ad
|
refs/heads/master
| 2021-04-29T22:52:18.207683
| 2018-09-02T14:00:45
| 2018-09-02T14:00:45
| 121,646,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,267
|
r
|
amps02dataIn.R
|
# libraries
library(stringr)
library(tidyverse)
library(caret)
print_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-newspaper-magazine-readership-v1.1.csv")
electr_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-electronic-media-v1.1.csv")
internet_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-internet-v1.1.csv")
demogrs_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-demographics-v1.1.csv")
personal_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-personal-v1.1.csv")
lsm_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-lsm-v1.1.csv")
lifestage_02 <- read.csv("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/amps-2002-lifestage-v1.1.csv")
# not got attitudes in 2002
#
save(print_02, electr_02, internet_02, demogrs_02, personal_02, lsm_02, lifestage_02, file = "input_02.RData")
load("input_02.RData")
#
print_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-newspaper-magazine-readership-v1.1_variable_labels.txt")
electr_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-electronic-media-v1.1_variable_labels.txt")
internet_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-internet-v1.1_variable_labels.txt")
demogrs_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-demographics-v1.1_variable_labels.txt")
personal_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-personal-v1.1_variable_labels.txt")
lsm_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-lsm-v1.1_variable_labels.txt")
lifestage_02_labels <- readLines("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/amps_2002/csv/metadata/variable_labels/amps-2002-lifestage-v1.1_variable_labels.txt")
# not got attitudes in 2002
#
save(print_02_labels, electr_02_labels, internet_02_labels, demogrs_02_labels, personal_02_labels, lsm_02_labels, lifestage_02_labels, file = "labels_02.RData")
load("labels_02.RData")
## 1st Print (newspapers and magazines) Media Set
## ISSUES
names_issues_print_02 <- str_subset(print_02_labels, 'Number of different issues usually read or page through') %>%
str_replace('.+\\s-', '') %>%
str_trim()
vars_issues_print_02 <- str_subset(print_02_labels, 'Number of different issues usually read or page through') %>%
str_replace('Number\\sof\\sdifferent.+', '') %>%
str_trim()
##Newspapers
# # fix names and get rid of some and save
# names_newspapers_02_issues <- names_issues_print_02[c(1:39,77)]
# fix(names_newspapers_02_issues)
# saveRDS(names_newspapers_02_issues, "names_newspapers_02_issues.rds")
names_newspapers_02_issues <- readRDS("names_newspapers_02_issues.rds")
# vector of variables
vars_newspapers_02_issues <- vars_issues_print_02[c(1:39,77)]
issues_newspapers_02 <- print_02[,vars_newspapers_02_issues]
# Magazines
# # fix names and get rid of some (including MNet guides and save
# names_magazines_02_issues <- names_issues_print_02[c(78:84,87:99,103:106,109:129, 131:146, 148:157)]
# fix(names_magazines_02_issues)
# saveRDS(names_magazines_02_issues, "names_magazines_02_issues.rds")
names_magazines_02_issues <- readRDS("names_magazines_02_issues.rds")
# vector of variables
vars_magazines_02_issues <- vars_issues_print_02[c(78:84,87:99,103:106,109:129, 131:146, 148:157)]
issues_magazines_02 <- print_02[,vars_magazines_02_issues]
## THOUROUGHLY
# "How thoroughly read" labels: titles after the final " -", with stray quotes removed.
names_thorough_print_02 <- str_subset(print_02_labels, 'How thoroughly respondent usually read') %>%
str_replace('.+\\s-', '') %>%
str_replace("\\'",'') %>%
str_trim()
# # three (161 vs 158) more than in issues need to id them and possibly remove
# which(!names_thorough_print_02 %in% names_issues_print_02)
#154, 156, 158 (GQ , Y Mag and De Kat)
# NOTE(review): the next assignment is immediately overwritten by the one after
# it (the issues names are reused wholesale), so this line is effectively dead.
names_thorough_print_02 <- names_thorough_print_02[-c(154,156,158)]
names_thorough_print_02 <- names_issues_print_02 # now they the same
# Variable codes: keep text before the "How thoroughly..." phrase, then drop
# the three titles (154, 156, 158) absent from the issues set to stay aligned.
vars_thorough_print_02 <- str_subset(print_02_labels, 'How thoroughly respondent usually read') %>%
str_replace('How\\sthoroughly.+', '') %>%
str_trim()
vars_thorough_print_02 <- vars_thorough_print_02[-c(154,156,158)]
##Newspapers
# get names and get rid of some and save (already sorted above)
# names_newspapers_02_thorough <- names_thorough_print_02[c(1:39,77)]
# fix(names_newspapers_02_thorough)
# saveRDS(names_newspapers_02_issues, "names_newspapers_02_issues.rds")
# vector of variables
vars_newspapers_02_thorough <- vars_thorough_print_02[c(1:39,77)]
thorough_newspapers_02 <- print_02[,vars_newspapers_02_thorough]
# Reverse the 1..6 coding so that larger values mean "read more thoroughly"
# (see the value-labels text file); used as weights below.
thorough_newspapers_02 <- 7 - thorough_newspapers_02
# Magazines
# fix names and get rid of some and save
# names_magazines_02_thorough <- names_thorough_print_02[c(77:99,103:107,109:157)]
# fix(names_magazines_02_issues)
# saveRDS(names_magazines_02_issues, "names_magazines_02_issues.rds")
# vector of variables
vars_magazines_02_thorough <- vars_thorough_print_02[c(78:84,87:99,103:106,109:129, 131:146, 148:157)]
thorough_magazines_02 <- print_02[,vars_magazines_02_thorough]
# # need to reverse numbering to serve as weights (see value_lables text file):
thorough_magazines_02 <- 7 - thorough_magazines_02
# create datasets ...for newspapers and magazines:
# Engagement = issue count weighted by (reversed) thoroughness; the "_simple"
# variants use the raw issue counts only.
newspapers_engagement_02_all <- issues_newspapers_02 * thorough_newspapers_02
names(newspapers_engagement_02_all) <- names_newspapers_02_issues
magazines_engagement_02_all <- issues_magazines_02 * thorough_magazines_02
names(magazines_engagement_02_all) <- names_magazines_02_issues
newspapers_engagement_02_simple_all <- issues_newspapers_02
names(newspapers_engagement_02_simple_all) <- names_newspapers_02_issues
magazines_engagement_02_simple_all <- issues_magazines_02
names(magazines_engagement_02_simple_all) <- names_magazines_02_issues
# # # replace NAs with zeros
# Treat "not asked / no reading" as zero engagement.
newspapers_engagement_02_all[is.na(newspapers_engagement_02_all)] <- 0
magazines_engagement_02_all[is.na(magazines_engagement_02_all)] <- 0
newspapers_engagement_02_simple_all[is.na(newspapers_engagement_02_simple_all)] <- 0
magazines_engagement_02_simple_all[is.na(magazines_engagement_02_simple_all)] <- 0
# save (alls)
saveRDS(newspapers_engagement_02_all, "newspapers_engagement_02_all.rds")
saveRDS(magazines_engagement_02_all, "magazines_engagement_02_all.rds")
saveRDS(newspapers_engagement_02_simple_all, "newspapers_engagement_02_simple_all.rds")
saveRDS(magazines_engagement_02_simple_all, "magazines_engagement_02_simple_all.rds")
# CLEAN UP
# for newspapers: include "Herald on Sat" as "other.news"
# Column 22 is moved to an "other.news" column and the original dropped.
other.news <- newspapers_engagement_02_all[,22]
newspapers_engagement_02 <- newspapers_engagement_02_all %>%
mutate(other.news = other.news)
newspapers_engagement_02 <- newspapers_engagement_02[,-c(22)]
other.news_simple <- newspapers_engagement_02_simple_all[,22]
newspapers_engagement_02_simple <- newspapers_engagement_02_simple_all %>%
mutate(other.news = other.news_simple)
newspapers_engagement_02_simple <- newspapers_engagement_02_simple[,-c(22)]
# for magazines - deal with it in vehicle_cleaning project
# NOTE(review): cleaned magazine sets come from a separate project on an
# absolute local path; this step is not reproducible from this script alone.
magazines_engagement_02 <- readRDS("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/vehicle_cleaning/magazines_engagement_02.rds")
magazines_engagement_02_simple <- readRDS("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/vehicle_cleaning/magazines_engagement_02_simple.rds")
# save them in this project
saveRDS(newspapers_engagement_02, "newspapers_engagement_02.rds")
saveRDS(magazines_engagement_02, "magazines_engagement_02.rds")
saveRDS(newspapers_engagement_02_simple, "newspapers_engagement_02_simple.rds")
saveRDS(magazines_engagement_02_simple, "magazines_engagement_02_simple.rds")
magazines_engagement_02 <- readRDS("magazines_engagement_02.rds")
newspapers_engagement_02 <- readRDS("newspapers_engagement_02.rds")
magazines_engagement_02_simple <- readRDS("magazines_engagement_02_simple.rds")
newspapers_engagement_02_simple <- readRDS("newspapers_engagement_02_simple.rds")
## 2nd Electronic Media Set
# RADIO
# # creating a names vector
# names_radio_02 <- electr_02_labels %>%
# str_subset('.+Listened.+4\\sweeks') %>%
# str_replace('.+Listened\\sto\\s','') %>%
# str_replace('\\sin\\sthe\\spast.+','')
#
# # get rid of first: "any Radio services via a satellite transmission"
# names_radio_02 <- names_radio_02[-1]
#
# # also get rid of the following summaries:
# # Total Radio (Any Radio) [22]
# # SABC African Language Services [23]
# # Total Community [24]
# # Limpopo Corridor [32]
# names_radio_02 <- names_radio_02[-c(22,23,24,32)]
#
# # fix(names_radio_02)
#
# saveRDS(names_radio_02, "names_radio_02.rds")
# fix(names_radio_02)
# Previously curated radio-station names (derivation in the comments above).
names_radio_02 <- readRDS('names_radio_02.rds')
# get data...
# Three recall windows: ca54 = past 4 weeks, ca50 = past 7 days, ca53 = yesterday.
# The same 4 summary rows are dropped from each (see list above).
radio4weeks_02 <- electr_02[,str_detect(names(electr_02), 'ca54co\\d{2}_\\d')]
radio4weeks_02 <- radio4weeks_02[,-c(22,23,24,32)] # get rid of list described above
radio7days_02 <- electr_02[,str_detect(names(electr_02), 'ca50co\\d{2}_\\d')]
radio7days_02 <- radio7days_02[,-c(22,23,24,32)] # get rid of list described above
radioYesterday_02 <- electr_02[,str_detect(names(electr_02), 'ca53co\\d{2}_\\d')]
radioYesterday_02 <- radioYesterday_02[,-c(22,23,24,32)] # get rid of "unsure" and "none"
## checking to see if same stations in 3 levels
# first extract all the variable colnames
colnames_4weeks_02 <- names(radio4weeks_02)
colnames_7days_02 <- names(radio7days_02)
colnames_yesterday_02 <- names(radioYesterday_02)
# yes
# creating engagement set:
# Engagement score per station = sum of the three recall-window indicators (0-3).
radio_engagement_02_all <- radio4weeks_02 + radio7days_02 + radioYesterday_02
names(radio_engagement_02_all) <- names_radio_02
saveRDS(radio_engagement_02_all, "radio_engagement_02_all.rds")
radio_engagement_02_all <- readRDS("radio_engagement_02_all.rds")
# AFTER CLEANING (see vehicle cleaning project)
# NOTE(review): absolute local path into a sibling project.
radio_engagement_02 <- readRDS("/Users/HansPeter/Dropbox/Statistics/UCTDataScience/Thesis/vehicle_cleaning/radio_engagement_02.rds")
# save in this space
saveRDS(radio_engagement_02, "radio_engagement_02.rds")
radio_engagement_02 <- readRDS("radio_engagement_02.rds")
# ## TV
# names_tv_02 <- electr_02_labels %>%
# str_subset('watched.+4\\sweeks') %>%
# str_replace('.+watched\\s','') %>%
# str_replace('in\\sthe\\spast\\s4\\sweeks','') %>%
# str_trim()
#
# # cut total tv, unsure, no tv, and mnet stuff
# names_tv_02 <- names_tv_02[-c(3:4,10:12)]
# saveRDS(names_tv_02, "names_tv_02.rds")
# Previously curated TV channel names (derivation in the comments above).
names_tv_02 <- readRDS("names_tv_02.rds")
# fix(names_tv_02)
# want to isolate only past 4 weeks
tv4weeks_02 <- electr_02[,c('ca38co9_1',
'ca38co9_2',
'ca38co9_5',
'ca38co9_6',
'ca38co9_7',
'ca38co17_4',
'ca38co17_5'
)]
# combine Bop and Other:
# Columns 1 and 6 are merged into a single "other" indicator; a rowSums of 2
# (respondent flagged both) is capped at 1 to keep it binary.
one_4 <- rowSums(tv4weeks_02[,c(1,6)])
two_4 <- ifelse(one_4 == 2, 1, one_4)
tv4weeks_02 <- tv4weeks_02 %>%
select(-ca38co9_1, -ca38co17_4) %>%
mutate(other = two_4)
# want to isolate only past 7 days...
tv7days_02 <- electr_02[,c('ca38co19_1',
'ca38co19_2',
'ca38co19_5',
'ca38co19_6',
'ca38co19_7',
'ca38co27_4',
'ca38co27_5'
)]
# combine Bop and Other:
one_7 <- rowSums(tv7days_02[,c(1,6)])
two_7 <- ifelse(one_7 == 2, 1, one_7)
tv7days_02 <- tv7days_02 %>%
select(-ca38co19_1, -ca38co27_4) %>%
mutate(other = two_7)
# want to isolate only yesterday...(indexes w.r.t 4weeks that are missing here: 7, 10)
tvYesterday_02 <- electr_02[,c('ca38co29_1',
'ca38co29_2',
'ca38co29_5',
'ca38co29_6',
'ca38co29_7',
'ca38co37_4',
'ca38co37_5'
)]
# combine Bop and Other:
one_y <- rowSums(tvYesterday_02[,c(1,6)])
two_y <- ifelse(one_y == 2, 1, one_y)
tvYesterday_02 <- tvYesterday_02 %>%
select(-ca38co29_1, -ca38co37_4) %>%
mutate(other = two_y)
# combining into a tv engagement dataset (using tv4weeks_02 as basis):
# Engagement per channel = sum of the three recall-window indicators (0-3).
tv_engagement_02 <- tv4weeks_02 + tv7days_02+ tvYesterday_02
names(tv_engagement_02) <- names_tv_02
saveRDS(tv_engagement_02, "tv_engagement_02.rds")
tv_engagement_02 <- readRDS("tv_engagement_02.rds")
## 3rd Internet Media Set
## accessed: sum of 4weeks, 7days and yesterday
# FIX: the original pattern 'ca38co(40)|(45)|(52)' matches "ca38co40" OR a bare
# "45" OR a bare "52" anywhere in a name, because regex alternation binds more
# loosely than concatenation; any unrelated column containing "45" or "52"
# would be swept in.  Group the alternatives under the "ca38co" prefix so only
# the intended ca38co40/ca38co45/ca38co52 access columns match.
internet_level1 <- internet_02[,str_detect(names(internet_02), 'ca38co((40)|(45)|(52))')]
#change all 2 = "No" and NA's' to 0
internet_level1[internet_level1 == 2 | is.na(internet_level1)] <- 0
internet_level1 <- rowSums(internet_level1)
# what internet was accessed for...
## (maybe could use similar to vehicles?? as well as add up and multiply with first eng):
# Same alternation-precedence fix as above: anchor every suffix alternative to
# the "ca38co" prefix.
internet_level2 <- internet_02[,str_detect(names(internet_02),
'ca38co((41_3)|(41_4)|(41_6)|(41_7)|(41_8)|(41_9)|(42_0)|(42_1)|(42_2)|(42_3)|(42_4)|(42_5)|(42_6))')]
# want to add "infos together to give int_search
# Columns 5:13 ("information" purposes) are summed into a single int_search score.
int_search <- transmute(internet_level2[,5:13], rowSums(internet_level2[,5:13]))
internet_level2 <- internet_level2[,1:4] %>%
mutate(int_search = int_search[,1])
names(internet_level2) <- c('int_print',
'int_radio',
'int_news',
'int_social',
'int_search')
## create single dataframe for internet multiplying internet_level1 with sum of internet_level2:
# Purpose indicators weighted by overall access recency (0-3 per access column).
internet_engagement_02 <- internet_level2 * internet_level1
internet_engagement_02_simple <- internet_level1
saveRDS(internet_engagement_02, "internet_engagement_02.rds")
saveRDS(internet_engagement_02_simple, "internet_engagement_02_simple.rds")
internet_engagement_02 <- readRDS("internet_engagement_02.rds")
internet_engagement_02_simple <- readRDS("internet_engagement_02_simple.rds")
## create single dataframe for media02, including total_engagement columns)
# Level 1: Type
# Per-respondent total engagement per medium (row sums over vehicle columns).
# NOTE(review): data.frame(cbind(...)) coerces through a matrix; all columns
# here are numeric so types survive, but the idiom is fragile.
media_type_02 <- data.frame(cbind(qn = print_02$qn,
rowSums(newspapers_engagement_02),
rowSums(magazines_engagement_02),
rowSums(radio_engagement_02),
rowSums(tv_engagement_02),
rowSums(internet_engagement_02)))
names(media_type_02) <- c("qn",
"newspapers",
"magazines",
"radio",
"tv",
"internet")
media_type_02 <- media_type_02 %>%
mutate(all = as.vector(newspapers + magazines + radio + tv + internet))
# "_simple": issue-count-only print engagement and single-value internet score.
media_type_02_simple <- data.frame(cbind(qn = print_02$qn,
rowSums(newspapers_engagement_02_simple),
rowSums(magazines_engagement_02_simple),
rowSums(radio_engagement_02),
rowSums(tv_engagement_02),
internet_engagement_02_simple))
names(media_type_02_simple) <- c("qn",
"newspapers",
"magazines",
"radio",
"tv",
"internet")
media_type_02_simple <- media_type_02_simple %>%
mutate(all = as.vector(newspapers + magazines + radio + tv + internet))
# "_simple_print": issue-count-only print engagement but full internet engagement.
media_type_02_simple_print <- data.frame(cbind(qn = print_02$qn,
rowSums(newspapers_engagement_02_simple),
rowSums(magazines_engagement_02_simple),
rowSums(radio_engagement_02),
rowSums(tv_engagement_02),
rowSums(internet_engagement_02)))
names(media_type_02_simple_print) <- c("qn",
"newspapers",
"magazines",
"radio",
"tv",
"internet")
media_type_02_simple_print <- media_type_02_simple_print %>%
mutate(all = as.vector(newspapers + magazines + radio + tv + internet))
# Level 2: Vehicles
# Per-respondent engagement per individual vehicle (station/title/channel).
media_vehicles_02 <- data.frame(cbind(qn = print_02$qn,
newspapers_engagement_02,
magazines_engagement_02,
radio_engagement_02,
tv_engagement_02,
internet_engagement_02))
media_vehicles_02_simple <- data.frame(cbind(qn = print_02$qn,
newspapers_engagement_02_simple,
magazines_engagement_02_simple,
radio_engagement_02,
tv_engagement_02,
internet_eng = internet_engagement_02_simple))
media_vehicles_02_simple_print <- data.frame(cbind(qn = print_02$qn,
newspapers_engagement_02_simple,
magazines_engagement_02_simple,
radio_engagement_02,
tv_engagement_02,
internet_eng = internet_engagement_02))
saveRDS(media_type_02, 'media_type_02.rds')
saveRDS(media_type_02_simple, 'media_type_02_simple.rds')
saveRDS(media_type_02_simple_print, 'media_type_02_simple_print.rds')
saveRDS(media_vehicles_02, 'media_vehicles_02.rds')
saveRDS(media_vehicles_02_simple, 'media_vehicles_02_simple.rds')
saveRDS(media_vehicles_02_simple_print, 'media_vehicles_02_simple_print.rds')
media_type_02 <- readRDS('media_type_02.rds')
media_type_02_simple <- readRDS('media_type_02_simple.rds')
media_type_02_simple_print <- readRDS('media_type_02_simple_print.rds')
media_vehicles_02 <- readRDS('media_vehicles_02.rds')
media_vehicles_02_simple <- readRDS('media_vehicles_02_simple.rds')
media_vehicles_02_simple_print <- readRDS('media_vehicles_02_simple_print.rds')
## 4th Demographics Set (see notes for descriptions)
# Age band (coded) and actual age; 999 in the actual-age field means
# refusal / don't know and is recoded to NA.
age <- personal_02[,'ca44co38']
age_actual <- personal_02[,'ca44co39'] # actual age..note some 999 = refusal or dont know
age_actual[age_actual == 999] <- NA
sex <- demogrs_02[,'ca46co51a']
# edu_alt <- personal_02[,'ca44co42'] # gives more sensible and additional level
# #ca44co42:
# # 1 No schooling
# # 2 Some primary school
# # 3 Primary school completed
# # 4 Some high school
# # 5 Matric (Grade 12)
# # 6 Artisan's certificate obtained
# # 7 Technikon diploma
# # 8 University degree completed
# # 9 Professional
# Education (AMPS variable ca46co48), recoded so the levels line up with the
# ordering used elsewhere: codes 6 and 7 shift up by one (6 -> 7, 7 -> 8) and
# the original 8 maps down to 6; all other codes pass through unchanged.
# Vectorised replacement for the original element-wise for-loop — identical
# mapping (each element is recoded from its original value), and additionally
# NA-safe where the scalar `if (edu[i] == 8)` would have errored on a missing
# value.
edu <- demogrs_02[,'ca46co48']
edu <- ifelse(edu %in% c(6, 7), edu + 1,
              ifelse(!is.na(edu) & edu == 8, 6, edu))
hh_inc <- demogrs_02[,'ca46co50']
race <- demogrs_02[,'ca46co51b']
# dataset: 1 = white, 2 = black, 3 = coloured, 4 = indian.
# 2012 dataset: 1 = black, 2 = coloured, 3 = indian, 4 = white
# change 2002 to 2012 codes for consistency: 1 to 4; 2 to 1; 3 to 2 and 4 to 3
# The +5 intermediate codes avoid clobbering values mid-permutation; the final
# subtraction brings them back into the 1-4 range.
race <- ifelse(race == 1, 9, race)
race <- ifelse(race == 2, 6, race)
race <- ifelse(race == 3, 7, race)
race <- ifelse(race == 4, 8, race)
race <- race - 5
province <- demogrs_02[,'ca46co56']
# Two mutually exclusive metro fields are combined into one code; metro2 is
# offset by 9 so its values continue the metro1 numbering.
metro1 <- demogrs_02[,'ca46co57']
metro2 <- demogrs_02[,'ca46co58'] + 9
metro <- rowSums(cbind(metro1,
metro2), na.rm = TRUE)
#as in '95 and 2012 need to sort out double count of Soweto....
# seems that all the 19s are the sum of 7 & 12s (ie, Soweto)
# # code as such, ie all 19s are actually 12s (this also eliminates double count in the 7s ( so exlude Soweto)) >NB double check this is same in '95!!!
# check
# collect and code into single metro set:
#0 = no metro
#1 Cape Town
#2 Cape Town Fringe Area
#3 Port Elizabeth/Uitenhage
#4 East London
#5 Durban
#6 Bloemfontein
#7 Greater Johannesburg
#8 Reef
#9 Pretoria
#10 Kimberley
##11 Pietermaritzburg
##12 Vaal
##13 Welkom
metro <- ifelse(metro == 19, 7, metro)
metro <- ifelse(metro == 13, 12, metro)
table(metro) # yes, continue
lang <- demogrs_02[,'ca46co75'] + 1 # change 0 to 1, so add one to all
# change NAs to "other"
lang <- ifelse(is.na(lang), 12, lang) # 12 == other
lifestages <- demogrs_02[,'ca46co77'] # nb different categories from 2012
mar_status <- personal_02[,'ca44co09']
# LSM (Living Standards Measure): code 0 actually means LSM 10.
lsm <- lsm_02[,'ca46co64']
lsm <- ifelse(lsm == 0,10,lsm)
lsm_full <- lsm
# no lifestyle or attitudes yet.
# Assemble all demographic vectors into one frame keyed by respondent id (qn)
# with the survey weight (pwgt).
demographics_02 <- data.frame(qn = print_02$qn,
pwgt = print_02$pwgt,
age,
age_actual,
sex,
edu,
hh_inc,
race,
province,
metro,
lang,
lifestages,
mar_status,
lsm,
lsm_full)
#reducing levels of categorical variables and setting factor types for demographics:
# age:
# Collapse the 8 age bands into 4 ordered groups (pairs of adjacent bands).
demographics_02$age <- ifelse(demographics_02$age %in% c(1,2), 1, demographics_02$age)
demographics_02$age <- ifelse(demographics_02$age %in% c(3,4), 2, demographics_02$age)
demographics_02$age <- ifelse(demographics_02$age %in% c(5,6), 3, demographics_02$age)
demographics_02$age <- ifelse(demographics_02$age %in% c(7,8), 4, demographics_02$age)
demographics_02$age <- factor(demographics_02$age, ordered = TRUE)
# sex:
demographics_02$sex <- factor(demographics_02$sex, ordered = FALSE)
#edu:
# Collapse to 3 ordered groups: below matric / matric / post-matric.
demographics_02$edu <- ifelse(demographics_02$edu %in% c(1,2,3,4), 1, demographics_02$edu)
demographics_02$edu <- ifelse(demographics_02$edu %in% c(5), 2, demographics_02$edu)
demographics_02$edu <- ifelse(demographics_02$edu %in% c(6,7,8), 3, demographics_02$edu)
demographics_02$edu <- factor(demographics_02$edu, ordered = TRUE)
#hh_inc
# Collapse household income to 4 ordered groups.
demographics_02$hh_inc <- ifelse(demographics_02$hh_inc %in% c(1,2,3,4), 1, demographics_02$hh_inc)
demographics_02$hh_inc <- ifelse(demographics_02$hh_inc %in% c(5,6), 2, demographics_02$hh_inc)
demographics_02$hh_inc <- ifelse(demographics_02$hh_inc %in% c(7), 3, demographics_02$hh_inc)
demographics_02$hh_inc <- ifelse(demographics_02$hh_inc %in% c(8), 4, demographics_02$hh_inc)
demographics_02$hh_inc <- factor(demographics_02$hh_inc, ordered = TRUE)
demographics_02$race <- factor(demographics_02$race, ordered = FALSE)
demographics_02$province <- factor(demographics_02$province, ordered = FALSE)
demographics_02$metro <- factor(demographics_02$metro, ordered = FALSE)
demographics_02$lang <- factor(demographics_02$lang, ordered = FALSE)
demographics_02$lifestages <- factor(demographics_02$lifestages, ordered = FALSE)
demographics_02$mar_status <- factor(demographics_02$mar_status, ordered = FALSE)
# demographics_02$pers_inc <- factor(demographics_02$pers_inc, ordered = TRUE)
# lsm
# Collapse the 10 LSM levels into 5 ordered pairs; lsm_full keeps all 10.
demographics_02$lsm <- ifelse(demographics_02$lsm %in% c(1,2), 1, demographics_02$lsm)
demographics_02$lsm <- ifelse(demographics_02$lsm %in% c(3,4), 2, demographics_02$lsm)
demographics_02$lsm <- ifelse(demographics_02$lsm %in% c(5,6), 3, demographics_02$lsm)
demographics_02$lsm <- ifelse(demographics_02$lsm %in% c(7,8), 4, demographics_02$lsm)
demographics_02$lsm <- ifelse(demographics_02$lsm %in% c(9,10), 5, demographics_02$lsm)
demographics_02$lsm <- factor(demographics_02$lsm, ordered = TRUE)
demographics_02$lsm_full <- factor(demographics_02$lsm_full, ordered = TRUE)
# demographics_02$lifestyle <- factor(demographics_02$lifestyle, ordered = FALSE) # not for 2002 yet
# demographics_02$attitudes <- factor(demographics_02$attitudes, ordered = FALSE) # not for 2002 yet
# save
saveRDS(demographics_02, "demographics_02.rds")
demographics_02 <- readRDS("demographics_02.rds")
# # read datafiles if necessary
# magazines_engagement_02 <- readRDS("magazines_engagement_02.rds")
# magazines_engagement_02_simple <- readRDS("magazines_engagement_02_simple.rds")
#
# newspapers_engagement_02 <- readRDS("newspapers_engagement_02.rds")
# newspapers_engagement_02_simple <- readRDS("newspapers_engagement_02_simple.rds")
#
# radio_engagement_02 <- readRDS("radio_engagement_02.rds")
#
# tv_engagement_02 <- readRDS("tv_engagement_02.rds")
#
# internet_engagement_02 <- readRDS("internet_engagement_02.rds")
# internet_engagement_02_simple <- readRDS("internet_engagement_02_simple.rds")
#
#
# media_type_02 <- readRDS("media_type_02.rds")
# media_type_02_simple <- readRDS("media_type_02_simple.rds")
# media_vehicles_02 <- readRDS("media_vehicles_02.rds")
# media_vehicles_02_simple <- readRDS("media_vehicles_02_simple.rds")
#
# demographics_02 <- readRDS("demographics_02.rds")
# #create single dataset s
# Join demographics with media-type totals and per-vehicle engagement
# (left joins on the shared respondent key).
set02 <- demographics_02 %>%
left_join(media_type_02) %>%
left_join(media_vehicles_02)
set02_simple <- demographics_02 %>%
left_join(media_type_02_simple) %>%
left_join(media_vehicles_02_simple)
# FIX: the original joined media_type_02_simple / media_vehicles_02_simple
# here, which made set02_simple_print an exact duplicate of set02_simple.
# The *_simple_print tables ("issues"-only print engagement but full internet
# engagement) are built above and otherwise never used, and the save comment
# below describes exactly their definition — join them instead.
set02_simple_print <- demographics_02 %>%
left_join(media_type_02_simple_print) %>%
left_join(media_vehicles_02_simple_print)
# get rid of zero variances:
# Drop zero-variance media columns (cols 16+; 1:15 are demographics) using
# caret::nearZeroVar.  NOTE(review): caret is not loaded in the visible
# library() calls — confirm it is attached earlier in the file.
ind_02 <- nearZeroVar(set02[,16:ncol(set02)], saveMetrics = TRUE)
good_set <- set02[,16:ncol(set02)][,!ind_02$zeroVar]
set02 <- data.frame(cbind(set02[,1:15], good_set))
ind_02_simple <- nearZeroVar(set02_simple[,16:ncol(set02_simple)], saveMetrics = TRUE)
good_set_simple <- set02_simple[,16:ncol(set02_simple)][,!ind_02_simple$zeroVar]
set02_simple <- data.frame(cbind(set02_simple[,1:15], good_set_simple))
ind_02_simple_print <- nearZeroVar(set02_simple_print[,16:ncol(set02_simple_print)], saveMetrics = TRUE)
good_set_simple_print <- set02_simple_print[,16:ncol(set02_simple_print)][,!ind_02_simple_print$zeroVar]
set02_simple_print <- data.frame(cbind(set02_simple_print[,1:15], good_set_simple_print))
# save them:
saveRDS(set02, "set02.rds") # using all data available
saveRDS(set02_simple, "set02_simple.rds") # using only "issues" for print and single value for internet
saveRDS(set02_simple_print, "set02_simple_print.rds") # using only "issues" for print. all values for internet
|
e0cffaf07d4c22f890094c65732f2403c2bec682
|
7c6f08119b1f6e9a87b5963c136d61857bdd8b7b
|
/12.EDA 1/pm_analysis/pm_analysis.R
|
c722760eaecbac02e67e280cf988a4d9eda6fb90
|
[] |
no_license
|
lherbeur/R-Labs
|
4d508708a658e9fff90d899bc41e80e03e34658a
|
de5cfa5b2d3df0ed3e02c0d53908460c8252dcd3
|
refs/heads/master
| 2020-03-18T04:43:31.772610
| 2018-05-21T19:01:42
| 2018-05-21T19:01:42
| 99,908,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,681
|
r
|
pm_analysis.R
|
# data source - https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip
# Exploratory analysis of the EPA National Emissions Inventory PM2.5 data
# (1999/2002/2005/2008).
library(tidyverse)
library(dplyr)
# NOTE(review): machine-specific absolute paths; as.tibble() is deprecated —
# prefer as_tibble() in current tibble versions.
pm_data <- read_rds('C://Users/Lherbeur/Documents/Projects/R/12.EDA 1/pm_analysis/exdata_data_NEI_data/summarySCC_PM25.rds')
pm_data <- as.tibble(pm_data)
pm_data_classification <- read_rds('C://Users/Lherbeur/Documents/Projects/R/12.EDA 1/pm_analysis/exdata_data_NEI_data/Source_Classification_Code.rds')
pm_data_classification <- as.tibble(pm_data_classification)
# pm_data$year <- as.Date(as.character(pm_data$year), "%Y")
head(pm_data)
# Per-year subsets of the inventory.
data_1999 <- pm_data[pm_data$year == 1999,]
data_2002 <- pm_data[pm_data$year == 2002,]
data_2005 <- pm_data[pm_data$year == 2005,]
data_2008 <- pm_data[pm_data$year == 2008,]
# Q1: did total US emissions decrease between 1999 and 2008?
total_emissions_1999 <- sum(data_1999$Emissions, na.rm = T)
total_emissions_2008 <- sum(data_2008$Emissions, na.rm = T)
# since 1999 total emission is greater than 2008's, total emissions decreased
rng <- range(data_1999$Emissions, data_2002$Emissions)
boxplot(data_1999$Emissions, data_2002$Emissions, ylim = rng)
# data_2005$Emissions, data_2008$Emissions)
# Total emissions per year, plotted as a point-and-line trend.
year_sum <- pm_data %>%
group_by(year) %>%
summarise(total = sum(Emissions, na.rm = T))
ggplot(data = year_sum, aes(x = year, y = total)) +
geom_point() +
geom_line()+
labs(x = "Year", y = "Total Emissions", title = "Total emissions for the US")
# baltimore - decreased, as well. tho, went up and then came down
# Q2: same trend restricted to Baltimore City (fips == "24510").
data_baltimore <- pm_data[pm_data$fips == "24510",]
data_baltimore_total <- data_baltimore %>%
group_by(year) %>%
summarise(total = sum(Emissions, na.rm = T))
ggplot(data = data_baltimore_total, aes(x = year, y = total)) +
geom_point() +
geom_line() +
labs(x = "Year", y = "Total Emissions", title = "Total emissions for Baltimore City")
# 3.
# All have seen consistent decreases; however, d point type saw an increase
# and then a decrease
# Q3: Baltimore emissions by source type (POINT/NONPOINT/ON-ROAD/NON-ROAD),
# faceted per type.
pm_data$type <- as.factor(pm_data$type)
data_baltimore_total <- data_baltimore %>%
group_by(year, type) %>%
summarise(total = sum(Emissions, na.rm = T))
p <- ggplot(data = data_baltimore_total, aes(x = year, y = total)) + geom_line() + geom_point()
p + facet_wrap(~type) +
labs(title = "Total emissions for Baltimore based on Type")
# coal combustion related sources
# Q4: coal-combustion sources, matched via the SCC classification table.
# NOTE(review): grepl("coal", ...) is case-sensitive and will miss "Coal"
# entries — consider ignore.case = TRUE; same applies to "Veh" below.
full_data <- merge(pm_data, pm_data_classification) #both av the SCC, so that's d default
coal_data <- full_data[grepl("coal", full_data$Short.Name), c('Emissions', 'year')]
coal_data_grouped <- coal_data %>%
group_by(year) %>%
summarise(total = sum(Emissions, na.rm = T))
ggplot(data = coal_data_grouped, aes(x = year, y = total)) + geom_line() + geom_point() +
labs(x = "Year", y = "Total Emissions", title = "Total emissions from Coal combustion")
# motor vehicle...baltimore
# Q5: motor-vehicle sources in Baltimore City.
veh_baltimore_data <- full_data[grepl("Veh", full_data$Short.Name) & full_data$fips == "24510",
c('Emissions', 'year')]
veh_baltimore_data_grouped <- veh_baltimore_data %>%
group_by(year) %>%
summarise(total = sum(Emissions, na.rm = T))
ggplot(data = veh_baltimore_data_grouped, aes(x = year, y = total)) + geom_line() + geom_point() +
labs(x = "Year", y = "Total Emissions", title = "Total emissions from Vehicles in Baltimore City")
# motor california
# Q6: compare vehicle emissions, Baltimore vs Los Angeles County (fips "06037").
veh_california_data <- full_data[grepl("Veh", full_data$Short.Name) & full_data$fips == "06037",
c('Emissions', 'year')]
veh_california_data_grouped <- veh_california_data %>%
group_by(year) %>%
summarise(total = sum(Emissions, na.rm = T))
# Prefix the column names so both summaries can sit side-by-side in one frame.
names(veh_baltimore_data_grouped) <- paste("baltimore", names(veh_baltimore_data_grouped), sep= ".")
names(veh_california_data_grouped) <- paste("california", names(veh_california_data_grouped), sep= ".")
merged_data <- data.frame(veh_baltimore_data_grouped, veh_california_data_grouped)
ggplot(data = merged_data) +
geom_point(aes(x = baltimore.year, y = baltimore.total), colour = "blue") +
geom_line(aes(x = baltimore.year, y = baltimore.total), colour = "blue") +
geom_point(aes(x = california.year, y = california.total), colour = "red") +
geom_line(aes(x = california.year, y = california.total), colour = "red") +
labs(x = "Year", y = "Total Emissions", title = "Total emissions from Vehicles in Baltimore City and California")
# # adding legends
# scale_colour_manual( values=c(California="red", Baltimore="blue"))
# legend("bottomleft", c('Baltimore', 'California'), col = c ("blue", "red"))
# sum_for_years <- tapply(pm_data$Emissions, pm_data$year, sum)
# sum_for_years
# years <- names(sum_for_years)
# sum <- sum_for_years[1]
# sum_for_years[[1]]
# x <- as.list(sum_for_years[1])
# names(x)
|
a45f37126717ff3b9343365020632651bec850a1
|
aee2c11aff6bd0874a03fbd16f852ad785efe5ba
|
/R/HalmScore.R
|
a36d84d9debeb7b08c5aed79eace68c5c8093df4
|
[
"MIT"
] |
permissive
|
maciejrosolowski/progressdatenbankderivate
|
bca72eadf47ba2dcffeed80cc120f25458f13581
|
1a2e31ed7e62970a0206883173d32e14d888563d
|
refs/heads/master
| 2021-02-18T18:34:28.100509
| 2020-08-02T13:07:09
| 2020-08-02T13:07:09
| 245,223,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,615
|
r
|
HalmScore.R
|
#' Compute the Halm score.
#'
#' @param FRM_B24 data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param FRM_BEF data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param FRM_RR data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param FRM_O2A data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param FRM_O2P data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param FRM_BEAT data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param DID_CLIN data.table containing the table with the same name from
#' the database of the PROGRESS study
#' @param zp_fabian vector of characters. They must be present in
#' event2zeitpunkt_table$zp_fabianref. Currently, only
#' zp_fabian = "auf_in_d-1_in_d0" is possible.
#'
#' @return a named list with components: input and out. input is a data.table
#' in the wide format (one row per patient), containing the data used for
#' computing the Halm score. out is a data.table with one row
#' corresponding to one patient, identified by the
#' PATSTUID. The column halm contains the value of Halm. The score is non-NA,
#' if more than 50% of its 6 subscores, i.e., at least 4 subscores are non-NA.
#' @export
#'
#' @examples
#' \dontrun{
#' excel_fn <- paste0("/net/ifs1/san_projekte/projekte/",
#' "PROGRESS/Datenmanagement/Data_freezes/",
#' "20190320/PROGRESS-freeze_201903_01.xlsx")
#' FRM_B24 <- readxl::read_excel(excel_fn, 'FRM_B24')
#' FRM_BEF <- readxl::read_excel(excel_fn, 'FRM_BEF')
#' FRM_RR <- readxl::read_excel(excel_fn, 'FRM_RR')
#' FRM_O2A <- readxl::read_excel(excel_fn, 'FRM_O2A')
#' FRM_O2P <- readxl::read_excel(excel_fn, 'FRM_O2P')
#' FRM_BEAT <- readxl::read_excel(excel_fn, 'FRM_BEAT')
#' DID_CLIN <- readxl::read_excel(excel_fn, 'DID_CLIN')
#' data.table::setDT(FRM_B24)
#' data.table::setDT(FRM_BEF)
#' data.table::setDT(FRM_RR)
#' data.table::setDT(FRM_O2A)
#' data.table::setDT(FRM_O2P)
#' data.table::setDT(FRM_BEAT)
#' data.table::setDT(DID_CLIN)
#' erg <- HalmScore(FRM_B24, FRM_BEF, FRM_RR, FRM_O2A, FRM_O2P, FRM_BEAT,
#' DID_CLIN, zp_fabian = "auf_in_d-1_in_d0")
#' erg
#' }
HalmScore <- function (FRM_B24,FRM_BEF, FRM_RR, FRM_O2A, FRM_O2P,FRM_BEAT,
DID_CLIN,zp_fabian="auf_in_d-1_in_d0") {
# due to non-standard evaluation notes in R CMD check
apo2.min_auf <- `apo2.min_d-1` <- apo2.min_d0 <- o2p.min_auf <-
`o2p.min_d-1` <- o2p.min_d0 <- patstuid <- halm <- NULL
# Halm score, originally added by Katrin on 14 March 2017
if ( !(zp_fabian %in% c("auf_in_d-1_in_d0")) ) {
stop("ERROR: variable zp_fabian must be set to auf_in_d-1_in_d0
It's not possible to calculate it for another time point!")
} # Per agreement with Peter (4 June 2019): back-filling is deliberately not
# applied consistently for auf_in_d-1_in_d0, but only where Katrin did it,
# to keep comparability with the base paper
# Pull each subscore's raw inputs (min/max vitals, oxygen, ventilation, GCS)
# at admission ("auf"), day -1 and day 0 via the getData4* helpers.
toadd_hfrq.min = getData4hfrqMin(FRM_B24,FRM_BEF)
toadd_hfrq.max = getData4hfrqMax(FRM_B24,FRM_BEF)
toadd_sysbp.min = getData4sysbp.min (FRM_RR)
toadd_afrq.max = getData4afrqMax(FRM_B24,FRM_BEF)
toadd_afrq.min =getData4afrqMin(FRM_B24,FRM_BEF)
toadd_apo2.min = getData4apo2.min (FRM_O2A)
toadd_o2p.min = getData4o2p.min (FRM_O2P)
toadd_beat =getData4beat(FRM_BEAT)
toadd_sauerst = getData4sauerst(FRM_O2A , FRM_O2P)
toadd_temp=getData4temp(FRM_BEF, FRM_B24)
toadd_verwirrt = getData4verwirrt(FRM_BEF)
toadd_gcs = getData4gcs (DID_CLIN)
# Full outer joins on patient id into one wide row per patient.
DAT = merge(toadd_hfrq.min, toadd_hfrq.max, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_sysbp.min, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_afrq.max, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_afrq.min, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_apo2.min, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_o2p.min, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_beat, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_sauerst, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_temp, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_verwirrt, by= "patstuid", all = T,sort = F)
DAT = merge(DAT, toadd_gcs, by= "patstuid", all = T,sort = F)
# stopifnot(nrow(DAT[allDuplicatedEntries(patstuid)])==0)
# Guard: the merges must not have fanned out — one row per patient.
stopifnot(anyDuplicated(DAT[, patstuid]) == 0)
setDF(DAT)
# Heart rate > 100 beats -> ok
# Pattern used for each subscore: take the day-0 value, fall back to the
# admission value where day 0 is missing, then binarise against the cut-off.
hrf<-pmax(DAT$hfrq.min_d0,DAT$hfrq.max_d0,na.rm=T)
myFilt<-is.na(hrf)
dummy<-pmax(DAT$hfrq.min_auf,DAT$hfrq.max_auf,na.rm=T)
hrf[myFilt]<-dummy[myFilt]
HRF.p<-as.numeric(hrf>100)
# Systolic blood pressure < 90 mmHg -> ok
sbp<-DAT$sysbp.min_d0
myFilt<-is.na(sbp)
sbp[myFilt]<-DAT$sysbp.min_auf[myFilt]
SBP.p<-as.numeric(sbp<90)
# Respiratory rate > 24/min -> ok
af<-pmax(DAT$afrq.min_d0,DAT$afrq.max_d0,na.rm=T)
myFilt<-is.na(af)
dummy<-pmax(DAT$afrq.min_auf,DAT$afrq.max_auf,na.rm=T)
af[myFilt]<-dummy[myFilt]
AF.p<-as.numeric(af>24)
# Oxygen saturation ->
# Fall-back chain here is day 0 -> day -1 -> admission (data.table NSE needs
# the temporary setDT/setDF switch).
setDT(DAT)
minPoxy = DAT[, ifelse(is.na(o2p.min_d0)==F, o2p.min_d0,
ifelse(is.na(`o2p.min_d-1`)==F, `o2p.min_d-1`,
o2p.min_auf))]
minApo2= DAT[,ifelse(is.na(apo2.min_d0)==F, apo2.min_d0,
ifelse(is.na(`apo2.min_d-1`)==F, `apo2.min_d-1`,
apo2.min_auf))]
setDF(DAT)
cutoff<-(minPoxy<90 | (minApo2 < 60))
# Ventilated? -> no NA
# beatmet<-DAT$bea.d0
beatmet <- DAT$patbea_d0 # 2020-05-23 MRos
# Supplemental O2?
extraO2<-DAT$sauerst_d0
myFilt<-is.na(extraO2)
extraO2[myFilt]<-DAT$sauerst_auf[myFilt]
# Overall oxygen point: low saturation OR ventilated OR supplemental oxygen.
O2.p<-as.numeric(cutoff | beatmet | extraO2)
# Body temperature > 37.8C -> ok
kt<-pmax(DAT$temp.min_d0,DAT$temp.max_d0,na.rm=T)
myFilt<-is.na(kt)
dummy<-pmax(DAT$temp.min_auf,DAT$temp.max_auf,na.rm=T)
kt[myFilt]<-dummy[myFilt]
KT.p<-as.numeric(kt>37.8)
# Mental status confused or Glasgow Coma Scale < 15 -> ok
verwirrt<-DAT$verwirrt_d0
myFilt<-is.na(verwirrt)
verwirrt[myFilt]<-DAT$verwirrt_auf[myFilt]
verwirrt[is.na(verwirrt)]<-0
MS.p<-as.numeric(verwirrt| (DAT$gcs_d0<15))
# Compute the total score
# dummy<-cbind(HRF.p,SBP.p,AF.p,O2.p,KT.p,MS.p)
# halm<-apply(dummy,1,function(x) sum(x,na.rm=T))
# res = data.table(halm, dummy)
# res$PATSTUID = DAT$patstuid
# res$EVENT = zeitpunkt2event(zp_fabian)
res <- data.table(PATSTUID = DAT$patstuid,
EVENT = zeitpunkt2event(zp_fabian),
HRF.p, SBP.p, AF.p, O2.p, KT.p, MS.p)
# 50% rule. > 50% of the subscores have to be non-NA for the score
# to be non-NA. 2020-07-01.
res[, halm := ifelse(rowSums(!is.na(.SD)) >= 4,
rowSums(.SD, na.rm = TRUE), NA_integer_),
.SDcols = c("HRF.p", "SBP.p", "AF.p", "O2.p", "KT.p", "MS.p")]
# 2020-03-04 MRos: replace call to moveColFront for no dependency on toolboxH
# res = moveColFront(res,c( "PATSTUID", 'event'))
res <- data.table::setcolorder(res, neworder = c( "PATSTUID", "EVENT"))
# Return both the assembled input and the score table (input2 kept for
# interface compatibility; it stays NULL).
erg = c()
erg$input = DAT
erg$input2 = c()
erg$out = res
erg
#completeness of score
# dummy<-cbind(!is.na(HRF.p),!is.na(SBP.p),!is.na(AF.p),!is.na(O2.p),
# !is.na(KT.p),!is.na(MS.p))
# com<-apply(dummy,1,function(x) sum(x)/6)
# sum(com>=0.5)
# sum(com>=0.5)/1532
# sum(com>=0.75)
# sum(com>=0.75)/1532
# sum(com>=0.9)
# sum(com>=0.9)/1532
# sum(com>=1)
# sum(com>=1)/1532
}
|
9464ae7e7d6103563efb2a31f2a1784a3ae5a564
|
beac9cf8f3605f222361bf1b6b5450607e4d837b
|
/demo/EITsolve.R
|
a1ccbb10fdf166a342c158157d223ab42524ba75
|
[] |
no_license
|
tfyamaguchi/AbdominalEIT
|
bceb9b026567d8094500181450e50b1f4212676b
|
041da4888ea8152ac0972149d60c2c5a05bf17f1
|
refs/heads/master
| 2020-05-09T17:16:05.490956
| 2019-04-14T12:19:31
| 2019-04-14T12:19:31
| 181,303,039
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
EITsolve.R
|
# Demo script: reconstruct an abdominal EIT image from the package's example data.
data(bound)    # boundary geometry for the 32-electrode model
data(exdata)   # example measurement data set
# Correct the first 32 measurement channels via the package's Kirchhoff routine
# (dev = 34 presumably selects a device/channel configuration -- TODO confirm)
exdata$datan32[,1:32] <- Kirchhoff(data=exdata$datan32[,1:32],dev=34)$expout
# Package-internal preprocessing step on the corrected data
ret <- hosein(data=exdata)
# Least-squares solve against the 32-point boundary model
exp <- lstsqrs(data=ret,bound32=bound)
# Package-internal solver step combining the boundary model and the fit
ret <- f9l5qxfa(bound32=bound,expout=exp)
# NOTE(review): convsmt() takes no arguments here -- presumably it smooths the
# result of the previous step held in package-internal state; verify.
ret <- convsmt()
# Filter and render the image "IMG__&_G.PRN" (nbun = number of bins, rfilt =
# filter radius, ratmap = ratio mapping -- names inferred, TODO confirm) using
# the upper half of a rich-colour palette.
ret <- filta06("IMG__&_G.PRN",bound32=bound,nbun=255,rfilt=0.1,ratmap=1,col=rich.colors(256)[128:256])
|
8514288b64f0e6160859fe2e4004614a512e0d20
|
95a70c587ee188ddada550cd9247fbfce453a23f
|
/R/plot_quartile_area.R
|
05d546812479199fc2302fc4102f8bf8ccdf5233
|
[] |
no_license
|
hendersontrent/hotteR
|
1f3a4cbf9de84bf726d74747374def3b61ec9b88
|
8bc36b61aaf8b00512dae426ddcad3bdea0a875e
|
refs/heads/main
| 2023-06-17T04:40:20.775420
| 2021-07-10T10:36:51
| 2021-07-10T10:36:51
| 318,095,605
| 1
| 0
| null | 2021-07-10T10:36:52
| 2020-12-03T06:18:45
|
R
|
UTF-8
|
R
| false
| false
| 2,481
|
r
|
plot_quartile_area.R
|
#'
#' Function to calculate quartile time series composition for Australian artists and produce a stacked area graph
#' @import dplyr
#' @import ggplot2
#' @importFrom magrittr %>%
#' @importFrom janitor clean_names
#' @param data The dataframe of Hottest 100 results to analyse
#' @return an object of class ggplot which is a stacked area graph
#' @author Trent Henderson
#' @export
#' @examples
#' \dontrun{
#' plot_quartile_area(historical_countdowns)
#' }
#'
plot_quartile_area <- function(data = historical_countdowns){

  # Clean and prepare: specialist countdowns (e.g. "Of The Decade",
  # "All-Time") carry a space in their 'year' value and are dropped; ranks
  # are bucketed into four quartiles; artists are flagged as Australian (2)
  # or international (1).
  cleaned <- data %>%
    janitor::clean_names() %>%
    dplyr::filter(!grepl(" ", year)) %>%
    dplyr::mutate(
      quartile = dplyr::case_when(
        rank <= 25 ~ 1,
        rank > 25 & rank <= 50 ~ 2,
        rank > 50 & rank <= 75 ~ 3,
        rank > 75 ~ 4),
      nationality = as.integer(ifelse(country == "Australia", 2, 1)),
      year = as.numeric(year))

  # Human-readable labels for the four rank buckets
  quartile_labels <- c(`1` = "Rank 1-25",
                       `2` = "Rank 26-50",
                       `3` = "Rank 51-75",
                       `4` = "Rank 76-100")

  # Yearly proportion of Australian entries falling into each quartile
  plot_data <- cleaned %>%
    dplyr::filter(nationality == 2) %>%
    dplyr::group_by(year, quartile) %>%
    dplyr::summarise(counter = dplyr::n()) %>%
    dplyr::group_by(year) %>%
    dplyr::mutate(probs = counter / sum(counter)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(quartile = unname(quartile_labels[as.character(quartile)]))

  # Draw the stacked area chart
  p <- ggplot2::ggplot(plot_data, ggplot2::aes(x = year, y = probs)) +
    ggplot2::geom_area(ggplot2::aes(fill = quartile)) +
    ggplot2::labs(title = "Time series of Australian artist Hottest 100 composition by quartile",
                  x = "Year",
                  y = "Proportion of Australian artists",
                  fill = "Quartile") +
    ggplot2::scale_y_continuous(limits = c(0,1),
                                breaks = seq(from = 0, to = 1, by = 0.2)) +
    ggplot2::scale_fill_manual(values = c("#fa448c", "#fec859", "#43b5a0", "#491d88")) +
    hotteR::theme_hotteR(grids = TRUE) +
    ggplot2::theme(legend.position = "bottom")

  return(p)
}
|
8463a25395fe444058360ed9769314a8a9eff420
|
65ee7b673307e33d708fc59fbcbc0dc14fe7dce2
|
/microflora_danica/kaiju_corekaiju/corekaiju/rearranging_individual_tables_t5.R
|
95819287a827248080399cc04920200271f91155
|
[] |
no_license
|
FPlatz95/master_thesis
|
0056cd9d41384930e4bd2bd04610588042fa9c56
|
b8f386e446a3815ba2835aea8d828c3a385de51f
|
refs/heads/master
| 2023-05-28T00:06:42.440807
| 2021-06-08T06:58:54
| 2021-06-08T06:58:54
| 340,918,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 436
|
r
|
rearranging_individual_tables_t5.R
|
# Rearrange one corekaiju OTU table: repair column names mangled by read.csv,
# reorder the columns, and write the combined table for ampvis.
# NOTE(review): with trailingOnly = FALSE the input path is assumed to sit at
# position 6 of the full command line (Rscript + options + script + arg) --
# fragile; confirm the exact invocation used in the pipeline.
filename=commandArgs(trailingOnly = F)
filename[6]
otutable_raw=read.csv(filename[6],check.names = F)
# read.csv prefixes names starting with a digit with "X" and converts other
# special characters to "."; undo both so sample IDs match the originals.
# NOTE(review): sub() removes only the FIRST "X" anywhere in each name, not
# just a leading one -- a sample ID containing "X" would be altered; verify.
colnames(otutable_raw)=sub("X","",colnames(otutable_raw))
colnames(otutable_raw)=gsub("\\.","-",colnames(otutable_raw))
maxl = length(otutable_raw)
# Reorder: last column first, then column 1, then columns 9..(maxl-1),
# and finally columns 2:8 (presumably taxonomy ranks -- TODO confirm).
otutable_raw = otutable_raw[,c(maxl,1,9:(maxl - 1),2:8)]
# Output path is hard-coded to the project's analysis directory.
write.csv(otutable_raw,"/srv/MA/Projects/microflora_danica/analysis/classified_corekaiju_t5/corekaiju_t5_ampvis_combined.csv",row.names=F)
|
950d33b8adcc2ff4070c2f57b6bcd5cbb8db514a
|
d2c7b6f677eb501b6f08c54fce7aebaf4119ae15
|
/man/summary.ssgraph.Rd
|
2987882ba46e34d6e47e0111047521837ce08b79
|
[] |
no_license
|
cran/ssgraph
|
9b792a284ee5ca70c24bbeeaf998fe769ef323db
|
15e27003a9ef1bf99ccc881f255853e309e17914
|
refs/heads/master
| 2023-01-12T03:58:58.043221
| 2022-12-24T12:30:02
| 2022-12-24T12:30:02
| 130,663,048
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
rd
|
summary.ssgraph.Rd
|
\name{summary.ssgraph}
\alias{summary.ssgraph}
\title{ Summary function for \code{S3} class \code{"ssgraph"} }
\description{
Provides a summary of the results for function \code{\link{ssgraph}}.
}
\usage{
\method{summary}{ssgraph}( object, round = 2, vis = TRUE, ... )
}
\arguments{
\item{object}{ An object of \code{S3} class \code{"ssgraph"}, from function \code{\link{ssgraph}}. }
\item{round}{ A value for rounding all probabilities to the specified number of decimal places. }
\item{vis}{ Visualize the results. }
\item{\dots}{System reserved (no specific usage).}
}
\value{
\item{selected_g}{The adjacency matrix corresponding to the selected graph which has the highest posterior probability.}
\item{p_links}{An upper triangular matrix corresponding to the posterior probabilities of all possible links.}
\item{K_hat}{The estimated precision matrix.}
}
\references{
Mohammadi, R. and Wit, E. C. (2019). \pkg{BDgraph}: An \code{R} Package for Bayesian Structure Learning in Graphical Models, \emph{Journal of Statistical Software}, 89(3):1-30
Mohammadi, A. and Wit, E. C. (2015). Bayesian Structure Learning in Sparse Gaussian Graphical Models, \emph{Bayesian Analysis}, 10(1):109-138
Mohammadi, A. et al (2017). Bayesian modelling of Dupuytren disease by using Gaussian copula graphical models, \emph{Journal of the Royal Statistical Society: Series C}, 66(3):629-645
}
\author{ Reza Mohammadi \email{a.mohammadi@uva.nl} }
\seealso{ \code{\link{ssgraph}} }
\examples{
\dontrun{
# Generating multivariate normal data from a 'random' graph
data.sim <- bdgraph.sim( n = 50, p = 6, size = 7, vis = TRUE )
ssgraph.obj <- ssgraph( data = data.sim, save = TRUE )
summary( ssgraph.obj )
}
}
|
814a440a55b737e9450b9334496692408f16041c
|
c146efcb22777fe677be62bb2e88f9b8788ab162
|
/run_analysis.R
|
6505e019eb95bdb251b37579a7ce64fb88142120
|
[] |
no_license
|
denizonder/Getting-and-Cleaning-Data-Project
|
c91b4d93074daf5fbbad5428486cf200dcb2cbdb
|
8d828d1da8d26e708ad61e1890b46a1223031599
|
refs/heads/master
| 2021-01-01T14:55:13.102435
| 2020-02-09T15:50:51
| 2020-02-09T15:50:51
| 239,327,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,564
|
r
|
run_analysis.R
|
# run_analysis.R -- Getting and Cleaning Data course project.
# Downloads the UCI HAR dataset, merges the train/test sets, keeps only the
# mean()/std() measurements, labels activities, tidies variable names, and
# writes a summary table of per-subject/per-activity means.

# dplyr supplies as_tibble/glimpse used below; the original script called
# these without ever loading the package.
library(dplyr)

# Downloading the files --------------------------------------------------------
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
project_files <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(project_files, recursive=TRUE)

## Reading the flat files into tables
# Bug fix: the archive uses lower-case "test"/"train" directories; the
# original "Test"/"Train" paths fail on case-sensitive filesystems.
features_test  <- read.table(file.path(project_files, "test",  "X_test.txt"),        header = FALSE)
features_train <- read.table(file.path(project_files, "train", "X_train.txt"),       header = FALSE)
subject_train  <- read.table(file.path(project_files, "train", "subject_train.txt"), header = FALSE)
subject_test   <- read.table(file.path(project_files, "test",  "subject_test.txt"),  header = FALSE)
activity_test  <- read.table(file.path(project_files, "test",  "y_test.txt"),        header = FALSE)
activity_train <- read.table(file.path(project_files, "train", "y_train.txt"),       header = FALSE)

## features.txt supplies the measurement column names.
# Bug fix: the original referenced the undefined variable `path_rf` here.
features_names <- read.table(file.path(project_files, "features.txt"), header = FALSE)

## Row binding the train and test data into single tables
activity_merged <- rbind(activity_train, activity_test)
features_merged <- rbind(features_train, features_test)
subject_merged  <- rbind(subject_train, subject_test)

## Assigning the column names.
# Bug fix: the original swapped "subject"/"activity" here and compensated
# with a later rename(); name them correctly from the start instead.
names(features_merged) <- features_names$V2
names(activity_merged) <- "activity"   # y_*.txt holds activity codes 1-6
names(subject_merged)  <- "subject"    # subject_*.txt holds subject ids

# Merge all the data by columns and create a unified table
subject_activity_merged <- cbind(subject_merged, activity_merged)
Complete_Data <- cbind(features_merged, subject_activity_merged)

# Subsetting: keep only mean()/std() measurements plus the id columns
features_names_specified <- features_names$V2[grep("mean\\(\\)|std\\(\\)", features_names$V2)]
last_names <- c(as.character(features_names_specified), "activity", "subject")
Last_Data <- subset(Complete_Data, select = last_names)
str(Last_Data)

# Reading in the activity labels (kept for reference; the descriptive level
# names below correspond to its contents)
activityLabels <- read.table(file.path(project_files, "activity_labels.txt"), header = FALSE)

# Factorise the activity codes and attach descriptive level names
Last_Data$activity <- as.factor(Last_Data$activity)
levels(Last_Data$activity) <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                                "SITTING", "STANDING", "LAYING")
levels(Last_Data$activity)

# Descriptive variable names
names(Last_Data) <- gsub("Mag", "Magnitude", names(Last_Data))
names(Last_Data) <- gsub("BodyBody", "Body", names(Last_Data))
names(Last_Data) <- gsub("^t", "Time", names(Last_Data))
names(Last_Data) <- gsub("Acc", "Accelerometer", names(Last_Data))
names(Last_Data) <- gsub("Gyro", "Gyroscope", names(Last_Data))
names(Last_Data) <- gsub("^f", "Frequency", names(Last_Data))

# Checking the table one last time (View() removed: it is interactive-only)
glimpse(Last_Data)

# Creating the second tidy table: mean of every variable per subject/activity.
# (library(plyr) removed: it was never used -- aggregate() is base R -- and
# loading plyr after dplyr masks dplyr verbs.)
Second_Table <- aggregate(. ~ subject + activity, Last_Data, mean)
Second_Table <- Second_Table[order(Second_Table$subject, Second_Table$activity), ]
glimpse(Second_Table)

# Writing the Second_Table as a txt file
# (fixed partial argument match: row.name -> row.names)
write.table(Second_Table, file = "Summary_Table.txt", row.names = FALSE)
|
25e8011624a941ac2231c243d24a3b4f1c067075
|
6601abca80d0dffb268bf2c756bbff8dc84d6385
|
/man/QueryBuilder.Rd
|
3f0141c470b0081806f3a1e1028c5a7e88033eaf
|
[
"MIT"
] |
permissive
|
langmead-lab/snapcount
|
5fd8e67dee09d8175a298ebba308e606a77f8002
|
257a66e3ed64155c9f3439aeea777ab6fb2feb6e
|
refs/heads/master
| 2022-05-01T08:22:58.627839
| 2022-04-26T14:00:26
| 2022-04-26T14:00:26
| 160,970,020
| 1
| 1
|
MIT
| 2020-07-31T16:10:10
| 2018-12-08T19:05:24
|
R
|
UTF-8
|
R
| false
| true
| 1,211
|
rd
|
QueryBuilder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_builder_wrappers.R
\name{QueryBuilder}
\alias{QueryBuilder}
\title{Construct a QueryBuilder object given a compilation and one or more regions.}
\usage{
QueryBuilder(compilation, regions)
}
\arguments{
\item{compilation}{A single string containing the name of the
Snaptron data source. Any variant of the \code{Compilation} enum can also be
passed as an argument.}
\item{regions}{Either a list of 1 or more \code{HUGO} gene names as strings
\verb{e.g. "BRCA1"} or a GRanges class object containing one or more genomic
intervals \verb{(e.g. "chr1:1-1000")}.}
}
\value{
A QueryBuilder object.
}
\description{
Construct a QueryBuilder object given a compilation and one or more regions.
}
\examples{
# construct a query builder for GTEX data source and BRCA1 gene
qb <- QueryBuilder(compilation = Compilation$gtex, regions = "BRCA1")
# construct a query builder for TCGA data source and chromosome region
qb <- QueryBuilder(compilation = "tcga", regions = "chr1:1-1000")
# construct a query builder for TCGA data source using GRanges object
library(GenomicRanges)
qb <- QueryBuilder(compilation = "tcga", regions = GRanges("chr1", "1-1000"))
}
|
6b956503e53ff2739efb35558faadd3c1d216921
|
b63c9bb0d7bdeb75aa8151055920af7b34ced83f
|
/man/db_drop_tables.Rd
|
fc70c83d659d67c1fcc075d8d6b57f7421a4dfb5
|
[] |
no_license
|
bernardocaldas/dplyrOracle
|
62c3a831cf6b292eb06b8b1fc2961f3f2b26ad62
|
2841cd966810740b07a204d73ffe2ece34f8be3e
|
refs/heads/master
| 2021-01-15T08:08:46.629130
| 2016-03-23T09:24:49
| 2016-03-23T09:24:49
| 54,482,145
| 0
| 1
| null | 2016-03-22T14:33:54
| 2016-03-22T14:33:54
| null |
UTF-8
|
R
| false
| true
| 478
|
rd
|
db_drop_tables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customSqlFunctions.R
\name{db_drop_tables}
\alias{db_drop_tables}
\title{Drop multiple tables in Oracle database}
\usage{
db_drop_tables(con, tables, force = FALSE, ...)
}
\arguments{
\item{con}{Connection}
\item{tables}{Character vector with table names to drop.}
\item{force}{Force drop?}
\item{...}{Other arguments passed to function}
}
\description{
Drop multiple tables in Oracle database
}
|
8fc5cd6fe4e4e2bd614182bcb549d21a766a51ef
|
b4e5e6e50c63f04d06e72ac20caa41c3aa394c13
|
/man/AlonDS.Rd
|
8b31f160a0e3d5aaac07a7843b3565b95c0725a8
|
[] |
no_license
|
cran/HiDimDA
|
59f9c12fddee68f14a5c017852921cc90eaf03bf
|
4fee7a41e8fa45c90816b05d9a364ed7b2b87e4b
|
refs/heads/master
| 2021-01-01T20:48:52.167666
| 2015-10-19T08:41:33
| 2015-10-19T08:41:33
| 17,679,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 946
|
rd
|
AlonDS.Rd
|
\name{AlonDS}
\docType{data}
\alias{AlonDS}
\title{Alon Colon Cancer Data Set}
\description{This data set was collected by Alon \emph{et. al.} and consists of 2000 genes measured on 62 patients: 40 diagnosed with colon cancer and 22 healthy patients. The patient status is described by the factor \sQuote{grouping} and the gene values are given by the numeric variables \sQuote{genes.1} through \sQuote{genes.2000}.}
\usage{data(AlonDS)}
\source{ Alon, U., Barkai, N., Notterman, D.A., Gish, K., Ybarra, S., Mack, D. and Levine, A.J. (1999) \dQuote{Broad patterns of gene expression revealed by clustering analysis of tumor and normal colon tissues probed by oligonucleotide arrays}, In: \emph{Proceedings National Academy of Sciences} USA 96, 6745-6750. The data set is available at http://microarray.princeton.edu/oncology.}
\format{A data frame containing 62 observations on one factor and 2000 numeric variables.}
\keyword{datasets}
|
9f76a96054e670d8d0b89e86ab3b72b8e1e41faf
|
b1cccc43340f5e1100a95428ecfe6a14fadb215a
|
/man/ld_matrix_local.Rd
|
56b3ef27cc91c7dc395909b82cded33977d5a144
|
[
"MIT"
] |
permissive
|
MRCIEU/ieugwasr
|
b041818e3de4db287aed0667c5e167ac0bdf74f3
|
33e4629f4dacd635c68e690bb5648de529c333cc
|
refs/heads/master
| 2022-07-01T22:52:46.713761
| 2022-06-15T14:27:21
| 2022-06-15T14:27:21
| 214,448,457
| 35
| 17
|
NOASSERTION
| 2022-03-16T14:54:21
| 2019-10-11T13:50:01
|
R
|
UTF-8
|
R
| false
| true
| 752
|
rd
|
ld_matrix_local.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ld_matrix.R
\name{ld_matrix_local}
\alias{ld_matrix_local}
\title{Get LD matrix using local plink binary and reference dataset}
\usage{
ld_matrix_local(variants, bfile, plink_bin, with_alleles = TRUE)
}
\arguments{
\item{variants}{List of variants (rsids)}
\item{bfile}{Path to bed/bim/fam ld reference panel}
\item{plink_bin}{Specify path to plink binary. Default = \code{NULL}.
See \url{https://github.com/explodecomputer/plinkbinr} for convenient access to plink binaries}
\item{with_alleles}{Whether to append the allele names to the SNP names.
Default: \code{TRUE}}
}
\value{
data frame
}
\description{
Get LD matrix using local plink binary and reference dataset
}
|
ecf8269ff4e6cb1b7cf07a159bca552d7e3fd630
|
c96fb047660d57547921e01de547ffcdfc2af4f8
|
/R/~old/R/LDstatsBPM.R
|
3186fb884c853f5a8a52faad73ddd42120993e6d
|
[
"MIT"
] |
permissive
|
BaderLab/POPPATHR
|
d5a3acf04fdda8ce3e9ad6ef41ade62dee7f8052
|
19290bfdaaa3ff06c9cfcad72f04b3f3e789007b
|
refs/heads/master
| 2023-06-23T21:30:03.556374
| 2021-06-09T20:35:43
| 2021-06-09T20:35:43
| 201,321,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,816
|
r
|
LDstatsBPM.R
|
#' Calculate selection statistics (LD) and perform exploratory analyses
#' for two sets of variants via R snpStats package
#' https://bioconductor.org/packages/release/bioc/manuals/snpStats/man/snpStats.pdf
#' @param hcInDir (char) path to files with high-confidence pathway SNP lists
#' @param lcInDir (char) path to files with low-confidence pathway SNP lists
#' @param popNames (char) optional - character vector to set alternate
#' population names in plots eg. c("European", "African") instead of
#' "CEU" and "YRI" for example
#' @param outDir (char) path to output directory
#' @return
#' @export
LDstats <- function(hcInDir, lcInDir, popNames=NULL, snp2geneF, outDir) {
# Read in snp2gene.txt file to assign SNPs to genes
snp2gene <- read.table(snp2geneF, h=F, as.is=T)
snp2gene <- snp2gene[,-3]
calcLD <- function(snpDir, popcode) {
ss1 <- c(); ss2 <- c()
snp.map1 <- c(); snp.map2 <- c()
ld.calc <- list(); pairwise.df <- list(); diff <- list()
diff.r2 <- list(); diff.dp <- list()
# Read PLINK files for each pathway
bed <- list.files(path=snpDir, pattern="*.bed", full.names=T)
bim <- list.files(path=snpDir, pattern="*.bim", full.names=T)
fam <- list.files(path=snpDir, pattern="*.fam", full.names=T)
###### NEW FOR BETWEEN-PATHWAY LD #####################
# matrix of all possible pathway x pathway combinations
bed.pair <- t(combn(bed, 2))
bim.pair <- t(combn(bim, 2))
fam.pair <- t(combn(fam, 2))
####################################################
for (i in 1:nrow(bed.pair)) {
cat(sprintf("#### BETWEEN-PATHWAY INTERACTION %i ####\n", i))
cat(sprintf("*Reading pathway %s x pathway %s PLINK sets\n",
basename(file_path_sans_ext(bed.pair[i,1])),
basename(file_path_sans_ext(bed.pair[i,2]))))
ss1[[i]] <- read.plink(bed.pair[i,1], bim.pair[i,1], fam.pair[i,1])
ss2[[i]] <- read.plink(bed.pair[i,2], bim.pair[i,2], fam.pair[i,2])
# Subset genotypes by population
if (popcode == 0) {
# For first pathway of pathwayxpathway interaction
pop <- which(ss1[[i]]$fam$affected != popcode)
ss1[[i]]$genotypes <- ss1[[i]]$genotypes[pop, ]
cat(sprintf("\n*Keeping %i individuals in pathway one of interaction\n",
length(pop)))
print(ss1[[i]]$genotypes)
# For second pathway of pathwayxpathway interaction
pop <- which(ss2[[i]]$fam$affected != popcode)
ss2[[i]]$genotypes <- ss2[[i]]$genotypes[pop, ]
cat(sprintf("\n*Keeping %i individuals in pathway two of interaction\n",
length(pop)))
print(ss2[[i]]$genotypes)
} else {
# For first pathway of pathwayxpathway interaction
pop <- which(ss1[[i]]$fam$affected == popcode)
ss1[[i]]$genotypes <- ss1[[i]]$genotypes[pop, ]
cat(sprintf("\n*Keeping %i %s genotypes in pathway one of interaction\n",
length(pop),
if (popcode == 1) {pop1}
else if (popcode == 2) {pop2} ))
print(ss1[[i]]$genotypes)
# For second pathway of pathwayxpathway interaction
pop <- which(ss2[[i]]$fam$affected == popcode)
ss2[[i]]$genotypes <- ss2[[i]]$genotypes[pop, ]
cat(sprintf("\n*Keeping %i %s genotypes in pathway two of interaction\n",
length(pop),
if (popcode == 1) {pop1}
else if (popcode == 2) {pop2} ))
print(ss2[[i]]$genotypes)
}
cat("*Calculating pathway x pathway LD statistics...\n")
ld.calc[[i]] <- ld(x=ss1[[i]]$genotypes, y=ss2[[i]]$genotypes,
stats=c("D.prime", "R.squared"))
snp.map1 <- ss1[[i]]$map #genomic location of each SNP
snp.map2 <- ss2[[i]]$map
r2 <- as.matrix(ld.calc[[i]]$R.squared) #convert sparseMatrix to regular matrix
r2 <- melt(r2) #melt matrix to data frame
colnames(r2)[3] <- "R.squared"
# Create dataframe containing pairwise distance calculations for each
# SNP-SNP pair
dp <- as.matrix(ld.calc[[i]]$D.prime)
dp <- melt(dp)
colnames(dp)[3] <- "D.prime"
all.stats <- merge(r2, dp, by=c("Var1", "Var2"))
colnames(all.stats)[1:2] <- c("snp.name.1", "snp.name.2")
snp.map1 <- subset(snp.map1, select=c("snp.name", "chromosome", "position"))
colnames(snp.map1)[1] <- "snp.name.1"
snp.map2 <- subset(snp.map2, select=c("snp.name", "chromosome", "position"))
colnames(snp.map2)[1] <- "snp.name.2"
pairwise <- merge(snp.map1, all.stats, by="snp.name.1")
colnames(pairwise)[1:3] <- c("snp_1", "chr_1", "pos_1")
pairwise <- merge(snp.map2, pairwise, by="snp.name.2")
colnames(pairwise) <- c("snp_2", "chr_2", "pos_2", "snp_1",
"chr_1", "pos_1", "R.squared", "D.prime")
pairwise <- pairwise[,c(4:6, 1:3, 7, 8)]
pairwise$pathway_pair1 <- basename(file_path_sans_ext(bed.pair[i,1]))
pairwise$pathway_pair2 <- basename(file_path_sans_ext(bed.pair[i,2]))
pairwise$ixn_num <- sprintf("interaction_%i", i)
# Assign gene to each SNP
colnames(snp2gene) <- c("snp_1", "gene_1")
pairwise.df <- merge(pairwise, snp2gene, by="snp_1")
colnames(snp2gene) <- c("snp_2", "gene_2")
pairwise.df <- merge(pairwise.df, snp2gene, by="snp_2")
cat(sprintf("\tTotal SNP-SNP interactions: %i\n", nrow(pairwise.df)))
cat("*Removing any matching genes in pathway x pathway interaction...\n")
remove <- intersect(pairwise.df$gene_1, pairwise.df$gene_2)
no.match <- pairwise.df[!(pairwise.df$gene_1 %in% remove),]
no.match2 <- no.match[!(no.match$gene_2 %in% remove),]
cat("*Filtering for inter-chromosomal interactions...\n")
diff[[i]] <- filter(no.match2, chr_1 != chr_2)
cat(sprintf("\tNew number of interactions: %i\n", nrow(diff[[i]])))
diff.r2[[i]] <- select(diff[[i]], R.squared) %>% unlist
diff.dp[[i]] <- select(diff[[i]], D.prime) %>% unlist
cat("done.\n\n")
}
diff.pairs <<- do.call("rbind", diff)
diff.num <<- sapply(diff.r2, length) #all SNP-SNP pairs per interaction
cat(sprintf("Finished inter-chr LD analysis for %i pathway x pathway interactions.\n",
nrow(bed.pair)))
}
if (!is.null(popNames)) {
pop1.name <- popNames[1]
pop2.name <- popNames[2]
} else {
pop1.name <- pop1
pop2.name <- pop2
}
sink(sprintf("%s/bwPathwayinterChrLDanalysis.log", outDir))
# Calculate inter-chromosomal LD stats for confidently enriched pathways
cat("=======================================================================")
cat(paste("\n*Calculating inter-chromosomal LD between the confidently",
"enriched pathway variants...\n"))
# all population genotypes
cat("========== ALL POPULATIONS ==========\n")
calcLD(snpDir=hcInDir, popcode=0)
hc.diff.pairs <- diff.pairs
hc.diff.pairs$set <- "Enriched"
hc.diff.num <- diff.num
cat(" done.\n")
saveRDS(hc.diff.pairs, sprintf("%s/hc.diff.pairs.rds", outDir))
# population 1 genotypes only
cat(sprintf("\n========== AMONG %s GENOTYPES ONLY ==========\n", pop1))
Sys.sleep(3)
calcLD(snpDir=hcInDir, popcode=1)
hc.diff.pairs.pop1 <- diff.pairs
hc.diff.pairs.pop1$set <- "Enriched"
hc.diff.pairs.pop1$pop <- pop1.name
hc.diff.num.pop1 <- diff.num
cat(" done.\n")
saveRDS(hc.diff.pairs.pop1, sprintf("%s/hc.diff.pairs.pop1.rds", outDir))
# population 2 genotypes only
cat(sprintf("\n========== AMONG %s GENOTYPES ONLY ==========\n", pop2))
Sys.sleep(3)
calcLD(snpDir=hcInDir, popcode=2)
hc.diff.pairs.pop2 <- diff.pairs
hc.diff.pairs.pop2$set <- "Enriched"
hc.diff.pairs.pop2$pop <- pop2.name
hc.diff.num.pop2 <- diff.num
cat(" done.\n")
saveRDS(hc.diff.pairs.pop2, sprintf("%s/hc.diff.pairs.pop2.rds", outDir))
# Calculate inter-chromosomal LD stats for unenriched pathways
cat("=======================================================================")
cat(paste("\n*Calculating inter-chromosomal LD between the unenriched",
"pathway variants...\n"))
# all population genotypes
cat("\n========== ALL POPULATIONS ==========\n")
Sys.sleep(3)
calcLD(snpDir=lcInDir, popcode=0)
lc.diff.pairs <- diff.pairs
lc.diff.pairs$set <- "Unenriched"
lc.diff.num <- diff.num
cat(" done.\n")
saveRDS(lc.diff.pairs, sprintf("%s/lc.diff.pairs.rds", outDir))
# population 1 genotypes only
cat(sprintf("\n========== AMONG %s GENOTYPES ONLY ==========\n", pop1))
Sys.sleep(3)
calcLD(snpDir=lcInDir, popcode=1)
lc.diff.pairs.pop1 <- diff.pairs
lc.diff.pairs.pop1$set <- "Unenriched"
lc.diff.pairs.pop1$pop <- pop1.name
lc.diff.num.pop1 <- diff.num
cat(" done.\n")
saveRDS(lc.diff.pairs.pop1, sprintf("%s/lc.diff.pairs.pop1.rds", outDir))
# population 2 genotypes only
cat(sprintf("\n========== AMONG %s GENOTYPES ONLY ==========\n", pop2))
Sys.sleep(3)
calcLD(snpDir=lcInDir, popcode=2)
lc.diff.pairs.pop2 <- diff.pairs
lc.diff.pairs.pop2$set <- "Unenriched"
lc.diff.pairs.pop2$pop <- pop2.name
lc.diff.num.pop2 <- diff.num
cat(" done.\n")
saveRDS(lc.diff.pairs.pop2, sprintf("%s/lc.diff.pairs.pop2.rds", outDir))
cat("=======================================================================")
# Write out tables of interactions per pathway
# Get dataframe listing each unique pathway x pathway interaction
pathway_ixns <- unique(hc.diff.pairs[,c("pathway_pair1", "pathway_pair2")])
hc.ixns <- data.frame(interaction=pathway_ixns,
hc_ixns=hc.diff.num,
hc_ixns_pop1=hc.diff.num.pop1,
hc_ixns_pop2=hc.diff.num.pop2)
write.table(hc.ixns, sprintf("%s/hc_num_interactions_pathway-pathway.txt",
outDir), col.names=T, row.names=F, quote=F, sep="\t")
pathway_ixns <- unique(lc.diff.pairs[,c("pathway_pair1", "pathway_pair2")])
lc.ixns <- data.frame(interaction=pathway_ixns,
lc_ixns=lc.diff.num,
lc_ixns_pop1=lc.diff.num.pop1,
lc_ixns_pop2=lc.diff.num.pop2)
write.table(lc.ixns, sprintf("%s/lc_num_interactions_pathway-pathway.txt",
outDir), col.names=T, row.names=F, quote=F, sep="\t")
lc.diff.num[lc.diff.num==0] <- NA
lc.diff.num <- na.omit(lc.diff.num)
lc.diff.num.pop1[lc.diff.num.pop1==0] <- NA
lc.diff.num.pop1 <- na.omit(lc.diff.num.pop1)
lc.diff.num.pop2[lc.diff.num.pop2==0] <- NA
lc.diff.num.pop2 <- na.omit(lc.diff.num.pop2)
## PLOT STATS
cat("\n*Generating inter-chromosomal LD analysis plots.\n")
############################### PLOT 1 #####################################
# Pairwise inter r2 of all enriched against all unenriched pathways
# Set common variables
title <- "Enrichment of SNP-SNP interactions between the selection-enriched pathways"
r2.xaxis.title <- bquote(bold("LD value per SNP-SNP pair"))
dat <- rbind(hc.diff.pairs, lc.diff.pairs)
# Rename dataframe column based on chosen LD statistic, R.squared or D.prime
# in order to ensure consistency calling the correct column
names(dat) <- gsub(statistic, "TEST.STAT", names(dat))
cat(sprintf("*Generating results based on '%s' statistic\n", statistic))
# 1a) Density distribution plot
p1 <- ggplot(dat, aes(x=TEST.STAT, colour=set, fill=set)) +
geom_density(alpha=0.2) +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Density"))) +
scale_colour_Publication(guide=FALSE) +
scale_fill_Publication(guide=FALSE)
# 1b) eCDF plot (cumulative density at each r2)
p2 <- ggplot(dat, aes(x=TEST.STAT, colour=set)) +
stat_ecdf() +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Cumulative density"))) +
scale_colour_Publication(guide=FALSE) +
scale_fill_Publication(guide=FALSE)
cat("*Generating plot 1...")
both_1 <- plot_grid(p1, p2, labels=c("A", "B"), ncol=2)
title_1 <- ggdraw() + draw_label(title, fontface="bold")
both_1 <- plot_grid(title_1, both_1, ncol=1, rel_heights=c(0.1, 1))
filename_1 <- sprintf("%s/density_ecdf_hc_lc_%s.tiff", outDir, statistic)
save_plot(filename_1, both_1, base_height=3.5, base_width=10.5,
base_aspect_ratio=1.2)
cat(sprintf(" saved to %s.\n", filename_1))
# Calculate significance via KS test
pval <- ks.test(filter(dat, set=="Enriched") %>% select(TEST.STAT) %>% unlist,
filter(dat, set=="Unenriched") %>% select(TEST.STAT) %>% unlist,
alternative="less")
cat(sprintf("\n\t**p-value via KS test (less)= %g**\n", pval$p.value))
############################### PLOT 2 #####################################
# Pairwise inter r2 per enriched and unenriched pathway separately
title <- paste("Between pathway enrichment of",
"inter-chromosomal SNP-SNP interactions")
# Set colour palette and number of colours needed
cols <- colorRampPalette(brewer.pal(8, "Accent"))
npal <- cols(length(unique(dat$ixn_num)))
# Function to determine significance of inter-chrom LD per selection-enriched
# between-pathway interaction vs. cumulative set of unenriched pathways
# via the KS test (alternative=less{the CDF of x lies below+right of y})
getPvals <- function(dat, pop.name) {
# Separate df into 'Enriched' and 'Unenriched' interactions
if (is.null(pop.name) == TRUE) {
enriched <- filter(dat, set=="Enriched")
unenriched <- filter(dat, set=="Unenriched")
} else { #population-stratified
enriched <- filter(dat, pop==pop.name & set=="Enriched")
unenriched <- filter(dat, pop==pop.name & set=="Unenriched")
}
for (i in 1:length(unique(enriched$ixn_num))) {
# Subset for each enriched pathway
enrich_ixn_ld[[i]] <<- subset(enriched, ixn_num==sprintf("interaction_%s", i))
# Calculate KS pvalue for each enriched pathway against the entire
# set of unenriched pathways
ks_pvals[[i]] <<- ks.test(enrich_ixn_ld[[i]]$TEST.STAT,
unenriched$TEST.STAT,
alternative="less")
ks_pvals[[i]] <<- ks_pvals[[i]]$p.value
}
}
# Get dataframe listing each unique pathway x pathway interaction
pathway_ixns <- unique(hc.diff.pairs[,c("pathway_pair1", "pathway_pair2")])
# 2a) Density distribution plot
p3 <- ggplot(dat, aes(x=TEST.STAT, colour=ixn_num, fill=ixn_num)) +
facet_grid(set ~ .) +
geom_density(alpha=0.2) +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Density"))) +
scale_fill_manual(values=npal) +
scale_colour_manual(values=npal) +
theme(legend.position="none",
strip.text=element_text(face="bold"))
# 2b) eCDF plot (cumulative density at each r2)
p4 <- ggplot(dat, aes(x=TEST.STAT, colour=ixn_num)) +
facet_grid(set ~ .) +
stat_ecdf() +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Cumulative density"))) +
scale_colour_manual(values=npal) +
theme(legend.position="none",
strip.text=element_text(face="bold"))
# 2c,d) Density and eCDF at x-axis > 0.2
p5 <- p3 + xlim(0.2, 1)
p6 <- p4 + xlim(0.2, 1)
cat("\n*Generating plot 2...")
both_2 <- plot_grid(p3, p4, p5, p6, labels=c("A", "B", "C", "D"),
ncol=2, nrow=2)
title_2 <- ggdraw() + draw_label(title, fontface="bold")
both_2 <- plot_grid(title_2, both_2, ncol=1, rel_heights=c(0.1, 1))
filename_2 <- sprintf("%s/inter_den_ecdf_hc_lc_%s.tiff", outDir, statistic)
save_plot(filename_2, both_2, base_height=8, base_width=9.5,
base_aspect_ratio=1.2)
cat(sprintf(" saved to %s.\n", filename_2))
# Calculate p values
enrich_ixn_ld <- list()
ks_pvals <- list()
getPvals(dat, pop=NULL)
pvals <- unlist(ks_pvals)
ixn_pvals <- data.frame(interaction=pathway_ixns,
pval=as.data.frame(pvals),
bonf=p.adjust(pvals, method="bonferroni"),
fdr=p.adjust(pvals, method="BH"))
ixn_pvals <- ixn_pvals[order(ixn_pvals$fdr),]
filename_p1 <- sprintf("%s/hc_pvals_per_interaction_alt-l.txt", outDir)
write.table(format(ixn_pvals, digits=3), file=filename_p1,
col=T, row=F, quote=F, sep="\t")
cat(sprintf("*Table of interaction p-values written to %s.\n", filename_p1))
############################### PLOT 3 #####################################
# Pairwise inter r2 per enriched and unenriched pathway separately
# and stratified by population
title <- paste("Between pathway enrichment of",
"inter-chromosomal SNP-SNP interactions per population")
dat <- rbind(hc.diff.pairs.pop1, lc.diff.pairs.pop1,
hc.diff.pairs.pop2, lc.diff.pairs.pop2)
# Rename dataframe column based on chosen LD statistic, R.squared or D.prime
# in order to ensure consistency calling the correct column
names(dat) <- gsub(statistic, "TEST.STAT", names(dat))
cat(sprintf("*Generating results based on '%s' statistic\n", statistic))
# 3a) Density distribution plot
p7 <- ggplot(dat, aes(x=TEST.STAT, colour=ixn_num, fill=ixn_num)) +
facet_grid(set ~ pop) +
geom_density(alpha=0.2) +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Density"))) +
scale_fill_manual(values=npal) +
scale_colour_manual(values=npal) +
theme(legend.position="none",
strip.text=element_text(face="bold"))
# 3b) eCDF plot (cumulative density at each r2)
p8 <- ggplot(dat, aes(x=TEST.STAT, colour=ixn_num)) +
facet_grid(set ~ pop) +
stat_ecdf() +
xlab(r2.xaxis.title) +
ylab(bquote(bold("Cumulative density"))) +
scale_colour_manual(values=npal) +
theme(legend.position="none",
strip.text=element_text(face="bold"))
# 3c,d) Density and eCDF at x-axis > 0.2
p9 <- p7 + xlim(0.2, 1)
p10 <- p8 + xlim(0.2, 1)
cat("\n*Generating plot 3...")
both_3 <- plot_grid(p7, p8, p9, p10, labels=c("A", "B", "C", "D"),
ncol=2, nrow=2)
title_3 <- ggdraw() + draw_label(title, fontface='bold')
both_3 <- plot_grid(title_3, both_3, ncol=1, rel_heights=c(0.1, 1))
filename_3 <- sprintf("%s/inter_pop-strat_den_ecdf_hc_lc_%s.tiff",
outDir, statistic)
save_plot(filename_3, both_3, base_height=10, base_width=13.5,
base_aspect_ratio=1.2)
cat(sprintf(" saved to %s.\n", filename_2))
# Calculate p values per population
# Pop 1 calculations
enrich_ixn_ld <- list()
ks_pvals <- list()
getPvals(dat, pop.name=pop1.name)
pvals <- unlist(ks_pvals)
ixn_pvals_pop1 <- data.frame(interaction=pathway_ixns,
pval=as.data.frame(pvals),
bonf=p.adjust(pvals, method="bonferroni"),
fdr=p.adjust(pvals, method="BH"))
ixn_pvals_pop1 <- ixn_pvals_pop1[order(ixn_pvals_pop1$fdr),]
filename_p2 <- sprintf("%s/hc_pvals_per_interaction_alt-l_%s.txt", outDir, pop1)
write.table(format(ixn_pvals_pop1, digits=3), file=filename_p2,
col=T, row=F, quote=F, sep="\t")
cat(sprintf("*Table of interaction p-values written to %s.\n", filename_p2))
# Pop 2 calculations
enrich_ixn_ld <- list()
ks_pvals <- list()
getPvals(dat, pop.name=pop2.name)
pvals <- unlist(ks_pvals)
ixn_pvals_pop2 <- data.frame(interaction=pathway_ixns,
pval=as.data.frame(pvals),
bonf=p.adjust(pvals, method="bonferroni"),
fdr=p.adjust(pvals, method="BH"))
ixn_pvals_pop2 <- ixn_pvals_pop2[order(ixn_pvals_pop2$fdr),]
filename_p3 <- sprintf("%s/hc_pvals_per_interaction_alt-l_%s.txt", outDir, pop2)
write.table(format(ixn_pvals_pop2, digits=3), file=filename_p3,
col=T, row=F, quote=F, sep="\t")
cat(sprintf("*Table of interaction p-values written to %s.\n", filename_p3))
# Merge both p value dataframes
colnames(ixn_pvals_pop1)[3:5] <- paste(colnames(ixn_pvals_pop1)[3:5],
sep="_", pop1)
colnames(ixn_pvals_pop2)[3:5] <- paste(colnames(ixn_pvals_pop2)[3:5],
sep="_", pop2)
pval_merge <- merge(ixn_pvals_pop1, ixn_pvals_pop2,
by=c("interaction.pathway_pair1",
"interaction.pathway_pair2"))
pval_merge <- pval_merge[order(pval_merge[,5]), ]
filename_p4 <- sprintf("%s/hc_pvals_merge_%s_%s.txt", outDir, pop1, pop2)
write.table(format(pval_merge, digits=3), file=filename_p4,
col=T, row=F, quote=F, sep="\t")
cat(sprintf("Merged p-value tables written to %s.\n", filename_p4))
sink()
}
|
88ffbdad92bd83e20f8f8eb2e96cf7cbe3385109
|
47594da5da68b5e53ce10c45bc317f16ad69ac7d
|
/man/buildDescription.Rd
|
1024851c8a7ceade0fd48db6cf55953d36cc1071
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
viadee/anchorsOnR
|
79183941b725e0b3dd7590ec6d75025bd2b9ca47
|
617e9d253cbe09f6f18e7545d62c01ab1e3ddb78
|
refs/heads/master
| 2022-03-09T23:06:49.693086
| 2019-11-21T15:20:21
| 2019-11-21T15:20:21
| 192,675,270
| 14
| 4
|
BSD-3-Clause
| 2019-07-29T11:54:08
| 2019-06-19T06:49:11
|
R
|
UTF-8
|
R
| false
| true
| 572
|
rd
|
buildDescription.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretization.R
\name{buildDescription}
\alias{buildDescription}
\title{Builds a printable representation of the discretization used for output formatting}
\usage{
buildDescription(bin, cuts, right, short)
}
\arguments{
\item{bin}{the discretization option}
\item{cuts}{the cuts}
\item{right}{right}
\item{short}{short}
}
\value{
the readable description
}
\description{
TODO remove cuts and right as they are included in bin. Also, include non-numeric discretization using $classes
}
|
41ce404070ed395e95afea5cf8095de9dd452aed
|
257ffc3438528729b62bc3e7abc24eea2be6193e
|
/man/SimpleSpatial.Rd
|
1d11219a0e89c91419da269d1c1d7ef3e67e592b
|
[
"MIT"
] |
permissive
|
SHUD-System/rSHUD
|
91e1ae7f077cf5efa52575a32ed4e692ed8034b9
|
1915a9cf2b241a1368b9768251b2f140454bd94e
|
refs/heads/master
| 2023-07-06T11:07:18.335307
| 2023-07-01T15:08:11
| 2023-07-01T15:08:11
| 224,737,854
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 296
|
rd
|
SimpleSpatial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Func_GIS.R
\name{SimpleSpatial}
\alias{SimpleSpatial}
\title{Simplify SpatialData.}
\usage{
SimpleSpatial(x)
}
\arguments{
\item{x}{SpatialData}
}
\value{
Simplified SpatialData
}
\description{
Simplify SpatialData.
}
|
cfa4edd584c78b977f071c6f9bed9ce3f0a3a052
|
11398da10d2e446f376ed7188b4dcd3caba754c7
|
/R scripts/sdmAllCodeFinal_IP_11Feb18_havenot edited.r
|
b89c231b0f5b07070b9f42510e4c017538189c02
|
[] |
no_license
|
bhartidk/sdm
|
d96c1330c70f70200e16a881a6f79582cf9fc05f
|
8d82b94406f1d104f7bb6ea8fb2883ed70d5a342
|
refs/heads/master
| 2022-03-19T09:50:45.251540
| 2019-11-13T07:49:57
| 2019-11-13T07:49:57
| 221,377,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,362
|
r
|
sdmAllCodeFinal_IP_11Feb18_havenot edited.r
|
#rm(list = ls())
setwd("C:/Bharti/PhD data/Analysis/Chapter2_2Nov17")
library(sdmpredictors)
library(raster)
library(sp)
library(rgdal)
library(maps)
library(rgeos)
library(dismo)
library(dplyr)
library(Hmisc)
library(ggplot2)
library(devtools)
library(digest)
library(rJava)
library(geosphere)
library(stringr)
library(ncdf4)
library(sf)
library(reshape2)
library(ENMeval)
library(parallel)
library(devtools)
library(rmaxent)
library(MASS)
#library(foreach)
#library(iterators)
#library(doParallel)
#install_github('johnbaums/rmaxent')
#ip.ext<-c(24, 150,-36,30.21667)
#ms_layer<-raster("sdm_layers/marspec/ip_ext/mean_annual_sss_psu.tif")
#projection(ms_layer)<-wgs
#ext<-ip.ext
##input occurrence data
all.occ<-read.csv("dataframes/allsites_env_4Nov17.csv")
head(all.occ)
colnames(all.occ)
#removing the row number column and all the extracted data columns from the dataframe
all.occ<-all.occ[,-c(1,7:10, 12:38)]
head(all.occ)
#changing the order of longitude and latitude in the dataframe - easier to handle rasters that way
all.occ<-all.occ[,c(2,5,4,3,6)]
head(all.occ)
#creating a dataframe which has unique values of all.occ
rn<-as.numeric(rownames(unique(all.occ[,2:3])))
unq.occ<-all.occ[rn,]
#adding other sampling locations where no species were found but were a part of the sampling effort to this dataframe
no.occ<-read.csv("dataframes/coords_nosp_12Jan18.csv")
unq.occ<-rbind(unq.occ, no.occ)
#write.csv(unq.occ, file="dataframes/unq_occ.csv", row.names=TRUE)
#converting this into a SpatialPoints dataframe
#coordinates(unq.occ)<-~long+lat
#Creating a variable which saves the WGS projection codes
wgs<-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
#projection(unq.occ)<-wgs
#rbinding all.occ and no.occ
all.occ<-rbind(all.occ, no.occ)
#extracting occurrence data for each species and saving it as a separate dataframe
sp<-as.character(levels(all.occ$species))
#input the extended occurrence data from Reid and Reid referred publications
ip.occ<-read.csv("dataframes/species_locations_25Jan18.csv", fileEncoding="latin1")
head(ip.occ)
#looking at if the columns have been given correct assignments
str(ip.occ)
ip.occ$lat<-as.numeric(as.character(ip.occ$lat))
#removing an extra column that has for some reason been added in the end
ip.occ<-ip.occ[,-ncol(ip.occ)]
head(ip.occ)
#removing locations from India
colnames(ip.occ)
ip.occ<-ip.occ[ip.occ$location5!="India", c(2,12,13,6,15)]
#retaining only those rows where there are no NAs - not that we have reduced the number of columns used, the only columns that can give NA values are long and lat
ip.occ<-ip.occ[complete.cases(ip.occ),]
#changing the column names to match all.occ, so that I can rbind them together
colnames(ip.occ)
colnames(ip.occ)<-colnames(all.occ)
head(ip.occ)
#merging ip.occ with all.occ
ip.occ<-rbind(all.occ, ip.occ)
# Split occurrences into one dataframe per species and write each to disk.
# List elements are named "ip.<first 3 letters of species>.occ" so that the
# list2env() call below exposes them as individual dataframes.
ip.ind.occ <- list()
for (i in seq_along(sp)) {  # seq_along() is safe if sp is empty (1:length(sp) would give 1:0)
  ip.ind.occ[[i]] <- subset(ip.occ, ip.occ$species %in% sp[i])
  names(ip.ind.occ)[[i]] <- paste0("ip.", substr(sp[i], 1, 3), ".occ")
  write.csv(ip.ind.occ[[i]], file=paste0("dataframes/", "ip_", substr(sp[i], 1, 3), "_occ.csv"), row.names=TRUE)
}
#dissolving the list to create individual dataframes that have been appropriately labeled
list2env(ip.ind.occ, envir=.GlobalEnv)
ls()
#finding the minimum and maximum lat and long values
pt.ext<-c(min(ip.occ$long), max(ip.occ$long), min(ip.occ$lat), max(ip.occ$lat))
#trying to see how the extent is like by cropping the world coastline extent to this and plotting species points over it
coastline<-readOGR(dsn='sdm_layers/CS/ne_10m_coastline', layer='ne_10m_coastline')
crop.coastline<-crop(coastline, pt.ext)
##Loading 200 m bathymetry shapefile
shelf<-readOGR(dsn='sdm_layers/CS/ne_200m_bathymetry', layer='ne_10m_bathymetry_K_200')
plot(crop.coastline)
points(ip.und.occ[,2:3], col="brown")
points(ip.pal.occ[,2:3], col="green")
#From the above plot it appears that undulata and pallescens go into the Pacific islands as well. Since I do not have tidal range information for those locations I cannot use those regions. Therefore can use the longitudinal extent of tidal_range layer to determine the horizontal extent and the spread of points (ip.occ) to determine the vertical extent
##Downloading tidal range information in netCDF and converting it into a raster
#I downloaded netCDF data corresponding to Indian Ocean ATLAS 2011 at 1/12 degree resolution from the OTIS website. It has variables associated with latitude, longitude, h (amplitude) and phase information for 11 different tidal harmonic constitutents. This was input into TMD toolbox script code which was modified with help from Ivan Haigh's trimmed down code with changes to save the output as a map and not as a time series per location in the lat long grid. This was run every month of the year 2016 between 1-15 (15 days) so that a spring and neap tide is considered every month. In the code below I am finding the average of netCDF files obtained for each month to get the annual average.
#Opening and dealing with netCDF data - http://environmentalcomputing.net/opening-netcdf-files/
#Opening a netCDF file
tidalAmp<-nc_open("sdm_layers/tide/IO_2011atlas_netcdf/DATA/hf.IO_2011atlas.nc")
#Looking at the constituents of the netCDF file
print(tidalAmp)
#It has 7 variables, in 4 dimensions
#Obtaining some variables we are interested in
#Tidal elevation amplitude at Z-nodes
tidalAmp.ha <- ncvar_get(tidalAmp,"ha")
#Latitude of Z nodes
tidalAmp.lat<-ncvar_get(tidalAmp, "lat_z")
#Longitude of Z nodes
tidalAmp.lon<-ncvar_get(tidalAmp, "lon_z")
#Tidal constituents
tidalAmp.con<-ncvar_get(tidalAmp, "con")
#Looking at the dimensions of each of the variables obtained
dim(tidalAmp.ha)
#The above has the same dimensions as tidalAmp.lat and lon, but has another dimension of size 13 - which refers to each of the tidal components listed in tidalAmp.con
dim(tidalAmp.lat)
dim(tidalAmp.lon)
dim(tidalAmp.con)
#Looking at the values for tidal elevation amplitude, but for tidal constituent m2 which is con=1
tidalAmp.ha[,,1]
#What fill value is used for missing data - no fill value specified here
ncatt_get(tidalAmp, "ha", "_FillValue")
#Alternate method of obtaining fill value information
ncatt_get(tidalAmp, "ha", "_missing_value")
#This netCDF file does not have attribute data associated with the coordinate reference system used.
#Closing the netCDF file - they say this is IMPORTANT to do
nc_close(tidalAmp)
#Opening the tidal range netcdf file generated from TMD toolbox - there should be one netcdf file for each month of the year. I need to take the mean of all months put together
#List file names in a path, following a certain pattern
files<-list.files(path="sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/mean", pattern="*.nc")
files.min<-list.files(path="sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/min", pattern="*.nc")
files.max<-list.files(path="sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/max", pattern="*.nc")
# Stack holding one mean-tidal-range raster per monthly netCDF file
tidalRange <- stack()
# Open each netCDF file, convert the 'tidal_range_mean' variable to a raster,
# assign the extent and CRS, and add it to the stack.
# The extent is taken from the min/max of tidalAmp.lon/tidalAmp.lat, since the
# tidal range netCDFs were derived from the tidalAmp amplitude/phase grids
# (1/12 degree resolution). Cells equal to 0 are recoded to NA — presumably a
# no-data/land convention from the MATLAB export; TODO confirm.
for (i in seq_along(files)) {  # seq_along() avoids 1:0 if no files are found
  tidalRange_nc <- nc_open(paste0("sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/mean/", files[i]))
  tidalRange.r <- ncvar_get(tidalRange_nc, 'tidal_range_mean')
  tidalRange.r <- raster(tidalRange.r)
  tidalRange.r[tidalRange.r == 0] <- NA
  extent(tidalRange.r) <- c(min(tidalAmp.lon), max(tidalAmp.lon), min(tidalAmp.lat), max(tidalAmp.lat))
  crs(tidalRange.r) <- wgs
  tidalRange <- stack(tidalRange, tidalRange.r)
  nc_close(tidalRange_nc)  # always close the netCDF handle after reading
}
tidalRange
#Taking the mean of all rasters
tidalRange.r<-mean(tidalRange)
tidalRange.r<-flip(tidalRange.r, direction='y')
plot(tidalRange.r)
#Re-sampling it to match the resolution of ms_layers
#tidalRange.r<-resample(tidalRange.r, ms_layer)
#Setting min and max
tidalRange.r<-setMinMax(tidalRange.r)
#Saving the raster to disk
writeRaster(tidalRange.r, filename="sdm_layers/tidalRange/tidal_range_ip", format="GTiff", overwrite=TRUE)
par(mfrow=c(3,1))
plot(crop.coastline)
plot(tidalRange.r, add=TRUE)
points(ip.und.occ[,2:3], pch=19, col="blue")
plot(crop.coastline)
plot(tidalRange.r, add=TRUE)
points(ip.pal.occ[,2:3], pch=19, col="blue")
plot(crop.coastline)
plot(tidalRange.r, add=TRUE)
points(ip.vid.occ[,2:3], pch=19, col="blue")
#tidalRange.r counting the number of NAs from extracted values for presence points=4
length(which(is.na(extract(tidalRange.r, unique(ip.occ[,2:3])))))
nrow(ip.occ)
# Fill NAs with the mean of the 8 neighbouring cells: focal() with a 3x3
# window and NAonly=TRUE only replaces NA cells, leaving existing values
# untouched. Repeated 4 times so the fill propagates far enough inland/along
# the coast to cover all presence points.
reps<-4
tidalRange.fill<-tidalRange.r
for(i in 1:reps){
tidalRange.fill<-focal(tidalRange.fill, w=matrix(1,3,3), fun=mean, pad=TRUE, na.rm=TRUE, NAonly=TRUE)
}
# Check that the fill worked: count of unique presence points that still fall
# on NA cells (should be 0). NOTE(review): this checks all.occ while the count
# just above the fill used ip.occ — confirm which point set is intended.
length(which(is.na(extract(tidalRange.fill, unique(all.occ[,2:3])))))
#Saving the raster to disk
writeRaster(tidalRange.fill, filename="sdm_layers/tidalRange/tidal_range_fill_ip", format="GTiff", overwrite=TRUE)
##
#running the same set of functions for min and max
#
tidalMin<-stack()
for(i in 1:length(files.min)){
tidalMin_nc<-nc_open(paste0("sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/min/", files.min[i]))
tidalMin.r<-ncvar_get(tidalMin_nc, 'tidal_min')
tidalMin.r<-raster(tidalMin.r)
tidalMin.r[tidalMin.r==0]<-NA
extent(tidalMin.r)<-c(min(tidalAmp.lon), max(tidalAmp.lon), min(tidalAmp.lat), max(tidalAmp.lat))
crs(tidalMin.r)<-wgs
tidalMin<-stack(tidalMin, tidalMin.r)
nc_close(tidalMin_nc)
}
tidalMax<-stack()
for(i in 1:length(files.max)){
tidalMax_nc<-nc_open(paste0("sdm_layers/tide/tide_range_MATLAB/FUNCTIONS/max/", files.max[i]))
tidalMax.r<-ncvar_get(tidalMax_nc, 'tidal_max')
tidalMax.r<-raster(tidalMax.r)
tidalMax.r[tidalMax.r==0]<-NA
extent(tidalMax.r)<-c(min(tidalAmp.lon), max(tidalAmp.lon), min(tidalAmp.lat), max(tidalAmp.lat))
crs(tidalMax.r)<-wgs
tidalMax<-stack(tidalMax, tidalMax.r)
nc_close(tidalMax_nc)
}
#Taking the min and max of all rasters
tidalMin.r<-setMinMax(tidalMin.r)
tidalMax.r<-setMinMax(tidalMax.r)
tidalMin.r<-min(tidalMin)
tidalMax.r<-max(tidalMax)
tidalMin.r<-flip(tidalMin.r, direction='y')
tidalMax.r<-flip(tidalMax.r, direction='y')
#Taking the difference between the two to obtain range
tidalRange2.r<-tidalMax.r-tidalMin.r
#writing it to disk
writeRaster(tidalRange2.r, filename="sdm_layers/tidalRange/tidal_range_minmax_ip", format="GTiff", overwrite=TRUE)
#Resampling to ms_layers extent and resolution
#tidalRange2.r<-resample(tidalRange2.r, ms_layer)
length(which(is.na(extract(tidalRange2.r, unique(ip.occ[,2:3])))))
#Filling NAs with mean of neighbouting 8 values
reps<-4
tidalRange2.fill<-tidalRange2.r
for(i in 1:reps){
tidalRange2.fill<-focal(tidalRange2.fill, w=matrix(1,3,3), fun=mean, pad=TRUE, na.rm=TRUE, NAonly=TRUE)
}
#Checking if the NAs have been removed
length(which(is.na(extract(tidalRange2.fill, unique(all.occ[,2:3])))))
#set min max
tidalRange2.fill<-setMinMax(tidalRange2.fill)
extent(tidalRange2.fill)
#Saving the raster to disk
writeRaster(tidalRange2.fill, filename="sdm_layers/tidalRange/tidal_range_minmax_fill_ip", format="GTiff", overwrite=TRUE)
##Before starting to handle rasters and shape files, define extents of area of interest. The -36 comes from rounding off the ymin from looking at the sampling locations
ip.ext<-extent(xmin(tidalRange2.fill), xmax(tidalRange2.fill), -36, ymax(tidalRange2.fill))
#checking if the extent looks okay, by plotting points over it
crop.coastline<-crop(coastline, ip.ext)
tidalRange2.fill<-crop(tidalRange2.fill, ip.ext)
plot(tidalRange2.fill)
plot(crop.coastline, add=TRUE)
points(ip.occ[,2:3], col="blue", pch=19)
#retaining only those points which fall into ip.extent
ext.pol<-as(ip.ext, "SpatialPolygons")
projection(ext.pol)<-wgs
ip.occ.crop<-ip.occ
coordinates(ip.occ.crop)<-~long+lat
projection(ip.occ.crop)<-wgs
within<-over(ip.occ.crop, ext.pol)
table(within)
ip.occ.crop<-ip.occ.crop[!(is.na(within)),]
#checking to see if the points have been removed
plot(coastline)
plot(tidalRange2.fill, add=TRUE)
points(ip.occ.crop[,2:3], pch=19, col="blue")
#dividing these cropped points species-wise, again
ip.ind.occ<-list()
for(i in 1:length(sp)){
ip.ind.occ[[i]]<-subset(ip.occ.crop, ip.occ.crop$species %in% sp[i])
names(ip.ind.occ)[[i]]<-paste0("ip.", substr(sp[i], 1, 3), ".occ")
write.csv(ip.ind.occ[[i]], file=paste0("dataframes/", "ip_", substr(sp[i], 1, 3), "_occ.csv"), row.names=FALSE)
}
#dissolving the list to create individual dataframes that have been appropriately labeled
list2env(ip.ind.occ, envir=.GlobalEnv)
ls()
#seeting ip.ext as the extent
ext<-ip.ext
#removing unrequired variables
rm(shelf, tidalAmp, tidalAmp.lat, tidalAmp.lon, tidalMax, tidalMax_nc, tidalMax.r, tidalMin, tidalMin_nc, tidalMin.r, tidalRange, tidalRange_nc, tidalRange.r, tidalRange2.r, tidalAmp.con, tidalAmp.ha)
##Downloaing raster layers
#MARSPEC 1km layers
ms<-list.files(path="sdm_layers/marspec", pattern="*.tif")
#The file is input, cropped to the required extent and put in the stack.
ms_layers<-stack()
for(i in 1:length(ms)){
ms.raster<-raster(paste0("sdm_layers/marspec/", ms[i]))
ms.raster<-crop(ms.raster, ext)
ms_layers<-stack(ms_layers, ms.raster)
}
#Giving the layers meaningful names
names(ms_layers)
names(ms_layers)[1:nlayers(ms_layers)]<-c("bathy_m", "dist_to_shore_km", "bathy_slope_deg", "mean_annual_sss_psu", "sss_freshest_month_psu", "sss_saltiest_month_psu", "mean_annual_sst_C", "sst_coldest_month_C", "sst_warmest_month_C" )
ms_layers
#The scaling has to be adjusted - the scaling factor info was taken from MARSPEC meta-data, also available here - http://marspec.weebly.com/modern-data.html
#bathymetric slope
names(ms_layers[[3]])
ms_layers[[3]]<-ms_layers[[3]]/10
#salinity
names(ms_layers[[4:6]])
ms_layers[[4]]<-ms_layers[[4]]/100
ms_layers[[5]]<-ms_layers[[5]]/100
ms_layers[[6]]<-ms_layers[[6]]/100
#temperature
names(ms_layers[[7:9]])
ms_layers[[7]]<-ms_layers[[7]]/100
ms_layers[[8]]<-ms_layers[[8]]/100
ms_layers[[9]]<-ms_layers[[9]]/100
#Setting min and max
ms_layers<-setMinMax(ms_layers)
#Saving the unedited rasters to disk
#setwd("sdm_layers/marspec/ip_ext_unedited")
writeRaster(ms_layers, filename=names(ms_layers), bylayer=TRUE,format="GTiff")
#setwd("D:/PhD data/Analysis/Chapter2_2Nov17")
#Plot to check the scale and the general appearance
plot(ms_layers)
#MARSPEC counting number of NAs in extracted value from presence points = 126
length(which(is.na(extract(ms_layers[[2]], unique(all.occ[,2:3])))))
#Correcting for NAs near the coastline. For each NA, taking the mean of the 8 cells surrounding it and averaging them to get the central cell value. It takes 3 reps to fills all NAs from extracted values in all.occ - ran it with a test data-set. Help from https://stat.ethz.ch/pipermail/r-sig-geo/2013-July/018709.html
reps<-3
ms_layers_fill<-ms_layers
for(i in 1:nlayers(ms_layers_fill)){
for(j in 1:reps){
ms_layers_fill[[i]]<-focal(ms_layers_fill[[i]], w=matrix(1,3,3), fun=mean, na.rm=TRUE, pad=TRUE, NAonly=TRUE)
}
}
#The layer names are removed for some reason when I run the above code.
names(ms_layers_fill)<-names(ms_layers)
#Checking if all NA's from presence points have been filled
length(which(is.na(extract(ms_layers_fill[[1]], unique(all.occ[,2:3])))))
#rm(ms_layers)
#Saving the cropped rasters to disk
#setwd("sdm_layers/marspec/wg_ext")
writeRaster(ms_layers_fill, filename=names(ms_layers_fill), bylayer=TRUE,format="GTiff", overwrite=TRUE)
#setwd("D:/PhD data/Analysis/Chapter2_2Nov17")
##WorldClim 1km layers
wc<-list.files(path="sdm_layers/worldclim", pattern="*.tif")
#The file is input, cropped to the required extent and put in the stack.
wc_layers<-stack()
for(i in 1:length(wc)){
wc.raster<-raster(paste0("sdm_layers/worldclim/", wc[i]))
wc.raster<-crop(wc.raster, ext)
wc_layers<-stack(wc_layers, wc.raster)
}
#Giving the layers meaningful names
names(wc_layers)
names(wc_layers)<-c("mean_air_temp_C" , "max_air_temp_C", "min_air_temp_C", "air_temp_dryQ_C")
#Setting min and max
wc_layers<-setMinMax(wc_layers)
plot(wc_layers)
#Exporting unedited cropped raster to disk. Parallel processing code from https://stackoverflow.com/questions/43243611/writeraster-to-netcdf-parallelisation-r
#UseCores<-detectCores()-1
#cl<- makeCluster(UseCores, type="FORK")
#registerDoParallel(cl)
#tmp<-foreach(i = 1:nlayers(wc_layers)) %dopar%
#{
#r<-raster::raster(wc_layers, i)
#raster::writeRaster(r, filename=names(wc_layers)[i],overwrite=TRUE, #format="GTiff")
#rm(r)
#}
#stopCluster(cl)
setwd("sdm_layers/worldclim/wg_ext_unedited")
writeRaster(wc_layers, filename=names(wc_layers), bylayer=TRUE,format="GTiff", overwrite=TRUE)
setwd("D:/PhD data/Analysis/Chapter2_2Nov17")
#WorldClim counting number of NAs in extracted value from presence points=90
length(which(is.na(extract(wc_layers[[1]], unique(all.occ[,2:3])))))
#Correcting for NAs near the coastline. For each NA, taking the mean of the 8 cells surrounding it and averaging them to get the central cell value. It takes 2 reps to fills all NAs from extracted values in all.occ - ran it with a test data-set. Help from https://stat.ethz.ch/pipermail/r-sig-geo/2013-July/018709.html
reps<-2
wc_layers_fill<-wc_layers
for(i in 1:nlayers(wc_layers_fill)){
for(j in 1:reps){
wc_layers_fill[[i]]<-focal(wc_layers_fill[[i]], w=matrix(1,3,3), fun=mean, na.rm=TRUE, pad=TRUE, NAonly=TRUE)
}
}
#The layer names are removed for some reason when I run the above code.
names(wc_layers_fill)<-names(wc_layers)
#Checking if all NA's from presence points have been filled
length(which(is.na(extract(wc_layers_fill[[1]], unique(all.occ[,2:3])))))
#rm(wc_layers)
#Writing the cropped layers to disk
#UseCores<-detectCores() -1
#cl<- makeCluster(UseCores, type="FORK")
#registerDoParallel(cl)
#tmp<-foreach(i = 1:nlayers(wc_layers_fill)) %dopar%
#{
# r<-raster::raster(wc_layers_fill, i)
# raster::writeRaster(r, #filename=names(wc_layers_fill)[i],overwrite=TRUE, format="GTiff")
# rm(r)
#}
#stopCluster(cl)
setwd("sdm_layers/worldclim/wg_ext")
writeRaster(wc_layers_fill, filename=names(wc_layers_fill), bylayer=TRUE,format="GTiff", overwrite=TRUE)
setwd("D:/PhD data/Analysis/Chapter2_2Nov17")
#Inputing Bio-ORACLE layers
#bo_list<-list.files(path="sdm_layers/biooracle/ip_ext_unedited", pattern="*tif")
bo_list<-list.files(path="sdm_layers/biooracle", pattern="*tif")
bo_layers<-stack()
for(i in 1:length(bo_list)){
#bo.raster<-raster(paste0("sdm_layers/biooracle/ip_ext_unedited/", bo_list[i]))
bo.raster<-raster(paste0("sdm_layers/biooracle/", bo_list[i]))
bo.raster<-crop(bo.raster, ext)
bo_layers<-stack(bo_layers, bo.raster)
}
#Checking if everything with bo_layers looks okay
#plot(bo_layers)
#Giving the layers meaningful names
names(bo_layers)<-c("chlo_max", "chlo_mean", "chlo_min", "cloud_cover_max", "cloud_cover_mean", "cloud_cover_min", "phyto_max", "phyto_mean", "phyto_min", "pp_max", "pp_mean", "pp_min")
#Resampling bo_layers by an order of magnitude to make it match marspec and worldclim layers
bo_layers<-resample(bo_layers, ms_layer)
#Setting min and max
bo_layers<-setMinMax(bo_layers)
plot(bo_layers)
#write unedited raster to disk
#UseCores<-detectCores()-1
#cl<- makeCluster(UseCores, type="FORK")
#registerDoParallel(cl)
#tmp<-foreach(i = 1:nlayers(bo_layers)) %dopar%
#{
# r<-raster::raster(bo_layers, i)
# raster::writeRaster(r, filename=names(bo_layers)[i],overwrite=TRUE, #format="GTiff")
# rm(r)
#}
#stopCluster(cl)
setwd("sdm_layers/biooracle/ip_ext_unedited")
writeRaster(bo_layers, filename=names(bo_layers), bylayer=TRUE,format="GTiff", overwrite=TRUE)
setwd("C:/Bharti/PhD data/Analysis/Chapter2_2Nov17")
#Finding out how many NAs exist in extracted values = 4
length(which(is.na(extract(bo_layers[[1]], unique(all.occ[,2:3])))))
#Replacing NAs with mean of 8 neighbouring cells
reps<-2
bo_layers_fill<-bo_layers
for(i in 1:nlayers(bo_layers_fill)){
for(j in 1:reps){
bo_layers_fill[[i]]<-focal(bo_layers_fill[[i]], w=matrix(1,3,3), fun=mean, na.rm=TRUE, pad=TRUE, NAonly=TRUE)
}
}
names(bo_layers_fill)<-names(bo_layers)
#Checking if all the NAs have been removed
length(which(is.na(extract(bo_layers_fill[[1]], unique(all.occ[,2:3])))))
#Writing the raster to disk
#UseCores<-detectCores()-1
#cl<- makeCluster(UseCores, type="FORK")
#registerDoParallel(cl)
#tmp<-foreach(i = 1:nlayers(bo_layers_fill)) %dopar%
#{
# r<-raster::raster(bo_layers_fill, i)
# raster::writeRaster(r, #filename=names(bo_layers_fill)[i],overwrite=TRUE, format="GTiff")
# rm(r)
#}
#topCluster(cl)
setwd("sdm_layers/biooracle/ip_ext")
writeRaster(bo_layers_fill, filename=names(bo_layers_fill), bylayer=TRUE, format="GTiff", overwrite=TRUE)
setwd("C:/Bharti/PhD data/Analysis/Chapter2_2Nov17")
#rm(bo_layers_fill)
##Loading topobath data from gebco
topobath<-raster("sdm_layers/gebco/GEBCO_2014_2D.nc")
projection(topobath)<-wgs
topobath<-crop(topobath, ip.ext)
#resampling topobath so that the resolution and extent match that of ms_layers
tb<-resample(topobath, ms_layer)
#Calculating the slope using the GEBCO topobath layer using the function terrain
tb_slope<-terrain(tb, opt='slope', unit='degrees')
#Set min and max
tb_slope<-setMinMax(tb_slope)
#Plot the topobath slope values next to each other
plot(tb_slope)
#Plot sampling points on the tb_slope layer
plot(tb_slope)
plot(crop.coastline, add=TRUE)
#Writing tb to disk
writeRaster(tb, filename="sdm_layers/topobath/topo_bath_ip.tif", format="GTiff", overwrite=TRUE)
#Writing tb_slope to disk
writeRaster(tb_slope, filename="sdm_layers/topobath/topo_bath_slope_ip.tif", format="GTiff", overwrite=TRUE)
ls()
ms_layer
rm(bo.raster, bo_layers, bo_list, files, files.max, files.min, tb, tb_slope, tidalAmp, tidalAmp.con, tidalAmp.ha, tidalAmp.lat, tidalAmp.lon, tidalMax, tidalMax.r, tidalMax_nc, tidalMin, tidalMin.r, tidalMin_nc, tidalRange, tidalRange.fill, tidalRange.r, tidalRange_nc, tidalRange2.fill, tidalRange2.r, topobath)
#If I keep the slope parameter, and I have to think carefully about what this actually means and how it might influence species presence in a region
##Creating the shortest distance to 200m bathymetry polygon raster
#This is how MARSPEC did it - information from their meta-data - "To generate the distance to shore raster, the GSHHS land mask was first converted to a Plate Carrée projection (also known as an equirectangular or equidistant cylindrical map projection) so that distances could be calculated in kilometers rather than arc-degrees. Due to edge effects in the calculations, distance was measured in two rounds using the Spatial Analyst extension in ArcGIS 9.3."
crop.shelf<-crop(shelf, ip.ext)
plot(crop.shelf)
#Calculate shortest distance between a point and a polygon
gDistance(recordsSpatial, crop.shelf)
#The above code did not work because there was no spatial projection assigned to the data input. The program has to know how degrees relate to physical distance in the given area. Usually one chooses an appropriate UTM zone, but the data here spans many, so using the projection used for MARSPEC - Plate Carrée projection (equirectangular or equidistant cylindrical map projection)
#I had actually run this using UTM zone 43 for calculations - this can lead to distortions in the E-W axis - something that has to be kept in mind. Not using that though.
#Creating a variable that stores the projection we want to use
espg.32643<-"+proj=utm +zone=43 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
espg.32663<-"+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
#Transforming the crop.shelf polygon into the above projection
crop.shelf.proj<-spTransform(crop.shelf, CRS(espg.32663))
plot(crop.shelf.proj)
#Creating a new raster where distance values will be stored. It has the same extent as crop.shelf.proj and the same number of cells as ms_layers rasters
#ms_layers[[3]]
dist.shore<-raster(extent(crop.shelf.proj), nrow=nrow(ms_layer), ncol=ncol(ms_layer))
values(dist.shore)<-rep(1, ncell(dist.shore))
crs(dist.shore)<-espg.32663
#Rerunning gDistance on the projected data-set again
#The probably functions I can use are distance, gDistance, dist2Line
#Trying out gDistance and dist2Line. The gDistance code is inspired from https://gis.stackexchange.com/questions/226554/create-raster-in-r-distance-to-line-feature
#Using the polygon as such, but converting the raster to a SpatialPoints format. Turns out the gDistance function does not work if the raster/SpatialPoints variable has NA values
dist.shore[]<-gDistance(crop.shelf.proj, as(dist.shore, "SpatialPoints"), byid=TRUE)/1000
plot(dist.shore)
#I also converted SpatialPolygon to SpatialLines and retried it - the results have identical values uptil a point, but after that the values start differing - When I convert it to SpatialLines, the islands are considered separately while in polygon they are considered as one variable (?) and hence the difference in values between them.
#Re-projecting the distance raster dist.shore into lat long
distShore<-projectRaster(dist.shore, crs=wgs)
#On reprojection the extent and resolution are not the same as before
distShore<-resample(distShore, ms_layers_fill[[1]])
distShore<-raster::mask(distShore, crop.shelf, inverse=TRUE)
#Set min and max
distShore<-setMinMax(distShore)
distShore
ms_layers_fill[[1]]
plot(distShore)
plot(crop.coastline, add=TRUE)
plot(crop.shelf, add=TRUE)
#Writing the distShore raster to disk
writeRaster(distShore, filename="sdm_layers/dist_200m_isobath", format="GTiff", overwrite=TRUE)
###############################################################
## Re-load the gap-filled environmental layers from disk into RasterStacks.
## FIX: `1:length(x)` yields c(1, 0) when a directory is empty; seq_along()
## is the safe idiom and is used throughout.
# MARSPEC layers
ms_list<-list.files(path="sdm_layers/marspec/ip_ext", pattern="*.tif")
ms_layers_fill<-stack()
for(i in seq_along(ms_list)){
  ms.raster<-raster(paste0("sdm_layers/marspec/ip_ext/", ms_list[i]))
  ms_layers_fill<-stack(ms_layers_fill, ms.raster)
}
#Input wc_layers_fill from disk
wc_list<-list.files(path="sdm_layers/worldclim/ip_ext", pattern="*.tif")
wc_layers_fill<-stack()
for(i in seq_along(wc_list)){
  wc.raster<-raster(paste0("sdm_layers/worldclim/ip_ext/", wc_list[i]))
  wc_layers_fill<-stack(wc_layers_fill, wc.raster)
}
#Input bo_layers_fill from disk
bo_list<-list.files(path="sdm_layers/biooracle/ip_ext", pattern="*.tif")
bo_layers_fill<-stack()
for(i in seq_along(bo_list)){
  bo.raster<-raster(paste0("sdm_layers/biooracle/ip_ext/", bo_list[i]))
  bo_layers_fill<-stack(bo_layers_fill, bo.raster)
}
#Input topobath
tb<-raster("sdm_layers/topobath/topo_bath_ip.tif")
#Input topobath slope
tb_slope<-raster("sdm_layers/topobath/topo_bath_slope_ip.tif")
#Input tidalRange.fill
tidalRange.fill<-raster("sdm_layers/tidalRange/ip_resampled/tidal_range_minmax_fill_ip.tif")
###############################################################
##Selecting predictor variables
#Putting all the raster layers together
candidates<-stack(ms_layers_fill, wc_layers_fill, bo_layers_fill, tb, tb_slope, tidalRange.fill)
names(candidates)
#Removing dist_to_shore_km from candidates - since the fill NA with mean of 8 neighbours was used - this wouldn't make much sense
candidates<-dropLayer(candidates, c(1:3, 14:16, 20:22))
names(candidates)
#Using a mask such that only points close to the coast are selected - which should be an intersection between wc, ms, bo, tb, tb_slope, tidal_range. Using code from https://stackoverflow.com/questions/5598516/intersection-of-bands-in-r-raster-package to do this
# sum() is NA wherever ANY layer is NA, so cand.mask is the intersection of
# the non-NA footprints of every candidate layer.
cand.mask<-sum(candidates)
# BUG FIX: a stray line `extract(candidates, unique(all.occ[,2:3])))` sat here
# with an unbalanced parenthesis (syntax error) and a discarded result; it has
# been removed.
#writing cand.mask to disk
writeRaster(cand.mask, filename="sdm_layers/cand_mask_ip", format="GTiff", overwrite=TRUE)
#Using cand.mask as a mask to convert cells to NA that don't have values for atleast one of
candidates<-raster::mask(candidates, cand.mask)
names(candidates)
# FIX: write each layer with an explicit output path instead of the fragile
# setwd()/setwd() pair (which hard-coded an absolute C:/ path to restore).
writeRaster(candidates, filename=file.path("sdm_layers/candidates_ip", names(candidates)), bylayer=TRUE, format="GTiff", overwrite=TRUE)
#plot(candidates)
##Take the records of the focal species
n<-"und.occ"
sp.occ<-ind.occ[[n]]
#Converting long and lat values to a spatial points file
recordsSpatial<-SpatialPointsDataFrame(coords=cbind(sp.occ$long, sp.occ$lat), data=sp.occ, proj4string=CRS(wgs))
plot(crop.coastline)
points(recordsSpatial, pch=19, col="blue")
##Looking at the correlation between different candidate predictors
#Calculating the correlation for 10000 random points extracted from the extent
# NOTE(review): the comment above says 10000 but the call draws 100000 random
# points - confirm which was intended.
cor.pts<-randomPoints(candidates, 100000, lonlatCorrection=TRUE)
#By default cor.test uses squared Spearman correlation coefficients, Hoeffding's D statistic can also be used which is sensitive to various kinds of relationships including highly non-monotonic relationships
envSpecies<-extract(candidates, cor.pts)
cor.tree<-varclus(envSpecies)
plot(cor.tree)
#Dropping layers that are redundant after looking at cor.test
names(candidates)
predictors<-dropLayer(candidates, c(1,3,5,9:14,16,19))
names(predictors)
predictors<-setMinMax(predictors)
# BUG FIX: the layers were written to the current working directory but are
# read back below from "sdm_layers/predictors"; write them there explicitly.
writeRaster(predictors, bylayer=TRUE, filename=file.path("sdm_layers/predictors", names(predictors)), format="GTiff", overwrite=TRUE)
#Input predictors from disk
pred_list<-list.files(path="sdm_layers/predictors", pattern="*.tif")
predictors<-stack()
# FIX: seq_along() instead of 1:length() (safe when the directory is empty).
for(i in seq_along(pred_list)){
  pred.raster<-raster(paste0("sdm_layers/predictors/", pred_list[i]))
  predictors<-stack(predictors, pred.raster)
}
#rm(bo.raster, bo_layers_fill, bo_list, candidates, distShore, ms.raster, ms_layers_fill, ms_list, tb, tb_slope, tidalRange.fill, wc.raster, wc_layers_fill, wc_list)
#Extracting values for records from layers in the predictors stack
envSpecies<-extract(predictors, recordsSpatial)
# Columns 2:3 of sp.occ are long/lat; bind coordinates to extracted values.
records<-cbind(sp.occ[,2:3], envSpecies)
#Removing multiple records from the same cell - Adam Smith code
#inputing the function
source('./R scripts/Eliminate Points in Same Cell of a Raster.r')
recordsNoDups<-elimCellDups(records, predictors[[1]], longLatFields=c('long', 'lat'), priority=NULL)
# Quick check of how many duplicate-cell records were dropped.
nrow(records)
nrow(recordsNoDups)
write.csv(recordsNoDups, file="dataframes/und_occ.csv", row.names=FALSE)
#Plot extracted values. Looking at any outliers in data by plotting one variable against another. Code from here - https://stackoverflow.com/questions/13035834/plot-every-column-in-a-data-frame-as-a-histogram-on-one-page-using-ggplot - I don't understand the steps very well though - need to take some time out to look at this
# melt() stacks all predictor columns into (variable, value) pairs so one
# faceted histogram per predictor can be drawn.
d <- melt(recordsNoDups[,c(3:ncol(recordsNoDups))])
ggplot(d,aes(x = value)) + facet_wrap(~variable,scales = "free_x") +
  geom_histogram()
##Creating random background points - Adam Smith code
set.seed(454)
randomBgSites<-randomPoints(predictors, 10000)
randomBgEnv<-as.data.frame(extract(predictors, randomBgSites))
#combine with coordinates and rename coordinate fields
randomBg<-cbind(randomBgSites, randomBgEnv)
colnames(randomBg)[1:2] <- c('long', 'lat')
head(randomBg)
write.csv(randomBg, file="dataframes/randomBg.csv", row.names=FALSE)
#randomBg<-read.csv("dataframes/randomBg.csv")
##Creating a kernel desity smoothing surface for sampling locations (combining locations were littorinids were and were not found) and using this as the probability surface to sample background locations
#Doing this for all sites and for mangrove and rocky shore separately as well
# "mixed" sites belong to both the rocky-shore and mangrove subsets.
rs.hab<-c("rocky", "mixed")
mg.hab<-c("mangrove", "mixed")
rs.occ<-unq.occ[unq.occ$habitat %in% rs.hab,]
write.csv(rs.occ, file="dataframes/rs_occ", row.names=FALSE)
mg.occ<-unq.occ[unq.occ$habitat %in% mg.hab, ]
write.csv(mg.occ, file="dataframes/mg_occ", row.names=FALSE)
#creating the probability surface for all habitats
#running the kde2d function without specifying the number of cells to calculate extent. Code from - https://scottrinnan.wordpress.com/2015/08/31/how-to-construct-a-bias-file-with-r-for-use-in-maxent-modeling/
#the rasterize function converts any part of a polygon falling over the center of a raster cell into a raster cell and the field argument transfers the specified value to this new raster cell
# Build a kernel-density sampling-bias surface aligned to a template raster.
#   x.pt, y.pt: coordinates of sampling locations
#   ras:        template raster supplying grid dimensions, extent and mask
# Returns a raster of 2-D kernel density values, NA outside the template and
# where the density is exactly zero.
kd.ras.fn <- function(x.pt, y.pt, ras) {
  # Rasterize the points first so kde2d is evaluated on the same number of
  # rows/columns as the template grid.
  occ.grid <- rasterize(cbind(x.pt, y.pt), ras, field = 1)
  dens <- raster(kde2d(x.pt, y.pt, n = c(nrow(occ.grid), ncol(occ.grid)), h = 0.5))
  # Zero-density cells carry no sampling information; mark them NA.
  dens[dens == 0] <- NA
  # Align to the template grid, then clip to its footprint.
  raster::mask(resample(dens, ras), ras)
}
# Kernel-density bias surfaces: all sites, rocky-shore sites, mangrove sites.
all.kd<-kd.ras.fn(unq.occ[,2], unq.occ[,3], cand.mask)
writeRaster(all.kd, filename="all_kd", overwrite=TRUE, format="GTiff")
rs.kd<-kd.ras.fn(rs.occ[,2], rs.occ[,3], cand.mask)
writeRaster(rs.kd, filename="rs_kd", overwrite=TRUE, format="GTiff")
# BUG FIX: this line called `kd.ras(...)` - there is no such function; the
# helper defined above is `kd.ras.fn`.
mg.kd<-kd.ras.fn(mg.occ[,2], mg.occ[,3], cand.mask)
writeRaster(mg.kd, filename="mg_kd", overwrite=TRUE, format="GTiff")
##Creating background points based on the kernel density probability surface. Doing this for both all sites and also habitat specific probability surface
# prob=TRUE makes randomPoints treat the kernel density as a sampling weight.
set.seed(30985)
kdBgSites<-randomPoints(all.kd, 10000, prob=TRUE)
kdBgEnv<-as.data.frame(extract(predictors, kdBgSites))
kdBg<-cbind(kdBgSites, kdBgEnv)
colnames(kdBg)[1:2]<-c("long", "lat")
write.csv(kdBg, file="dataframes/kdBg.csv", row.names=FALSE)
#kdBg<-read.csv("dataframes/kdBg.csv")
# Rocky-shore-only bias surface.
set.seed(485743)
kdRockBgSites<-randomPoints(rs.kd, 10000, prob=TRUE)
kdRockBgEnv<-as.data.frame(extract(predictors, kdRockBgSites))
kdRockBg<-cbind(kdRockBgSites, kdRockBgEnv)
colnames(kdRockBg)[1:2]<-c("long", "lat")
write.csv(kdRockBg, file="dataframes/kdRockBg.csv", row.names=FALSE)
#kdRockBg<-read.csv("dataframes/kdRockBg.csv")
# Mangrove-only bias surface.
set.seed(9238)
kdMangBgSites<-randomPoints(mg.kd, 10000, prob=TRUE)
kdMangBgEnv<-as.data.frame(extract(predictors, kdMangBgSites))
kdMangBg<-cbind(kdMangBgSites, kdMangBgEnv)
colnames(kdMangBg)[1:2]<-c("long", "lat")
write.csv(kdMangBg, file="dataframes/kdMangBg.csv", row.names=FALSE)
#kdMangBg<-read.csv("dataframes/kdMangBg.csv")
##Creating another background sites dataframe - where a x km radius is drawn about each of the target species points samples - it is coalesced into a polygon and random background points are drawn from this polygon
#This code is taken from http://www.rspatial.org/sdm/rst/3_sdm_absence-background.html
#Also doing a 20 km radius polygon
#Save sites as a spatial points dataframe - defining which columns in are the coordinates
coordinates(unq.occ)<-~long+lat
projection(unq.occ)<-wgs
coordinates(rs.occ)<-~long+lat
projection(rs.occ)<-wgs
coordinates(mg.occ)<-~long+lat
projection(mg.occ)<-wgs
#Creating circles of radius 20 km around each sampled point
rad<-20000 #20 km radius
# BUG FIX (probable): the all-sites polygon was built from `all.occ`, but it is
# `unq.occ` that was just converted to SpatialPoints above, exactly as
# rs.occ/mg.occ are used for the habitat polygons. Changed to unq.occ -
# confirm against the intent of the original.
pol20<-polygons(circles(unq.occ, d=rad, lonlat=TRUE))
pol20Rock<-polygons(circles(rs.occ, d=rad, lonlat=TRUE))
pol20Mang<-polygons(circles(mg.occ, d=rad, lonlat=TRUE))
plot(pol20, col=alpha("blue", 0.2))
plot(pol20Rock, col=alpha("red", 0.2))
plot(pol20Mang, col=alpha("green", 0.2))
#Creating an intersect of the raster stack with the polygons and extracting random points from there
##Creating random background points - Adam Smith code
set.seed(6644)
bufBgSites<-randomPoints(raster::mask(cand.mask, pol20), 10000)
bufBgEnv<-as.data.frame(extract(predictors, bufBgSites))
bufBg<-cbind(bufBgSites, bufBgEnv)
colnames(bufBg)[1:2]<-c("long", "lat")
write.csv(bufBg, file="dataframes/bufBg.csv", row.names=FALSE)
#bufBg<-read.csv("dataframes/bufBg.csv")
set.seed(8457)
bufRockBgSites<-randomPoints(raster::mask(cand.mask, pol20Rock), 10000)
bufRockBgEnv<-as.data.frame(extract(predictors, bufRockBgSites))
bufRockBg<-cbind(bufRockBgSites, bufRockBgEnv)
colnames(bufRockBg)[1:2]<-c("long", "lat")
write.csv(bufRockBg, file="dataframes/bufRockBg.csv", row.names=FALSE)
#bufRockBg<-read.csv("dataframes/bufRockBg.csv")
set.seed(90384)
bufMangBgSites<-randomPoints(raster::mask(cand.mask, pol20Mang), 10000)
bufMangBgEnv<-as.data.frame(extract(predictors, bufMangBgSites))
bufMangBg<-cbind(bufMangBgSites, bufMangBgEnv)
colnames(bufMangBg)[1:2]<-c("long", "lat")
write.csv(bufMangBg, file="dataframes/bufMangBg.csv", row.names=FALSE)
#bufMangBg<-read.csv("dataframes/bufMangBg.csv")
##I have decided on testing out two sets of feature functions - linear + quadratic + product, linear + quadratic + threshold and maybe for species where there are enough points linear + quadratic + product + threshold - STARTING WITH LINEAR + QUADRATIC + PRODUCT
##Running maxent models with each of the background locations, keeping the beta regularization factor as default and plotting the results
#Code for jackknifing over variables
# pred.jk: variable jackknife. For each predictor i, fit a maxent model
# WITHOUT predictor i and one with ONLY predictor i, recording entry 2 of each
# model's results slot (the training-gain entry of dismo's maxent output -
# confirm for the dismo version in use).
#   input: data frame laid out [long, lat, predictors..., presence flag].
# Returns a data frame with one row per predictor, columns "ex-var"/"in-var".
pred.jk<-function(input){
  env.input<-input[,3:(ncol(input)-1)]
  pres.input<-input[,ncol(input)]
  # BUG FIX: the gain table was allocated with nrow(input) rows (one per
  # occurrence/background point) but only the first ncol(env.input) rows were
  # ever filled, leaving thousands of all-NA rows. One row per predictor.
  modGain<-as.data.frame(matrix(NA, nrow=ncol(env.input), ncol=2))
  names(modGain)<-c("ex-var", "in-var")
  for(i in seq_len(ncol(env.input))){
    exModel<-maxent(x=env.input[,-i], p=pres.input, args=c('linear=true', 'quadratic=true', 'product=true',
                                                           'threshold=false', 'hinge=false'))
    inModel<-maxent(x=env.input[,i], p=pres.input, args=c('linear=true', 'quadratic=true', 'product=true',
                                                          'threshold=false', 'hinge=false'))
    modGain[i,]<-c(exModel@results[2], inModel@results[2])
  }
  modGain
}
##Maxent model using random background points
trainData<-rbind(recordsNoDups, randomBg)
# Presence flag: 1 for occurrence records, 0 for background points.
presentBg<-c(rep(1, times=nrow(recordsNoDups)), rep(0, times=nrow(randomBg)))
randomBgIn<-cbind(trainData, presentBg)
ncol(randomBgIn)
write.csv(randomBgIn, file="dataframes/und_randomBg_input.csv", row.names=FALSE)
# Columns 3:(ncol-1) are the predictors (1:2 are long/lat, last is the flag).
randomBgModel<-maxent(x=randomBgIn[,3:(ncol(randomBgIn)-1)], p=randomBgIn[,ncol(randomBgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                                                    'threshold=false', 'hinge=false', 'jackknife=true', 'responsecurves=true'))
plot(randomBgModel)
save(randomBgModel, file="results/undulata/randomBgModel")
randomBgResponse<-response(randomBgModel, range='p')
randomBgMap<-predict(randomBgModel, predictors)
writeRaster(randomBgMap, filename="results/undulata/randomBgMap", format="GTiff", overwrite=TRUE)
#Creating MESS map to see where combination of environmental variables is not the same as the training data
randomBgMESS<-mess(predictors, randomBgIn[,3:(ncol(randomBgIn)-1)])
plot(randomBgMESS)
# CONSISTENCY FIX: overwrite=TRUE added (every sibling writeRaster call has it;
# without it a re-run of the script errors on the existing file).
writeRaster(randomBgMESS, filename="results/undulata/randomBgMESS", format="GTiff", overwrite=TRUE)
#creating a limiting variable map for the model
randomBgLim<-limiting(predictors, randomBgModel)
plot(randomBgLim)
writeRaster(randomBgLim, filename="results/undulata/randomBgLim", format="GTiff", overwrite=TRUE)
##Maxent model using kernel density based points
trainData<-rbind(recordsNoDups, kdBg)
presentBg<-c(rep(1, times=nrow(recordsNoDups)), rep(0, times=nrow(kdBg)))
kdBgIn<-cbind(trainData, presentBg)
write.csv(kdBgIn, file="dataframes/und_kdBg_input.csv", row.names=FALSE)
kdBgModel<-maxent(x=kdBgIn[,3:(ncol(kdBgIn)-1)], p=kdBgIn[,ncol(kdBgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                                 'threshold=false', 'hinge=false', 'jackknife=true', 'responsecurves=true'))
plot(kdBgModel)
save(kdBgModel, file="results/undulata/kdBgModel")
kdBgResponse<-response(kdBgModel, range='p')
kdBgMap<-predict(kdBgModel, predictors)
writeRaster(kdBgMap, filename="results/undulata/kdBgMap", format="GTiff", overwrite=TRUE)
plot(kdBgMap)
#Creating MESS map to see where combination of environmental variables is not the same as the training data
kdBgMESS<-mess(predictors, kdBgIn[,3:(ncol(kdBgIn)-1)])
plot(kdBgMESS)
# CONSISTENCY FIX: overwrite=TRUE added here and below so a re-run of the
# script does not error on existing output files.
writeRaster(kdBgMESS, filename="results/undulata/kdBgMESS", format="GTiff", overwrite=TRUE)
#creating a limiting variable map for the model
kdBgLim<-limiting(predictors, kdBgModel)
plot(kdBgLim)
writeRaster(kdBgLim, filename="results/undulata/kdBgLim", format="GTiff", overwrite=TRUE)
##Maxent model using kernel density based points - but for habitat specific background sites only
# und. undulata is a rocky-shore species here; swap in kdMangBg for mangrove taxa.
kdHabBg<-kdRockBg
trainData<-rbind(recordsNoDups, kdHabBg)
presentBg<-c(rep(1, times=nrow(recordsNoDups)), rep(0, times=nrow(kdHabBg)))
kdHabBgIn<-cbind(trainData, presentBg)
nrow(kdHabBgIn)
write.csv(kdHabBgIn, file="dataframes/und_kdRockBg_input.csv", row.names=FALSE)
# CONSISTENCY FIX: index kdHabBgIn (as every other model block does) rather
# than trainData; the two were equivalent but inconsistent.
kdHabBgModel<-maxent(x=kdHabBgIn[,3:(ncol(kdHabBgIn)-1)], p=kdHabBgIn[,ncol(kdHabBgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                                               'threshold=false', 'hinge=false', 'jackknife=true', 'responsecurves=true'))
plot(kdHabBgModel)
save(kdHabBgModel, file="results/undulata/kdHabBgModel")
kdHabBgResponse<-response(kdHabBgModel, range='p')
kdHabBgMap<-predict(kdHabBgModel, predictors)
writeRaster(kdHabBgMap, filename="results/undulata/kdHabBgMap", format="GTiff", overwrite=TRUE)
#Creating MESS map to see where combination of environmental variables is not the same as the training data
kdHabBgMESS<-mess(predictors, kdHabBgIn[,3:(ncol(kdHabBgIn)-1)])
# BUG FIX: was plot(kdBahBgMESS) - a typo naming an object that never existed.
plot(kdHabBgMESS)
writeRaster(kdHabBgMESS, filename="results/undulata/kdHabBgMESS", format="GTiff", overwrite=TRUE)
#creating a limiting variable map for the model
kdHabBgLim<-limiting(predictors, kdHabBgModel)
plot(kdHabBgLim)
# BUG FIX: format= was missing entirely on this call; specify GTiff like every
# other raster written by this script.
writeRaster(kdHabBgLim, filename="results/undulata/kdHabBgLim", format="GTiff", overwrite=TRUE)
##Maxent model using background sites from 20 km buffer
trainData<-rbind(recordsNoDups, bufBg)
presentBg<-c(rep(1, times=nrow(recordsNoDups)), rep(0, times=nrow(bufBg)))
bufBgIn<-cbind(trainData, presentBg)
write.csv(bufBgIn, file="dataframes/und_bufBg_input.csv", row.names=FALSE)
bufBgModel<-maxent(x=bufBgIn[,3:(ncol(bufBgIn)-1)], p=bufBgIn[,ncol(bufBgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                                     'threshold=false', 'hinge=false', 'jackknife=true', 'responsecurves=true'))
plot(bufBgModel)
save(bufBgModel, file="results/undulata/bufBgModel")
bufBgResponse<-response(bufBgModel, range='p')
bufBgMap<-predict(bufBgModel, predictors)
# BUG FIX: was `writeraster(...))` - wrong capitalisation (no such function)
# plus an unbalanced trailing parenthesis.
writeRaster(bufBgMap, filename="results/undulata/bufBgMap", format="GTiff", overwrite=TRUE)
#Creating MESS map to see where combination of environmental variables is not the same as the training data
bufBgMESS<-mess(predictors, bufBgIn[,3:(ncol(bufBgIn)-1)])
plot(bufBgMESS)
writeRaster(bufBgMESS, filename="results/undulata/bufBgMESS", format="GTiff", overwrite=TRUE)
#creating a limiting variable map for the model
bufBgLim<-limiting(predictors, bufBgModel)
writeRaster(bufBgLim, filename="results/undulata/bufBgLim", format="GTiff", overwrite=TRUE)
##Maxent model using buffer based points - but for habitat specific background sites only
bufHabBg<-bufRockBg
trainData<-rbind(recordsNoDups, bufHabBg)
presentBg<-c(rep(1, times=nrow(recordsNoDups)), rep(0, times=nrow(bufHabBg)))
bufHabBgIn<-cbind(trainData, presentBg)
write.csv(bufHabBgIn, file="dataframes/und_bufRockBg_input.csv", row.names=FALSE)
bufHabBgModel<-maxent(x=bufHabBgIn[,3:(ncol(bufHabBgIn)-1)], p=bufHabBgIn[,ncol(bufHabBgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                                                    'threshold=false', 'hinge=false', 'jackknife=true', 'responsecurves=true'))
plot(bufHabBgModel)
save(bufHabBgModel, file="results/undulata/bufHabBgModel")
bufHabBgResponse<-response(bufHabBgModel, range='p')
bufHabBgMap<-predict(bufHabBgModel, predictors)
writeRaster(bufHabBgMap, filename="results/undulata/bufHabBgMap", format="GTiff", overwrite=TRUE)
#Creating MESS map to see where combination of environmental variables is not the same as the training data
bufHabBgMESS<-mess(predictors, bufHabBgIn[,3:(ncol(bufHabBgIn)-1)])
plot(bufHabBgMESS)
writeRaster(bufHabBgMESS, filename="results/undulata/bufHabBgMESS", format="GTiff", overwrite=TRUE)
#creating a limiting variable map for the model
bufHabBgLim<-limiting(predictors, bufHabBgModel)
plot(bufHabBgLim)
# BUG FIX: format=GTiff was an unquoted symbol and would raise
# "object 'GTiff' not found"; it must be the string "GTiff".
writeRaster(bufHabBgLim, filename="results/undulata/bufHabBgLim", format="GTiff", overwrite=TRUE)
##Running calc.aicc from the package ENMeval to find the value of beta regularization factor - choosing to use mixedBg20 and the features as above - linear, quadratic and product
# beta.AIC: fit one maxent model per candidate betamultiplier value, predict
# each over `predictors`, and rank the candidates by AICc via ENMeval.
#   bgIn       data frame [long, lat, predictors..., presence flag]
#   beta.val   numeric vector of betamultiplier values to try
#   predictors RasterStack to predict over
#   coord      occurrence coordinates passed to calc.aicc
# Returns the calc.aicc table with beta.val attached, sorted by ascending AICc.
beta.AIC<-function(bgIn, beta.val, predictors, coord){
  # Preallocate one slot per candidate beta value (FIX: the model list was
  # grown inside the loop; seq_along replaces the unsafe 1:length idiom).
  nparams<-rep(NA, times=length(beta.val))
  pred.stack<-stack()
  model<-vector("list", length(beta.val))
  for(i in seq_along(beta.val)){
    model[[i]]<-maxent(x=bgIn[,3:(ncol(bgIn)-1)], p=bgIn[, ncol(bgIn)], args=c('linear=true', 'quadratic=true', 'product=true',
                                                                               'threshold=false', 'hinge=false', paste0("betamultiplier=", beta.val[i])))
    pred.stack<-stack(pred.stack, predict(model[[i]], predictors))
    names(pred.stack)[i]<-paste0("beta ", beta.val[i])
    nparams[i]<-get.params(model[[i]])
  }
  aicc<-calc.aicc(nparams, coord, pred.stack)
  aicc$beta.val<-beta.val
  aicc<-aicc[order(aicc$AICc),]
  aicc
}
#Carrying out test with different values of beta for all the models
# Candidate betamultiplier values 1..5 tried against every background scheme.
beta.val<-seq(1,5, by=1)
randomBg_beta<-beta.AIC(randomBgIn, beta.val, predictors, recordsNoDups[,1:2])
kdBg_beta<-beta.AIC(kdBgIn, beta.val, predictors, recordsNoDups[,1:2])
kdHabBg_beta<-beta.AIC(kdHabBgIn, beta.val, predictors, recordsNoDups[,1:2])
bufBg_beta<-beta.AIC(bufBgIn, beta.val, predictors, recordsNoDups[,1:2])
bufHabBg_beta<-beta.AIC(bufHabBgIn, beta.val, predictors, recordsNoDups[,1:2])
##Evaluating the models - looking at the ROC
# Each evaluate() call scores presences vs the model's own background points.
# NOTE(review): randomBgEval is the only evaluation without a plot(..., 'ROC')
# line - presumably an oversight; confirm.
randomBgEval<-evaluate(p=recordsNoDups[,3:ncol(recordsNoDups)], a=randomBg[,3:ncol(randomBg)], randomBgModel)
save(randomBgEval, file="results/undulata/randomBgEval")
kdBgEval<-evaluate(p=recordsNoDups[,3:ncol(recordsNoDups)], a=kdBg[,3:ncol(kdBg)], kdBgModel)
plot(kdBgEval, 'ROC')
save(kdBgEval, file="results/undulata/kdBgEval")
kdHabBgEval<-evaluate(p=recordsNoDups[,3:ncol(recordsNoDups)], a=kdHabBg[,3:ncol(kdHabBg)], kdHabBgModel)
plot(kdHabBgEval, 'ROC')
save(kdHabBgEval, file="results/undulata/kdHabBgEval")
bufBgEval<-evaluate(p=recordsNoDups[,3:ncol(recordsNoDups)], a=bufBg[,3:ncol(bufBg)], bufBgModel)
plot(bufBgEval, 'ROC')
save(bufBgEval, file="results/undulata/bufBgEval")
bufHabBgEval<-evaluate(p=recordsNoDups[,3:ncol(recordsNoDups)], a=bufHabBg[,3:ncol(bufHabBg)], bufHabBgModel)
plot(bufHabBgEval, 'ROC')
save(bufHabBgEval, file="results/undulata/bufHabBgEval")
##Dividing presence data into 5 groups - using 4/5ths for training and 1/5ths for test. Code from Hijmans and Elith 2017
#For each of the folds , dividing data into test and training data, combining test data with background data and giving presence codes to the rows. Running the model using training data, keeping the features linear, quadratic and product.
#Here k refers to the number of k folds, fold refers to the k fold group, pres is the presence data dataframe and bg is the background data dataframe
# Returns a list of length k with the dismo::evaluate() result of each fold.
maxent_kfold<-function(k, pres, bg, beta_val){
  # FIX: preallocate one slot per fold instead of growing the lists, and use
  # seq_len(k) rather than 1:k (safe if k were ever 0).
  evl<-vector("list", k)
  model<-vector("list", k)
  group <- kfold(pres, k)
  for (i in seq_len(k)) {
    test <- pres[group == i,]
    train_pres <- pres[group != i, ]
    train<-rbind(train_pres, bg)
    # Presence flag: 1 = occurrence, 0 = background.
    code<-c(rep(1, nrow(train_pres)), rep(0, nrow(bg)))
    model[[i]] <- maxent(x=train[,3:ncol(train)], p=code, args=c('linear=true', 'quadratic=true', 'product=true',
                                                                 'threshold=false', 'hinge=false', paste0("betamultiplier=", beta_val)))
    evl[[i]] <- evaluate(p=test[,3:ncol(test)], a=bg[,3:ncol(bg)], model[[i]])
  }
  evl
}
#Running the k-folds evaluation on the default beta value model
beta_val<-1
# NOTE(review): these evaluation lists are only auto-printed, never stored;
# assign them if the fold-wise results are needed later.
maxent_kfold(5, recordsNoDups, randomBg, beta_val)
maxent_kfold(5, recordsNoDups, kdBg, beta_val)
maxent_kfold(5, recordsNoDups, kdHabBg, beta_val)
maxent_kfold(5, recordsNoDups, bufBg, beta_val)
maxent_kfold(5, recordsNoDups, bufHabBg, beta_val)
#AUC value tells you the probability that if you pick a random presence and absence, the presence has been given a higher relative occurrence rate than the absence. But what sense does it make for pseudoabsences.
#the cor values are very low - cor is the correlation coefficient for predicted value of environmental suitability for a cell and the actual status of the cell (presence or absence)
##Looking if the residuals have a spatial patterning
modelMap<-stack(randomBgMap, kdBgMap, kdHabBgMap, bufBgMap, bufHabBgMap)
names(modelMap)<-c("randomBg", "kdBg", "kdHabBg", "bufBg", "bufHabBg")
# FIX: palette construction is loop-invariant, hoisted out; seq_len replaces
# the unsafe 1:nlayers() idiom.
mycol<-colorRampPalette(c("blue", "red"))
for(i in seq_len(nlayers(modelMap))){
  # Residual at each occurrence = 1 - predicted suitability.
  predict.pres<-raster::extract(modelMap[[i]], recordsNoDups[,1:2])
  residual<-1-predict.pres
  predict.pres<-cbind(recordsNoDups[,1:2], residual)
  jpeg(file = paste0("results/undulata/", names(modelMap)[i], "Res.jpeg"))
  plot(modelMap[[i]], main="Residuals - Model")
  # NOTE(review): mycol(10) recycles 10 colours over the points in order; the
  # colours are NOT mapped to residual magnitude - confirm intent.
  points(predict.pres, pch=19, col=mycol(10), cex=0.5)
  dev.off()
}
##############################################################
names(predictors)
|
aabf1f8d82d403fa0036163ae16415af6608394c
|
2199f4d22db7c73977a0a90f8f9cae390461c55f
|
/capstone_wrangling.R
|
470030db59e90f9301970b586994eeaf3be329ae
|
[] |
no_license
|
demetri77/springboard-capstone
|
ae759fbd8bf7fa6f0c8da581a4cbae39e05cd794
|
152d46e4a408504cd1e5a1c505605e16ff2eb4ac
|
refs/heads/master
| 2020-04-02T12:13:38.435028
| 2019-04-20T03:53:50
| 2019-04-20T03:53:50
| 154,424,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,916
|
r
|
capstone_wrangling.R
|
library(tidyverse)
## Read in file --------------------------------------------------------------
# Bank marketing data, ";"-delimited; col_types spells out integer/factor/
# character/double for each of the 21 columns.
#bankfull <- read.table("bank-additional-full.csv",header=TRUE,sep=";")
#bankfull <- read_delim("bank-additional-full.csv", delim = ";", col_types = "iccccccccciiiicdddddc")
#bankfull <- read_delim("bank-additional.csv", delim = ";", col_types = "iccccccccciiiicdddddc")
bankfull <- read_delim("bank-additional.csv", delim = ";", col_types = "iffcffffffiiiicdddddc")
#str(bankfull)
## Rename columns to snake_case in a single call (was 13 separate rename()s).
bankfull <- rename(bankfull,
                   edu_lvl = education,
                   cred_def = default,
                   duration_sec = duration,
                   contact_typ = contact,
                   contact_cnt = campaign,
                   days_passed = pdays,
                   prev_contact_cnt = previous,
                   prev_outcome = poutcome,
                   emp_var_rate = emp.var.rate,
                   cons_price_idx = cons.price.idx,
                   cons_conf_idx = cons.conf.idx,
                   num_employed = nr.employed,
                   subscribed = y)
# NOTE(review): the original had `mutate(bankfull, duration_sec == "0")` here -
# a no-op (`==` where `=` was presumably intended, result never assigned).
# Removed; confirm whether a zero-duration flag column was the actual intent.
# Education levels use "." separators (e.g. "high.school"); normalise to "_".
etemp <- bankfull$edu_lvl
bankfull$edu_lvl <- gsub("\\.", "_", etemp)
bankfull$edu_lvl <- as.factor(bankfull$edu_lvl)
#is.factor(bankfull$subscribed)
bankfull$subscribed <- as.factor(bankfull$subscribed)
bankfull$prev_outcome <- as.factor(bankfull$prev_outcome)
glimpse(bankfull)
summary(bankfull)
summary(bankfull)
#colnames(bankfull)
#tally(bankfull, subscribed == "0")
#tally(bankfull, subscribed == "1")
summary(bankfull$subscribed)
ggplot(bankfull, aes(x = subscribed)) + geom_bar()
ggplot(bankfull, aes(x = prev_outcome)) + geom_bar()
bankfull$prev_outcome[bankfull$prev_outcome == "nonexistent"] <- NA
xtabs(~prev_outcome+subscribed, bankfull)
plot(xtabs(~prev_outcome+subscribed, bankfull))
bankfull %>% mutate(duration_min = duration_sec/60)
summary(bankfull$duration_min)
boxplot(bankfull$duration_min)
#bankfull %>% select(edu_lvl, job) %>% group_by(edu_lvl, job) %>% arrange(desc(edu_lvl)) %>% slice(1:3)
bankfull %>% group_by(edu_lvl) %>% summarise(count = n()) %>% arrange(desc(count))
bankfull %>% group_by(job) %>% summarise(count = n()) %>% arrange(desc(count))
ggplot(bankfull, aes(x = euribor3m, y = cons.price.idx)) + geom_point()
ggplot(bankfull, aes(y = euribor3m, x = subscribed)) + geom_point
ggplot(data=bankfull, aes(x=edu_lvl, fill=subscribed)) + geom_bar()
## filter, select, arrange, mutate, summarise, group_by
## Variable selection
ggplot(bankfull, aes(x = job)) + geom_histogram(stat="count")
ggplot(bankfull, aes(x = job, fill=marital)) + geom_histogram(stat="count")
ggplot(bankfull, aes(x = job, fill=edu_lvl)) + geom_histogram(stat="count")
ggplot(bankfull, aes(x = edu_lvl, fill=subscribed)) + geom_histogram(stat="count")
ggplot(bankfull, aes(x = job, fill=subscribed)) + geom_histogram(stat="count")
ggplot(bankfull, aes(x=euribor3m, fill=subscribed)) + geom_bar(stat="count")
#weekdays <- c("Mon", "Tue", "Wed", "Thu", "Fri")
#Mon=0, Tue=1, ...
#group_by(group_by())
#dailyTally <- tally(bankfull, day_of_week == "mon")
#dailyTally[1] <- tally(bankfull, day_of_week == "tue")
#dailyTally[2] <- tally(bankfull, day_of_week == "wed")
#dailyTally[3] <- tally(bankfull, day_of_week == "thu")
#dailyTally[4] <- tally(bankfull, day_of_week == "fri")
#tibble(weekdays, dailyTally)
#groupby
ggplot(bankfull, aes(x=days_of_week)) + geom_bar()
# Outcome of previous campaign
# BUG FIX: poutcome was renamed to prev_outcome during wrangling.
ggplot(bankfull, aes(x = prev_outcome)) + geom_bar()
medCPI <- median(bankfull$cons_price_idx)
avgCPI <- mean(bankfull$cons_price_idx)
# NOTE(review): no `balance` column appears anywhere in this dataset's
# wrangling above; the plot is commented out until the intended variable is
# confirmed.
#ggplot(bankfull, aes(x = marital, y = balance)) + geom_point()
ggplot(bankfull, aes(x = marital)) + geom_bar()
# BUG FIX: filter() needs `==`; the single `=` passed "yes" as a named argument.
subY <- filter(bankfull, subscribed == "yes")
# BUG FIX: the column is duration_sec (renamed) and the stray trailing comma
# in aes() was a syntax error.
ggplot(subY, aes(x = duration_sec)) + geom_bar()
ggplot(bankfull, aes(x=duration_sec, y=subscribed)) + geom_point()
# NOTE(review): removed `ggplot(bankfull, aes()) + facet_grid()` - it had no
# aesthetic mappings or facet spec and drew nothing.
ggplot(bankfull, aes(x = contact_typ, fill = subscribed)) + geom_bar()
edu_tmp <- as.factor(bankfull$edu_lvl)
plot(edu_tmp)
# BUG FIX: ggplot() needs a data frame; a bare factor vector is not valid data.
ggplot(data.frame(edu_lvl = edu_tmp), aes(x = edu_lvl)) + geom_bar()
model <- glm(subscribed ~ ., data = bankfull, family = binomial)
summary(model)
ggplot(bankfull, aes(x = age, fill = subscribed)) +
  geom_histogram(binwidth = 10) +
  labs(title = "Age distrubtion of current bank clients and those who subscribed")
library(scales)
ggplot(bankfull, aes(x = age, fill = subscribed)) +
  geom_histogram(binwidth = 10, aes(y=(..count../sum(..count..)))) +
  # BUG FIX: was scale_y_continuous(scale::percent) - the package is `scales`
  # and the formatter must be supplied as the `labels` argument.
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Age distrubtion of current bank clients and those who subscribed")
ggplot(bankfull, aes(job, fill=subscribed)) +
  geom_bar(aes(y = (..count..)/sum(..count..))) +
  scale_y_continuous(labels=scales::percent) +
  ylab("relative frequencies")
ggplot(bankfull, aes(euribor3m, emp_var_rate)) + geom_boxplot()
# BUG FIX: aes() had a trailing comma and no y variable, so geom_boxplot had
# nothing to box. NOTE(review): duration_sec assumed as the intended y - confirm.
ggplot(bankfull, aes(x = day_of_week, y = duration_sec)) + geom_boxplot()
# BUG FIX: a closing parenthesis was missing after aes() (syntax error).
ggplot(bankfull, aes(x=prev_outcome, y=(duration_sec/60))) + geom_boxplot()
# ------------------------------
# ggpairs
# ------------------------------
library(GGally)
# Client demographics: age through loan (select() keeps tibble column types,
# unlike the earlier positional bankfull[, c(1:7)] attempt).
b_client <- select(bankfull, age:loan)
ggpairs(b_client)
# BUG FIX: `default` was renamed to cred_def during wrangling, and the second
# attempt referenced "cred_default"/"mortgage" which never existed.
b_credit <- bankfull[, c("cred_def", "housing", "loan")]
ggpairs(b_credit)
# BUG FIX: emp.var.rate was renamed to emp_var_rate.
b_soceco <- select(bankfull, emp_var_rate:euribor3m)
ggpairs(b_soceco)
# ------------------------------
# summarytools
# ------------------------------
library(summarytools)
# Data Frame Summaries
dfSummary(bankfull, plain.ascii=FALSE, style="grid")
dfSummary(b_client)
# Frequency
freq(bankfull$age)
freq(bankfull$loan)
# Cross-Tabulation
ctable(bankfull$edu_lvl, bankfull$subscribed)
# BUG FIX: y was renamed to subscribed.
ctable(bankfull$job, bankfull$subscribed)
# Descriptive Univariate Stats
descr(bankfull)
# Write/Export dataset
# Positional path works across readr versions (`path=` is deprecated in
# favour of `file=` since readr 1.4).
write_csv(bankfull, "bankfull_clean.csv")
|
22e630605d71df33909a759a8da947671c4294f7
|
50a7414f761472e051053dc5d0f12bc04da43772
|
/ehcs/unitTests/casesCreationTests.R
|
6d09ed3f2e605f4df053348021a67e4f24966b5b
|
[] |
no_license
|
cse-bristol/national-household-model-stock-files-creator
|
bef7c41f279938d3f767e6e66966a62610dfe013
|
5d777d5390b670da57b064d1c883b1c22404cb6d
|
refs/heads/master
| 2021-01-18T01:08:02.660088
| 2018-05-10T16:16:08
| 2018-05-10T16:16:08
| 65,458,879
| 1
| 1
| null | 2017-08-11T14:11:01
| 2016-08-11T09:54:41
|
R
|
UTF-8
|
R
| false
| false
| 2,431
|
r
|
casesCreationTests.R
|
library(RUnit)
source("~/software-projects/r-nhm-stock-creator/ehcs/cases.R", chdir=T);
test.floor.construction <- function(){
  # Shorthand: assert the floor type derived from the two survey answers.
  expect.type <- function(expected, first, second) {
    checkEquals(expected, groundfloor.construction.type(first, second))
  }
  # SOLID whenever no answer says "No" (missing answers included).
  expect.type("SOLID", NA, NA)
  expect.type("SOLID", "Yes", NA)
  expect.type("SOLID", "Yes", "Yes")
  expect.type("SOLID", NA, "Yes")
  # A "No" in either field yields SUSPENDED, even alongside a "Yes".
  expect.type("SUSPENDED", "No", NA)
  expect.type("SUSPENDED", NA, "No")
  expect.type("SUSPENDED", "Yes", "No")
  expect.type("SUSPENDED", "No", "Yes")
}
test.occupant.counts <- function(){
  # Missing counts default to zero occupants of either kind.
  none <- occupant.counts(NA, NA)
  checkEquals(0, none$adults)
  checkEquals(0, none$children)
  # One adult, no children.
  one.adult <- occupant.counts(1, 0)
  checkEquals(1, one.adult$adults)
  checkEquals(0, one.adult$children)
  # No adults, one child.
  one.child <- occupant.counts(0, 1)
  checkEquals(0, one.child$adults)
  checkEquals(1, one.child$children)
}
test.livingroom.data <- function(){
  # Shorthand: extract just the living-area fraction from the result.
  living.fraction <- function(a, b, c, d) {
    cal.livingarea.data(a, b, c, d)$livingAreaFaction
  }
  # A zero total area, or an 88.8 value in either room dimension, yields 0.
  checkEquals(0, living.fraction(1, 4, 7, 0))
  checkEquals(0, living.fraction(1, 88.8, 7, 100))
  checkEquals(0, living.fraction(1, 1, 88.8, 100))
  # A 4 x 7 living room within a 100-unit dwelling gives a 0.28 fraction.
  checkEquals(0.28, living.fraction(1, 4, 7, 100))
  # Test some null values - would expect 0's
  checkEquals(0, living.fraction(1, NA, NA, 100))
}
test.plot.dimensions.backplot <- function() {
  # calc.plot.dimensions(widthOfPlot, doesFrontPlotExist, doesBackPlotExist,
  #                      depthOfBackPlot, depthOfFrontPlot)
  # With a back plot present, depth/width/area follow the inputs.
  with.back <- calc.plot.dimensions(10, NA, "Yes", 10, 0)
  checkEquals(10, with.back$backplotDepth)
  checkEquals(100, with.back$backplotArea)
  checkEquals(20, calc.plot.dimensions(20, NA, "Yes", 10, 0)$backplotWidth)
  # A "No" answer zeroes out both the depth and the area.
  no.back <- calc.plot.dimensions(10, NA, "No", 10, 0)
  checkEquals(0, no.back$backplotDepth)
  checkEquals(0, no.back$backplotArea)
}
test.plot.dimensions.frontplot <- function() {
  # calc.plot.dimensions(widthOfPlot, doesFrontPlotExist, doesBackPlotExist,
  #                      depthOfBackPlot, depthOfFrontPlot)
  # With a front plot present, depth/width/area follow the inputs.
  with.front <- calc.plot.dimensions(10, "Yes", NA, 0, 10)
  checkEquals(10, with.front$frontplotDepth)
  checkEquals(100, with.front$frontplotArea)
  checkEquals(20, calc.plot.dimensions(20, "Yes", NA, 0, 10)$frontplotWidth)
  # A "No" answer zeroes out both the depth and the area.
  no.front <- calc.plot.dimensions(10, "No", NA, 0, 10)
  checkEquals(0, no.front$frontplotDepth)
  checkEquals(0, no.front$frontplotArea)
}
|
1f177865dfd4617a872c28ff6f247a926f0266ae
|
73e778bb056fcf84bd6b062d62d14e3abdfb5ea7
|
/模拟试验/信号强度变化试验_低维.R
|
3261aabf45c9921ddc7310645b456a9c49747198
|
[] |
no_license
|
HDangDang/PLasso
|
77ad8b674c412556359ed95b56dc156726b96f4e
|
651b1bd3b4aed7ad1ffe0f62821927acf6f7e154
|
refs/heads/master
| 2021-05-22T02:41:34.107495
| 2020-04-04T08:02:21
| 2020-04-04T08:02:21
| 252,935,514
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,245
|
r
|
信号强度变化试验_低维.R
|
library(ggplot2)
#### v50, snr = 1 ####
# Low-dimensional run at signal-to-noise ratio 1.
# NOTE(review): `simulate()` is a project-specific simulator (not
# stats::simulate) and must be sourced first; `v` and `betanumber` are assumed
# to exist in the global environment -- confirm upstream.
simu1 <- simulate(train = 100, test = 50, ptrain = 100,
                  v = 50, betaper = 0.1,
                  snr = 1,
                  betamu = 0, betasigma2 = 10, cons = 10,
                  rho = 0,
                  experiment = 30)
# Summarise Lasso vs PLasso for this setting.
# BUG FIX: the original indexed a stale object `simu`; every field now reads
# from `simu1`, the result of the snr = 1 run above.
result1 <- data.frame(
  Method = c("Lasso", "PLasso"),
  set = 1,
  # Average number of selected variables.
  VariableNum = c(mean(simu1$lassonumber), mean(simu1$lnpnumber)),
  # Share of truly relevant variables recovered.
  RealInclu = c(mean(simu1$lassotrue / simu1$truenumber),
                mean(simu1$lnptrue / simu1$truenumber)),
  # Share of irrelevant variables correctly excluded.
  FakeInclu = c(
    mean((v - betanumber - simu1$lassonumber + simu1$lassotrue) / (v - betanumber)),
    mean((v - betanumber - simu1$lnpnumber + simu1$lnptrue) / (v - betanumber))),
  # Squared coefficient-estimation bias and squared prediction error.
  Bias2 = c(mean(simu1$lassobeta2), mean(simu1$lnpbeta2)),
  Pre2 = c(mean(simu1$lassopre2), mean(simu1$lnppre2)))
resultall <- result1
#### v50, snr = 5 ####
# NOTE(review): rho = 0.3 here versus rho = 0 in the snr = 1 run; for a pure
# signal-strength experiment these would normally match -- confirm intentional.
simu5 <- simulate(train = 100, test = 50, ptrain = 100,
                  v = 50, betaper = 0.1,
                  snr = 5,
                  betamu = 0, betasigma2 = 10, cons = 10,
                  rho = 0.3,
                  experiment = 30)
# BUG FIX: read from `simu5` (this run's result) instead of the stale global
# `simu`; the summary is also renamed result3 -> result5 to match its setting.
result5 <- data.frame(
  Method = c("Lasso", "PLasso"),
  set = 5,
  VariableNum = c(mean(simu5$lassonumber), mean(simu5$lnpnumber)),
  RealInclu = c(mean(simu5$lassotrue / simu5$truenumber),
                mean(simu5$lnptrue / simu5$truenumber)),
  FakeInclu = c(
    mean((v - betanumber - simu5$lassonumber + simu5$lassotrue) / (v - betanumber)),
    mean((v - betanumber - simu5$lnpnumber + simu5$lnptrue) / (v - betanumber))),
  Bias2 = c(mean(simu5$lassobeta2), mean(simu5$lnpbeta2)),
  Pre2 = c(mean(simu5$lassopre2), mean(simu5$lnppre2)))
resultall <- rbind(resultall, result5)
#### v50, snr = 10 ####
simu10 <- simulate(train = 100, test = 50, ptrain = 100,
                   v = 50, betaper = 0.1,
                   snr = 10,
                   betamu = 0, betasigma2 = 10, cons = 10,
                   rho = 0.3,
                   experiment = 30)
# BUG FIX: read from `simu10` (this run's result) instead of the stale global
# `simu`.
result10 <- data.frame(
  Method = c("Lasso", "PLasso"),
  set = 10,
  VariableNum = c(mean(simu10$lassonumber), mean(simu10$lnpnumber)),
  RealInclu = c(mean(simu10$lassotrue / simu10$truenumber),
                mean(simu10$lnptrue / simu10$truenumber)),
  FakeInclu = c(
    mean((v - betanumber - simu10$lassonumber + simu10$lassotrue) / (v - betanumber)),
    mean((v - betanumber - simu10$lnpnumber + simu10$lnptrue) / (v - betanumber))),
  Bias2 = c(mean(simu10$lassobeta2), mean(simu10$lnpbeta2)),
  Pre2 = c(mean(simu10$lassopre2), mean(simu10$lnppre2)))
resultall <- rbind(resultall, result10)
#### Plotting for the v = 50 runs ####
# NOTE(review): `resultall` must already hold the stacked Lasso/PLasso
# summaries produced by the simulation sections above.
data<-resultall
# Treat snr as a discrete x-axis.
data$set<-as.factor(data$set)
# One narrow frame per metric (columns: Method, set, <metric>).
variable<-data[,c(1,2,3)]
realInclu<-data[,c(1,2,4)]
fakeInclu<-data[,c(1,2,5)]
Bias2<-data[,c(1,2,6)]
Pre2<-data[,c(1,2,7)]
# p1: proportion of truly relevant variables recovered, by snr.
p1<-ggplot(data=realInclu,aes(x=set,y=RealInclu,group=Method)) +
  geom_line(aes(colour=Method)) +
  geom_point(size=6,aes(shape=Method,colour=Method)) +
  xlab("snr")+ylab("正确的真实变量比例")+
  theme(legend.position = "top")
# p2: proportion of irrelevant variables correctly excluded.
p2<-ggplot(data=fakeInclu,aes(x=set,y=FakeInclu,group=Method)) +
  geom_line(aes(colour=Method)) +
  geom_point(size=6,aes(shape=Method,colour=Method)) +
  xlab("snr")+ylab("正确的无关变量比例")+
  theme(legend.position = "top")
# p3: mean squared coefficient-estimation bias.
p3<-ggplot(data=Bias2,aes(x=set,y=Bias2,group=Method)) +
  geom_line(aes(colour=Method)) +
  geom_point(size=6,aes(shape=Method,colour=Method)) +
  xlab("snr")+ylab("平均系数估计偏差平方")+
  theme(legend.position = "top")
# p4: mean squared prediction error.
p4<-ggplot(data=Pre2,aes(x=set,y=Pre2,group=Method)) +
  geom_line(aes(colour=Method)) +
  geom_point(size=6,aes(shape=Method,colour=Method)) +
  xlab("snr")+ylab("平均预测误差平方")+
  theme(legend.position = "top")
# p5: number of selected variables.
p5<-ggplot(data=variable,aes(x=set,y=VariableNum,group=Method)) +
  geom_line(aes(colour=Method)) +
  geom_point(size=6,aes(shape=Method,colour=Method)) +
  xlab("snr")+ylab("筛选变量个数")+
  theme(legend.position = "top")
# Render each figure.
p1
p2
p3
p4
p5
|
9f9a50f30214d09d4f64cd6496f56a3c6677210e
|
6f6b4b068726c0e7f241b9682c2cf412804218d6
|
/dataset_testing/food_consumption.R
|
15be62e3d4d31ba5f63ed9c20f994dff58de3f90
|
[] |
no_license
|
calvin-munson/R-DataScience-workshops
|
7aeb10b87ec8ebfcc85867ccb3af21b5e6524323
|
3f6f85ff73d2e6c7b14beb845f2c67109f5d9ec6
|
refs/heads/master
| 2023-09-01T17:35:32.902792
| 2023-08-29T21:06:04
| 2023-08-29T21:06:04
| 286,129,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,918
|
r
|
food_consumption.R
|
##
##
## Workshop #TBD: Food Consumption!
##
## Objective: TBD
##
## Authors: Calvin J. Munson
##
## Date: TBD
##
##
##
# 0. Dependencies ---------------------------------------------------------
# BUG FIX: the script calls `%>%`, ggplot() and dplyr verbs but never
# attached the packages; load them so the script runs standalone.
library(ggplot2)
library(dplyr)

# 1. Read in the data -----------------------------------------------------
# TidyTuesday 2020-02-18: per-country food consumption and CO2 emissions.
food_consumption <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-18/food_consumption.csv')

# 2. Explore the data -----------------------------------------------------
food_consumption

## What values do we have in each column?

# Country
food_consumption$country %>% unique()

# Food category
food_consumption$food_category %>% unique()

# Distribution of consumption (kg/person/year)
food_consumption %>%
  ggplot(aes(consumption)) +
  geom_histogram(bins = 100)

# Distribution of emissions (Kg/person/year)
food_consumption %>%
  ggplot(aes(co2_emmission)) +
  geom_histogram(bins = 100)

# 3. Calculate kg of emissions per kg of consumption ----------------------
food_emissions <- food_consumption %>%
  mutate(co2_per_consumption = co2_emmission/consumption)

# 4. What food categories lead to the most emissions? ---------------------
food_emissions %>%
  ggplot(aes(food_category, co2_per_consumption)) +
  geom_boxplot() +
  coord_flip()

food_emissions %>%
  ggplot(aes(co2_per_consumption, fill = ..x..)) +
  geom_histogram() +
  facet_wrap(food_category ~., scales = "free")

# 5. Consumption vs emissions ---------------------------------------------
## Is there a relationship between the consumption of an item and its emissions for a given country?

# First, only x and y
food_consumption %>%
  ggplot(aes(consumption, co2_emmission)) +
  geom_point()

# What could be driving these very specific changes??
# Add color to food category!
food_consumption %>%
  ggplot(aes(consumption, co2_emmission, color = food_category)) +
  geom_point()

# Introduce facet_wrap
food_consumption %>%
  ggplot(aes(consumption, co2_emmission, color = food_category)) +
  geom_point() +
  facet_wrap(food_category ~.)

# Introduce freeing the scales
food_consumption %>%
  ggplot(aes(consumption, co2_emmission, color = food_category)) +
  geom_point() +
  facet_wrap(food_category ~., scales = "free")

# X. Fancy graph exploration ----------------------------------------------
food_consumption %>%
  ggplot(aes(x = food_category, y = consumption, fill = consumption)) +
  geom_jitter(width = .15, size = 3, shape = 21, alpha = .6) +
  coord_flip() +
  scale_fill_gradient2(low = "red4", mid = "white",
                       high = "blue3", midpoint = 50) +
  theme_minimal() +
  theme(panel.grid.minor.x = element_blank(),
        panel.grid.major.y = element_blank(),
        axis.text.x = element_text(size = 10),
        axis.text.y = element_text(size = 10),
        axis.title.y = element_text(size = 12),
        axis.title.x = element_text(size = 12),
        legend.position = "none")
|
d06ba2527725cb961f2022f9738bbc66dbeb0305
|
f9b2ad8b55c0bc6cc38055641c5f3c95faf9c6c7
|
/R/put_bucket.R
|
2b05609ccddb34c4a08cf97a386853bd09bf8dfa
|
[] |
no_license
|
nagdevAmruthnath/minio.s3
|
e488e8ef54f50253dad9ea9e6ad4285338a5b448
|
3345f005cec34c456d144e020bff34914c327636
|
refs/heads/master
| 2023-08-08T14:47:32.407089
| 2021-11-11T21:34:19
| 2021-11-11T21:34:19
| 217,528,395
| 13
| 7
| null | 2023-07-14T13:16:03
| 2019-10-25T12:25:34
|
R
|
UTF-8
|
R
| false
| false
| 2,570
|
r
|
put_bucket.R
|
#' @title Create bucket
#' @description Creates a new S3 bucket.
#' @template bucket
#' @template acl
#' @param headers List of request headers for the REST call.
#' @param use_https True if connection is HTTPS and False if connection is HTTP
#' @template dots
#' @return \code{TRUE} if successful.
#' @details Bucket policies regulate who has what access to a bucket and its contents. The \code{header} argument can beused to specify \dQuote{canned} policies and \code{\link{put_bucket_policy}} can be used to specify a more complex policy. The \href{https://awspolicygen.s3.amazonaws.com/policygen.html}{AWS Policy Generator} can be useful for creating the appropriate JSON policy structure.
#' @examples
#' \dontrun{
#' put_bucket("examplebucket")
#'
#' # set a "canned" ACL to, e.g., make bucket publicly readable
#' put_bucket("examplebucket", headers = list(`x-amz-acl` = "public-read"), use_https=F)
#'
#' }
#' @references
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html}{API Documentation}
#' \href{https://awspolicygen.s3.amazonaws.com/policygen.html}{AWS Policy Generator}
#' @seealso \code{\link{bucketlist}}, \code{\link{get_bucket}}, \code{\link{delete_bucket}}, \code{\link{put_object}}
#' @export
put_bucket <-
  function(bucket,
           acl = c("private", "public-read", "public-read-write",
                   "aws-exec-read", "authenticated-read",
                   "bucket-owner-read", "bucket-owner-full-control"),
           headers = list(),
           use_https = FALSE,
           ...){
    # The bucket's region comes from the environment, not from an argument.
    region = Sys.getenv("AWS_DEFAULT_REGION")
    # NOTE(review): `acl` is documented above but never used in this body --
    # canned ACLs only take effect when supplied via `headers` (x-amz-acl).
    # Confirm whether match.arg(acl) plus a header was intended.
    # XML request body pinning the bucket's location constraint.
    b <- paste0('<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><LocationConstraint>',
                region, '</LocationConstraint></CreateBucketConfiguration>')
    # Issue the PUT; endpoint and credentials are read from environment vars.
    r <- s3HTTP(verb = "PUT",
                bucket = bucket,
                request_body = b,
                headers = headers,
                check_region = FALSE,
                encode = "raw",
                write_disk = NULL,
                accelerate = FALSE,
                dualstack = FALSE,
                parse_response = TRUE,
                url_style = c("path", "virtual"),
                base_url = Sys.getenv("AWS_S3_ENDPOINT"),
                verbose = getOption("verbose", FALSE),
                region = Sys.getenv("AWS_DEFAULT_REGION"),
                key = Sys.getenv("AWS_ACCESS_KEY_ID"),
                secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"),
                session_token = NULL,
                use_https = use_https)
    # NOTE(review): the response `r` is discarded and TRUE is returned
    # unconditionally; this is only correct if s3HTTP() errors on failure --
    # verify.
    return(TRUE)
  }
|
8c58318c07d9213db554225107b1fff9292919de
|
b34820260d5e39442cf6f07be731fcc5bf2b93e7
|
/R/graphs.R
|
0ac3860bf3e705636f08da4e757ca6b76a113b6d
|
[] |
no_license
|
KBillyPush/-
|
fa093606d4e6b28836b317bce5ad918b6b62c9ef
|
39354a6472c9516d96ff4791a42cca3ec0a53210
|
refs/heads/master
| 2021-01-01T17:14:32.403760
| 2017-07-22T20:35:09
| 2017-07-22T20:35:09
| 98,032,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,819
|
r
|
graphs.R
|
install.packages('UScensus2010')
library(UScensus2010)
library(scales)
library(rvest)
library(readxl)
edu <- read_excel('dobis.xlsx')
url <- 'http://www.governing.com/topics/urban/gov-majority-minority-populations-in-states.html'
pop <- url %>%
read_html %>%
html_nodes(xpath = '//*[@id="inputdata"]') %>%
html_table %>%
as.data.frame(stringsAsFactors = F) %>%
mutate_at(vars(-State), parse_number)
pop <- url %>%
read_html %>%
html_nodes(xpath = '/html/body/div[2]/div/div/section/div/div[2]/div[2]/div[1]/div[1]/div/div[11]/div/div/div/div/table') %>%
html_table %>%
as.data.frame(stringsAsFactors = F) %>%
mutate_at(vars(-State), parse_number)
names(pop) <- c('state', 'minority_share', 'white', 'hispanic', 'black', 'asian')
efficiencies %>%
left_join(edu, by = c('name' = 'state')) %>%
mutate(party = ifelse(efficiency < 0, 'Districting benefits Republicans', 'Districting benefits Democrats'),
tooltip = name) %>%
hchart(hcaes(x = pct_grads_10, y = efficiency, group = party), type = 'scatter') %>%
hc_tooltip(formatter = JS(paste0("function() {return this.point.tooltip;}")), useHTML = T) %>%
hc_colors(c('blue', 'red')) %>%
hc_title(text = 'Party efficiency gap is correlated with education') %>%
hc_yAxis(title = list(text = 'Effeciency Gap, 2012 Presidential Election')) %>%
hc_xAxis(title = list(text = 'Percentage of Adults aged 25-34 with a Postsecondary Degree, 2010')) %>%
hc_credits(
enabled = TRUE,
text = "Sources: Deparment of Education and Harvard Dataverse",
href = "https://www.ed.gov/news/press-releases/new-state-state-college-attainment-numbers-show-progress-toward-2020-goal") %>%
hc_add_theme(hc_theme_538()) %>%
saveWidget(file = 'graph.html')
|
225234d94b3893428bfe824e2a098db38c293945
|
94399d381681873b8fd660130ce66a48d76a482e
|
/man/make_docker_names.Rd
|
087567d775270be10ba112d69b6085eaca5bbe9a
|
[
"Apache-2.0"
] |
permissive
|
mikemahoney218/proceduralnames
|
ad890cf9de372f815ae182f6efbd1fc9e17cd32a
|
b4557f8b42f5c7c966c33cf81e0892b88ee72779
|
refs/heads/main
| 2023-05-23T18:36:59.277295
| 2022-08-11T14:19:15
| 2022-08-11T14:19:15
| 322,067,273
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 831
|
rd
|
make_docker_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_names.R
\name{make_docker_names}
\alias{make_docker_names}
\title{Generates a random name from the list of Docker adjectives and surnames.}
\usage{
make_docker_names(n, retry = FALSE, sep = "_")
}
\arguments{
\item{n}{The number of random names to be generated.}
\item{retry}{If `TRUE`, a random integer between 1 and 10 is appended to each
generated name.}
\item{sep}{A character string to separate the terms. Not `NA_character_`.}
}
\value{
A random name formatted as "adjective_surname" (for example,
"focused_turing").
}
\description{
This function generates `n` random names, each combining a single adjective
from [docker_adjectives] and a surname from [docker_names].
}
\examples{
make_docker_names(1)
make_docker_names(2, retry = TRUE)
}
|
f44f1686abf9b78ed22d5ce562a69c910a5c85b2
|
424a109c5f16ab0417c7f9ecc4fded3c0f38ae14
|
/utils/sql_utils.r
|
6a8ceb881487e36191827303093b1ad0989ba74f
|
[] |
no_license
|
adrianalbert/EnergyAnalytics
|
f784aca1e549be96b865db89f2190d2dd7566f83
|
39a5d5a6ee05a643ab723d4ef8d864282870cec8
|
refs/heads/master
| 2020-05-29T08:51:31.888860
| 2016-03-21T15:03:47
| 2016-03-21T15:03:47
| 7,062,053
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,639
|
r
|
sql_utils.r
|
# sql_utils.r
#
# Utility functions for accessing data off MySQL database.
#
# Adrian Albert
#
# Last modified: November 2012.
# --------------------------------------------------------
# Functions to perform query on database
# -------------------------------------
require(RMySQL)
# open MySQL connection
# Returns a live RMySQL connection to `dbname` on sssl-cyclops.stanford.edu;
# the caller is responsible for dbDisconnect().
# SECURITY(review): default credentials are hard-coded here -- move them to
# environment variables or a config file kept out of version control.
db.conn = function(dbname = 'pge_res', verbose=F, user = 'adrian', password = 'xmenxmen') {
  # NOTE(review): the status message mentions the "weather" database although
  # the default dbname is pge_res -- presumably legacy text; confirm.
  if (verbose) cat('Connecting to PGE weather database...')
  db.con <- dbConnect(dbDriver("MySQL"),
                      host = "sssl-cyclops.stanford.edu",
                      user = user, password = password,
                      dbname = dbname)
  return (db.con)
}
# Test:
# db_cons = open.db.conn('PGE_SAM', user = 'adalbert', password = 'adrian')
# perform query
# Run a SQL query against the given database and return the result, or an
# empty vector if the query failed or returned nothing.
#
# Args:
#   query          -- SQL string to execute.
#   db             -- database name (default "pge_res").
#   verbose        -- print the query before running it.
#   user, password -- MySQL credentials forwarded to db.conn().
run.query <- function(query, db = 'pge_res', verbose = FALSE, user = 'adrian', password = 'xmenxmen') {
  if (verbose) cat(paste('Performing query:', query))
  data <- c()
  tryCatch({
    # BUG FIX: the original called open.db.conn(), which is not defined in
    # this file; the connection helper defined above is db.conn(). (If
    # open.db.conn() exists elsewhere in the project, this keeps behaviour
    # identical since db.conn() is its renamed successor per the comments.)
    con <- db.conn(db, verbose = verbose, user = user, password = password)
    res <- dbGetQuery(con, query)
    if (length(res) > 0) data <- res
  },
  error = function(e) { print(e) },
  finally = {
    # Close any open result set before disconnecting, then release the handle.
    # NOTE(review): if the connection itself failed, `con` does not exist and
    # this cleanup will itself error -- same as the original behaviour.
    resultSet <- dbListResults(con)
    if (length(resultSet) > 0) {
      dbClearResult(resultSet[[1]])
      rm(resultSet)
    }
    dbDisconnect(con)
    rm(con)
  })
  return(data)
}
# # Test:
# raw_data = run.query("select * from pge_res_final3_unique LIMIT 0,1000")
# raw_data = subset(raw_data, PER_ID == 8420562867)
|
1112fa272387a9c847508b3b190ae3bbc25d19e5
|
2b837f06f5b756dd9da76f613c5d58308a01f828
|
/man/lagInfluDataFHI_flyttet.Rd
|
1bf04a44b4170e6c9c08bb441c39ea8d4a425842
|
[
"MIT"
] |
permissive
|
Rapporteket/korona
|
f9f495af8a9aeeb092c71ff5276c384cf6b8e719
|
b54d69883d30405a98ba133fd585a47fb6cfae96
|
refs/heads/rel
| 2023-07-07T18:41:47.818867
| 2023-06-27T10:21:20
| 2023-06-27T10:21:20
| 250,873,426
| 0
| 0
|
NOASSERTION
| 2022-03-09T15:52:29
| 2020-03-28T19:09:35
|
R
|
UTF-8
|
R
| false
| true
| 347
|
rd
|
lagInfluDataFHI_flyttet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InfluensadataFHI.R
\name{lagInfluDataFHI_flyttet}
\alias{lagInfluDataFHI_flyttet}
\title{Henter data og velger variabler for overføring til FHI}
\usage{
lagInfluDataFHI_flyttet()
}
\value{
data
}
\description{
Henter data og velger variabler for overføring til FHI
}
|
70d61b00e975325d2bc838640ad954a0647a5fb3
|
fcd75cb3da22da9651e877950cc7dc96bdb1369c
|
/OTHERS/r-scripts/rq2-fireplace.R
|
5eaa26ca9eabb6fef9caf5288d5d8b261648e0e4
|
[] |
no_license
|
adini121/install-scripts
|
4be62db18cabcdf2a722f6e08a09967c15c5a152
|
c86f5f33cf701d94f0613ea5bb4ccf572978cec7
|
refs/heads/master
| 2021-03-27T10:41:59.683556
| 2016-03-16T02:53:30
| 2016-03-16T02:53:30
| 40,886,552
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
rq2-fireplace.R
|
library(lars)
# Lasso path analysis of fireplace robustness factors.
# NOTE(review): absolute, user-specific path -- breaks on other machines.
# load data from csv file
fireplaceData <- read.csv('/Users/adityanisal/Dropbox/ExtractedResultFiles/CSV/combined-fireplace.csv',header=TRUE, sep=",")
# display data
fireplaceData
# remove "null" columns (columns with all 0s)
fireplaceDataNonZero <- fireplaceData[,colSums(fireplaceData) !=0]
fireplaceDataNonZero
# Take all columns (features) except the last solutionVector
fireplace_x_var <- fireplaceDataNonZero[,c(1:11)]
fireplace_x_var
# solution vector (column 12)
fireplace_y_var <- fireplaceDataNonZero[,12]
fireplace_y_var
# normalize features to mean = 0 and sd = 1
fireplace_fin_x = scale(fireplace_x_var,center = TRUE,scale = TRUE)
fireplace_fin_x
names(fireplace_x_var)
# create lasso model using lars
fireplace_fit <- lars(fireplace_fin_x,fireplace_y_var,type = "lasso")
# show steps
fireplace_fit
# show standardized coefficients
coef(fireplace_fit)
# plot lasso path
# NOTE(review): edit(plot.lars) opens an interactive editor and assigns
# whatever the user saves, so this script cannot run non-interactively and
# its plot function is not reproducible.
fireplacemyplot.lars <- edit(plot.lars)
dev.new()
fireplacemyplot.lars(fireplace_fit,breaks = FALSE,lty=1,cex.lab=0.9,cex.axis=0.7,ylim = c(-1.5,1))
# text(fireplace_fin_x,fireplace_y_var)
# Legend labels of the form "<column index>-<factor name>".
fireplacelables <- names(fireplace_x_var)
fireplacelab = paste(col=1:length(fireplacelables),sep = "-",fireplacelables)
fireplacelab
# legend('topleft', legend=fireplacelables,legend=(col=1:length(fireplacelables)), col=1:length(fireplacelables),cex=0.7, lty=1,bty = "n")
legend('topleft', title="Robustness Factors",pt.lwd = 2,pt.cex = 2,legend=fireplacelab, col=1:length(fireplacelab),cex=0.8, lty=1,bty = "n")
fireplacelables
length(fireplacelables)
|
045ee4683dfd17aeae17722c43f2d0a7d594eb8c
|
1f9ea9bb2b10b7dc3a65b5f1824fd9deafa803ab
|
/cachematrix.R
|
d21f38605392d1025d7c0a5f7e9ed35b9c8c9f14
|
[] |
no_license
|
tomzisis/ProgrammingAssignment2
|
e9d213a382cf21fde7c9738e7fe8a6bcd320b802
|
eb9b59436cc584e29f890e157799e48575886ab1
|
refs/heads/master
| 2020-12-26T13:04:54.761916
| 2015-07-26T20:21:41
| 2015-07-26T20:21:41
| 34,408,728
| 0
| 0
| null | 2015-04-22T18:39:40
| 2015-04-22T18:39:39
| null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
cachematrix.R
|
## The code below creates a special matrix object and then calculates
## and caches its inverse matrix
## The makeCacheMatrix function creates a special "matrix" with the form
## of a list , which stores the value of a matrix and its inverse
# Build a special "matrix": a list of closures around a matrix `x` and a
# cached inverse, exposing set/get/setinverse/getinverse accessors.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the underlying matrix changes.
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    inv_cache <<- solve
  }
  getinverse <- function() {
    inv_cache
  }
  # Expose the four accessors as a named list.
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() on the first call and serving
## the cached copy (with a message) on subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached matrix")
  cached
}
|
241edfa1024ac56a3d02b5bca10718b9556ee44c
|
3830551a6c5213a309e9d4aa40fd54131c5fdcbb
|
/tests/testthat/test-flag_item.R
|
576ee28ac71cf24effd5accbf2442114a2c63cab
|
[
"MIT"
] |
permissive
|
b-rodrigues/paint
|
765df57b266bdc078c9be3da19a44548f566a630
|
163e333d0ce785b797ea57389176e037817fa24f
|
refs/heads/master
| 2023-07-09T22:00:49.768581
| 2021-08-08T10:56:54
| 2021-08-08T10:58:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
test-flag_item.R
|
# Regression tests for flag_item(): "problem" scalar values (NULL, NA, NaN,
# Inf) are flagged, zero-length vectors are not, and length > 1 input errors.
test_that("flag_item", {
  # Pin display-affecting options so results do not depend on the user's
  # session configuration; with_options restores them afterwards.
  rlang::with_options(
    cli.num_colors = 256,
    paint_n_rows = NULL,
    paint_max_width = NULL,
    paint_palette = NULL,
    paint_align_row_head = NULL,
    paint_dark_mode = NULL,
    .expr = {
      # Vectors of length > 1 are an error, not a flag.
      expect_error(flag_item(c(1,2,NA)), regexp = "You gave me an item of length > 1 to flag")
      expect_true(flag_item(NULL))
      # Zero-length input is not flagged...
      expect_false(flag_item(character(0)))
      # ...but missing / non-finite scalars are.
      expect_true(flag_item(Inf))
      expect_true(flag_item(NA))
      expect_true(flag_item(NaN))
    })
})
|
1d8c2ab7042c819e8b4415e4f84e8dde1c623a2c
|
10e6895fffcba7bc2fd7c8bc1bd90f5bf1321637
|
/Homework5.R
|
70f50bca0394485d75e6d1235d6acaad32757053
|
[] |
no_license
|
alpallion/STAT-3355-Homeworks
|
3e271ed2ba23651cc4ef2b3d5f486fa788fb9ce9
|
f044aa54a68372775a24e524dfd14221d03782d9
|
refs/heads/master
| 2021-03-05T10:55:43.566706
| 2020-04-13T17:57:32
| 2020-04-13T17:57:32
| 246,116,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,157
|
r
|
Homework5.R
|
# Homework 5 -- exploring the ggplot2 `diamonds` data set.

# NOTE(review): package installation is one-time interactive setup and should
# not run on every execution of the script, so it is commented out.
# install.packages("ggplot2")

# Load the package
library(ggplot2)
# Load the diamonds dataset
data("diamonds")

# PROBLEM 1
# The carat distribution is heavily skewed (long right tail), so a generous
# bin count represents its shape well; Rice's rule k = ceiling(2 * n^(1/3))
# is also simple.
k <- ceiling(2*(length(diamonds$carat)^(1/3)))

# (a) There are more lower weight diamonds than higher weight ones.
ggplot(data = diamonds, aes(x = carat)) + geom_histogram(bins = k) +
  labs(x = "Carat", y = "Count", title = "Relationship Between the Amount of Diamonds Within Specific Carat Ranges")

# (b) The better the clarity, the more likely the diamond is to have a lower weight.
ggplot(data = diamonds) + geom_point(mapping = aes(x = carat, y = price, color = clarity))

# (c) Same relationship with transparency and per-clarity GAM smoothers.
# BUG FIX: the original appended scale_fill_discrete(name = 'Clarity'), but no
# fill aesthetic is mapped (colour is), so the legend title never changed;
# labs(color = ...) renames the colour legend without clobbering the brewer
# palette.
ggplot(data = diamonds) + geom_point(mapping = aes(x = carat, y = price, color = clarity), alpha = 0.3) +
  scale_colour_brewer(palette = "Dark2") +
  geom_smooth(mapping = aes(x = carat, y = price, color = clarity), method = 'gam') +
  labs(x = "Carat", y = "Price", title = "Relationship between the Price and Carat of Specific Clarity of Diamonds") +
  theme_minimal() + labs(color = 'Clarity')

# (d) Facetted by clarity.
# BUG FIX: scale_colour_discrete (not scale_fill_discrete) matches the mapped
# colour aesthetic, so the legend title is actually applied.
ggplot(data = diamonds) + geom_point(mapping = aes(x = carat, y = price, color = clarity)) + facet_wrap(~clarity) +
  labs(x = "Carat", y = "Price", title = "Relationship between the Price and Carat of Specific Clarity of Diamonds") +
  theme_minimal() + scale_colour_discrete(name = 'Clarity')

# (e) Four views of carat by cut; each suggests better cuts skew lighter.
ggplot(data = diamonds, mapping = aes(y = as.factor(cut), x = carat, color = cut)) + geom_point()   #4
ggplot(data = diamonds, mapping = aes(y = as.factor(cut), x = carat, color = cut)) + geom_jitter()  #3
ggplot(data = diamonds, mapping = aes(y = as.factor(cut), x = carat, color = cut)) + geom_boxplot() #2
ggplot(data = diamonds, mapping = aes(y = as.factor(cut), x = carat, color = cut)) + geom_violin()  #1

# PROBLEM 2
# (a) Counts per clarity, dodged and then facetted by cut.
# BUG FIX: "Quanitity" -> "Quantity" in the plot titles.
ggplot(data = diamonds) + geom_bar(mapping = aes(x = clarity, fill = as.factor(cut)), position = 'dodge') +
  labs(x = "Clarity", y = "Count", title = "Relationship between the Quantity and Clarity of Specific Cuts of Diamonds") +
  theme_minimal() +
  scale_fill_discrete(name = 'Cut')

ggplot(data = diamonds) + geom_bar(mapping = aes(x = clarity, fill = as.factor(cut)), position = 'dodge') +
  facet_grid(cut ~.) +
  labs(x = "Clarity", y = "Count", title = "Relationship between the Quantity and Clarity of Specific Cuts of Diamonds") +
  theme_minimal() +
  scale_fill_discrete(name = 'Cut')

# (b) Per-clarity linear fits of price on carat.
# BUG FIX: colour (not fill) is the mapped aesthetic here.
ggplot(data = diamonds) + geom_smooth(mapping = aes(x = carat, y = price, color = clarity), method = lm, se = FALSE) +
  labs(x = "Carat", y = "Price", title = "Relationship between the Price and Carat of Specific Clarities of Diamonds") +
  theme_minimal() +
  scale_colour_discrete(name = 'Clarity')

# (c) Price by clarity, boxed per cut.
ggplot(data = diamonds) + geom_boxplot(mapping = aes(x = clarity, y = price, color = cut)) +
  labs(x = "Clarity", y = "Price", title = "Relationship between the Price and Clarity of Specific Cuts of Diamonds") +
  theme_minimal() +
  scale_colour_discrete(name = 'Cut')

# (d) Price by rounded carat (diamonds under 4 carats only).
ggplot(data = subset(diamonds, round(carat) < 4)) +
  geom_boxplot(mapping = aes(x = as.factor(round(carat)), y = price, color = cut)) +
  labs(x = "Carat", y = "Price", title = "Relationship between the Price and Carat of Specific Cuts of Diamonds") +
  theme_minimal() +
  scale_colour_discrete(name = 'Cut')

# (e) Depth density per cut.
# NOTE(review): the original appended scale_fill_discrete(name = 'Cut'), but
# no discrete fill aesthetic is mapped in this plot, so the call had no
# effect and is dropped.
ggplot(data = diamonds, aes(x = depth)) + geom_histogram(aes(y = ..density..), binwidth = 0.1) + facet_grid(cut ~.) +
  labs(x = "Depth", y = "Density", title = "Relationship between the Density of Depth of Specific Cuts of Diamonds") +
  theme_minimal()
|
c296dd47af9f0c355cda10918d8c4a947aae66a7
|
a883b2b1fcf12369cbb9aaa148ee9f8ff0e7c486
|
/pnps/Old/pnps.gene.R
|
13ad75b79526fd24b6527dab055299284784c53e
|
[] |
no_license
|
connor122721/Daphnia-analyses
|
a8cad181a9ef938bee789435f5c2ac6633fc67b2
|
be838edc10ad6ab97e1fe8f2155c0943ee8f7258
|
refs/heads/master
| 2023-01-28T19:44:02.428198
| 2020-12-17T18:32:33
| 2020-12-17T18:32:33
| 264,460,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,497
|
r
|
pnps.gene.R
|
# Libraries
library(data.table)
library(SeqArray)
library(foreach)
library(tidyverse)
library(SNPRelate)
library(seqinr)

# Set working directory.
# NOTE(review): setwd() in a script hurts portability; kept as-is for cluster
# use, but prefer absolute paths or a project-root helper.
setwd("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/")

# Load meta-data file (clone-level sample information).
samps <- fread("June2020/Superclones201617182019withObtusaandPulicaria_kingcorr_20200623_wmedrd.txt")
# Keep independent samples without a low-read-depth flag.
samps <- samps[Nonindependent==0][is.na(LowReadDepth)]

# Remove OO clones sampled from DBunk in 2017.
# NOTE(review): the original comment said "Only OO clones from 2017 DBunk",
# but the negated %in% EXCLUDES them -- the code is taken as authoritative.
samps <- samps[!clone %in% c(samps[SC%in%c("OO")][year==2017][population=="DBunk"]$clone)]

# Gene analyses: protein lengths per transcript from the annotated FASTA;
# `splice` is the transcript suffix after the dash in the protein name.
pro <- read.fasta("/project/berglandlab/Karen/genomefiles/Daphnia.proteins.aed.0.6.fasta", seqtype="AA")
pro <- data.table(gene.name=getName(pro), AA.length=getLength(pro))
pro <- pro[,splice:=tstrsplit(gene.name, "-")[[2]]]

# Open the SnpEff-annotated genotype GDS file.
genofile <- seqOpen("June2020/MapJune2020_ann.seq.gds")

# High-quality SNP set (variant ids) all analyses are restricted to.
snpFilter <- fread("June2020/finalsetsnpset01pulex_table_20200623")
pnps.fun <- function(SC.i, sample.n) {
SC.i="A"; sample.n=15;j=1
# Filter
seqResetFilter(genofile)
seqSetFilter(genofile, sample.id=sample(samps[population%in%c("D8", "DBunk", "DCat", "Dcat", "D10")][SC==SC.i]$clone, sample.n),
variant.id=snpFilter$variant.ids)
# Extract data
snp.dt <- data.table(variant.id=seqGetData(genofile, "variant.id"),
position=seqGetData(genofile, "position"),
chr=seqGetData(genofile, "chromosome"),
alleleCount=seqAlleleCount(genofile, ref.allele=1L),
af=seqAlleleFreq(genofile, ref.allele=1L))
# Gets annotations
tmp <- seqGetData(genofile, "annotation/info/ANN")
len1 <- tmp$length
len2 <- tmp$data
snp.dt1 <- data.table(len=rep(len1, times=len1),
ann=len2,
variant.id=rep(snp.dt$variant.id, times=len1))
# Extract SNP class and genes
snp.dt1[,class:=tstrsplit(snp.dt1$ann,"\\|")[[2]]]
snp.dt1[,gene:=tstrsplit(snp.dt1$ann,"\\|")[[4]]]
snp.dt1[,feature.id:=tstrsplit(snp.dt1$ann,"\\|")[[7]]]
# Keep first row - most influential SNP class presented by SeqArray
snp.dt1 <- data.table(snp.dt1 %>% group_by(variant.id) %>%
filter(row_number() == 1) %>% select(-c(ann)))
# Merge with gene data
snp.dt1.an <- merge(snp.dt1, snp.dt, by="variant.id")
# Summarize
m <- snp.dt1.an[class %in% c("missense_variant", "synonymous_variant")][,
list(pn = sum(class=="missense_variant"),
ps = sum(class=="synonymous_variant"),
SC = SC.i, rep = j, sample.n,
variant.id, position, class,
chr, af, MAC = alleleCount),
list(gene, feature.id)]
# Final merge
m <- merge(m, pro, by.x="feature.id", by.y="gene.name")
# Return
return(m)
}
# Clones of interest (superclone identifiers to analyse).
clones <- c("A", "B", "C", "D", "E", "F", "OO")

# Ten replicate subsamples of 15 clones per superclone, stacked row-wise.
# NOTE(review): verify pnps.fun honours these arguments and that the
# replicate index `j` is reflected in its output -- an earlier revision
# contained a debug line overriding the arguments, and `j` is not passed
# explicitly here.
pnps.out <- foreach(j = 1:10, .combine="rbind") %do% {
  o <- foreach(i = clones, .combine="rbind") %do% {
    message(paste("Sample", i, "#", j, sep=" "))
    pnps.fun(SC.i=i, sample.n=15)
  }
}

# Persist the combined gene-level pn/ps table.
save(pnps.out, file="/project/berglandlab/connor/pnps/gene.pnps.15samp.10rep.filt.Rdata")
|
766611bfb3aa5d8381064c294f4d14ecf7a0218c
|
f4f1f07f1f3544866a58814f43d1c6821fb0a444
|
/tests/testthat/test_reifyObject.R
|
3840b805f101a3d6f89fcbf33364a7411ed3ef94
|
[] |
no_license
|
cran/wyz.code.testthat
|
b909448e0efe9111174f75990c252ddb608b69b9
|
17a3a5a478c3fd18ddd2ff2f25b7c4405772199b
|
refs/heads/master
| 2021-10-08T09:47:12.339645
| 2021-10-06T05:50:02
| 2021-10-06T05:50:02
| 200,671,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 920
|
r
|
test_reifyObject.R
|
context("reifyObject")
# Load the sample classes (environment, S3, S4, RC, R6) shipped with
# wyz.code.offensiveProgramming so every OO flavour can be exercised.
source(file.path(system.file(package = "wyz.code.offensiveProgramming"),
                 'code-samples', 'classes', 'sample-classes.R'))
# One instance per OO system under test.
obj <- list( MyEnv(),
             Bu_S3(),
             new('Person_RC', name = 'neonira'),
             new('Person_S4', name = 'neonira'),
             Wyx(1:7),
             Accumulator_R6$new()
)
#print(obj)
# Reify each object, capturing any error as the list element instead of
# aborting the whole test file. NOTE(review): uses the internal `:::`
# accessor, so this test is coupled to a non-exported API.
robj <- lapply(obj, function(e) tryCatch(wyz.code.testthat:::reifyObject(e, '', ''),
                                         error = function(e) e))
#print(obj)
test_that("reifyObject", {
  # A plain environment has no reifiable class -> NA is expected.
  expect_true(is.na(reifyObject(new.env(), '', '')))
  # For each sample object, the reified code should mention the object's
  # class name somewhere in its `to_reify` payload.
  sapply(seq_len(length(robj)), function(k) {
    ock <- wyz.code.offensiveProgramming::getObjectClassNames(obj[[k]])$classname
    b <- grepl(ock, as.character(robj[[k]]$to_reify))[1]
    #cat('\n', k, ock, 'robj', as.character(robj[[k]]$to_reify), 'result', b, '\n')
    expect_true(b)
  })
})
|
1bab3bf113fe528bb787996cceb71260ad8b444c
|
d98a0e786c7df047f6bac7896fa272048e940dff
|
/Rcodes/random_graphs_diffusion.R
|
a20ba7466aaa0c442355be2499b1deb9cee51ec1
|
[] |
no_license
|
JonathanRamos/dissertation
|
5c679263588dc4eb92a6a6072e70fb4f6f1cebbb
|
db40554388aa3d8fb8158fe188b40c4f52d60d37
|
refs/heads/master
| 2020-12-25T05:18:08.997550
| 2012-06-02T22:12:39
| 2012-06-02T22:12:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,147
|
r
|
random_graphs_diffusion.R
|
require("igraph")
source("diffusion_distances.R")
random.graph <- function(data, p){
  # Generate an Erdos-Renyi-style random adjacency matrix with independent
  # edge probability p. Only nrow(data) is used; the values in `data` are
  # ignored (the argument exists so callers can pass their data set).
  #
  # Args:
  #   data: matrix/data frame whose row count sets the number of nodes n.
  #   p:    scalar edge probability in [0, 1].
  # Returns: an n x n numeric 0/1 matrix with a zero diagonal.
  # NOTE: the matrix is generated entrywise, so it is not symmetric in
  # general (same as the original implementation).
  n <- nrow(data)
  # Draw all n*n Bernoulli(p) trials in one call. byrow = TRUE consumes the
  # RNG stream row by row, matching the original per-row runif(n) loop for
  # a given seed. This also removes the dependency on the non-base
  # `zeros()` helper the previous version relied on.
  W <- matrix(as.numeric(runif(n * n) <= p), nrow = n, ncol = n, byrow = TRUE)
  diag(W) <- 0
  ## B <- graph.adjacency(W,mode="undirected",diag=FALSE)
  ## plot.igraph(B, layout=layout.fruchterman.reingold,vertex.size=10)
  return(W)
}
diffusion.random.graph <- function(n,p,t,iterations){
  # For `iterations` Erdos-Renyi graphs G(n, p): compute diffusion
  # distances at time scale t, rebuild a 5-nearest-neighbour graph from
  # those distances, and accumulate the distribution of shortest-path
  # lengths (within the k-NN graph) between node pairs that were edges of
  # the original graph. The pooled counts are drawn as a bar plot.
  #
  # NOTE(review): `find()` below is used MATLAB-style (indices of TRUE
  # entries). base R's utils::find does something entirely different, so
  # this presumably relies on a helper from diffusion_distances.R or
  # pracma -- confirm before running.
  for(i in 1:iterations){
    G <- erdos.renyi.game(n,p)
    W <- get.adjacency(G,type="both")
    diffusion.struct <- diffusion.distance(W, t)
    dist <- as.matrix(diffusion.struct$dist)
    kNN <- 5
    for(j in 1:n){
      # Binarize row j: keep only the kNN smallest diffusion distances.
      # NOTE(review): the self-distance (0) is always among the smallest,
      # so each node effectively keeps kNN - 1 real neighbours -- confirm
      # this is intended.
      dist.j <- dist[j,]
      tmp <- sort(dist.j, index.return = TRUE)
      dist[j,] <- 0
      dist[j,tmp$ix[1:kNN]] <- 1
    }
    A <- graph.adjacency(W, mode="undirected",weighted=NULL, diag=FALSE)
    B <- graph.adjacency(dist,mode="undirected",weighted=NULL,diag=FALSE)
    B.all.pairs.shortest.path <- shortest.paths(B,mode="all")
    # Replace unreachable pairs (Inf) with a large finite sentinel so
    # table() can bin them.
    inf_idx <- find(B.all.pairs.shortest.path == Inf)
    B.all.pairs.shortest.path[inf_idx] <- 10000.0
    idx <- find(W > 0)  # linear indices of the original graph's edges
    tmp <- table(B.all.pairs.shortest.path[idx])
    if(i == 1){
      tsukue <- tmp
      ## all <- B.all.pairs.shortest.path
    }
    else{
      # Merge counts across iterations keyed on the distance value
      # (table names), summing duplicates.
      z <- c(tsukue,tmp)
      ## all <- c(all,B.all.pairs.shortest.path)
      tsukue <- tapply(z, names(z), sum)
    }
  }
  ## par(mfrow=c(1,1))
  ## plot.igraph(A,layout=layout.fruchterman.reingold,vertex.size=10)
  ## plot.igraph(B,layout=layout.fruchterman.reingold,vertex.size=10)
  ## plot(density(B.all.pairs.shortest.path[idx]),\
  ## main="Shortest path distance between original k-NN pairs",col="red")
  ## plot(density(as.data.frame(tsukue),
  ## main="Shortest path distance between original k-NN pairs", col = "green"))
  ## hist(all,main=sprintf("Distribution of shortest path distance between original k-NN pairs at time scale %d",t),freq=FALSE)
  ## lines(density(tsukue),col="red")
  barplot(tsukue,main=sprintf("Distribution of shortest path distance between original k-NN pairs at time scale %d",t))
  ## return(B)
}
## num.clust <- 20
## X <- 10*rnorm(50)
## Y <- 10*rnorm(50)
## data <- cbind(X,Y)
## labels <- seq(1,by=1,length.out = 50)
## W <- gaussian.similarity(data, epsilon = 4.0)
## length(find(W > 0.05))
## diffusion <- diffusion.distance(W, t = 5, k = 2, sparsity = FALSE, cutoff = 0)
## d.diffusion = diffusion$dist
## map <- cmdscale(d.diffusion, k = 2)
## ## tree.diffusion = hclust(as.dist(d.diffusion))
## ## cluster.diffusion = cutree(tree.diffusion, k = num.clust)
## ## data.table <- as.table(cbind(data, cluster.diffusion))
## clusplot(data, cluster.diffusion, color = TRUE, shade = FALSE, labels = 0, lines = 0)
## par(mfrow = c(1,2))
## plot(X,Y,pch=19,col=labels)
## #plot(X,Y, col = data.table[,3],pch=19, xlab = "X", ylab = "Y")
## plot(map[,1],map[,2],pch=19,col=labels)
## #text(map[,1],map[,2] + 0.05,labels)
## data <- clusters2.data(n = 40)
## df <- data.frame(x=data[,1],y=data[,2])
## W <- gaussian.similarity(data, epsilon = 0.57)
## diffusion.struct <- diffusion.distance(W, t = 5)
## df1 <- data.frame(x1=diffusion.struct$map[,2],y1=diffusion.struct$map[,3])
## W <- gaussian.similarity(data, epsilon = 0.58)
## diffusion.struct <- diffusion.distance(W, t = 5)
## df2 <- data.frame(x2=diffusion.struct$map[,2],y2=diffusion.struct$map[,3])
## W <- gaussian.similarity(data, epsilon = 0.60)
## diffusion.struct <- diffusion.distance(W, t = 5)
## df3 <- data.frame(x3=diffusion.struct$map[,2],y3=diffusion.struct$map[,3])
## W <- gaussian.similarity(data, epsilon = 0.62)
## diffusion.struct <- diffusion.distance(W, t = 5)
## df4 <- data.frame(x4=diffusion.struct$map[,2],y4=diffusion.struct$map[,3])
## W <- gaussian.similarity(data, epsilon = 0.64)
## diffusion.struct <- diffusion.distance(W, t = 5)
## df5 <- data.frame(x5=diffusion.struct$map[,2],y5=diffusion.struct$map[,3])
## dframe <- cbind(df,df1,df2,df3,df4,df5)
## g <- ggobi(dframe)
|
16a053fe8a76f7949e86211b74921338c94ed723
|
ed18176c49b90c242144b20e37e7e7be1a92dbe2
|
/man/BAC_binom.Rd
|
9fd61ab4ed743405ec6eaf381fa88bc2affd9a94
|
[] |
no_license
|
cran/BACCT
|
e01e5690798582ab803243e237a140d2f86cba35
|
c91515614860e47f5644ed213a07b7fd03c02485
|
refs/heads/master
| 2021-01-09T20:35:24.674069
| 2016-06-25T19:07:22
| 2016-06-25T19:07:22
| 61,953,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,131
|
rd
|
BAC_binom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BAC_binom.R
\name{BAC_binom}
\alias{BAC_binom}
\title{Bayesian Augmented Control for Binary Responses}
\usage{
BAC_binom(yh, nh, n1, n2, y1.range = 0:n1, y2.range = 0:n2, n.chain = 5,
tau.alpha = 0.001, tau.beta = 0.001, prior.type = "nonmixture",
criterion.type = c("diff", "prob"), prob.threshold, sim.mode = c("full",
"express"))
}
\arguments{
\item{yh, nh}{Vector of the numbers of events (subjects) in the historical
trial(s). Must be of equal length.}
\item{n1, n2}{Number of subjects in the control or treatment arm of the current
trial.}
\item{y1.range, y2.range}{Number of events in control or treatment arm of the
current trial. See "Details".}
\item{n.chain}{Controls the number of posterior samples. Each chain contains
20,000 samples.}
\item{tau.alpha, tau.beta}{Hyperparameters of the inverse gamma distribution
controling the extent of borrowing.}
\item{prior.type}{Type of prior on control groups. Currently, only the
inverse-gamma prior is implemented.}
\item{criterion.type}{Type of posterior quantities to be monitored. See
"Details."}
\item{prob.threshold}{For \code{criterion.type="prob"} only. See "Details".}
\item{sim.mode}{Simulation duration reduces greatly in \code{"express"}
mode, if treatment and control arms are independent. See "Details".}
}
\value{
An object of class "BAC".
}
\description{
Calling JAGS to implement BAC for binary responses
}
\details{
There are two types of posterior quantities for
\code{criterion.type} argument. With \code{"diff"} option, the quantity
computed is \eqn{p_{T} - p_{C}}; with \code{"prob,"} such quantity is
\eqn{pr(p_{T} - p_{C}>\Delta)}, where \eqn{\Delta} is specified by
\code{prob.threshold} argument.
By default, \code{y1.range} and \code{y2.range} cover all possible outcomes
and should be left unspecified in most cases. However, when \code{n1}
and/or \code{n2} is fairly large, it is acceptable to use a reduced range
that covers the outcomes that are most likely (e.g., within 95\% CI) to be
observed. This may help shorten the time to run MCMC.
Another way that can greatly shorten the MCMC running time is to specify
\code{"express"} mode in \code{sim.mode} argument. Express mode reduces the
number of simulations from \code{length(y1.range)*length(y2.range)} to
\code{length(y1.range)+length(y2.range)}. Express mode is proper when the
treatment arm rate is independent of control arm rate.
}
\examples{
\dontrun{
library(BACCT)
#borrow from 3 historical trials#
yh = c(11,300,52);nh = c(45,877,128)
#specify current trial sample sizes#
n1 = 20;n2 = 30
#Difference criterion type in full simulation mode#
obj1 = BAC_binom(yh=yh,nh=nh,n1=n1,n2=n2,n.chain=5,
criterion.type="diff",sim.mode="full")
#Probability criterion type in express simulation mode#
obj2 = BAC_binom(yh=yh,nh=nh,n1=n1,n2=n2,n.chain=5,
criterion.type="prob",prob.threshold=0.1,sim.mode="express")
#S3 method for class "BAC"
summary(obj1)
}
}
\author{
Hongtao Zhang
}
|
54938808f278f6b27978394b2b86a05a97462920
|
40080e787a5b57eb2d9a662018ead95375497d71
|
/plot3.R
|
b24843cd0c7316d35d57a0c7505ba6886c0bbd51
|
[] |
no_license
|
wintonlavier/ExData_Plotting1
|
6c110771590193c72b02ac05d50891ace51599c2
|
740e98bdd933d7ff87de826877c8bc696a152d48
|
refs/heads/master
| 2020-03-31T17:59:37.193340
| 2018-10-12T00:04:30
| 2018-10-12T00:04:30
| 152,441,971
| 0
| 0
| null | 2018-10-10T14:59:06
| 2018-10-10T14:59:05
| null |
UTF-8
|
R
| false
| false
| 854
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over 2007-02-01/02 from the UCI household
# power consumption data set, saved as plot3.png.
household_power_consumption <- read.csv("~/household_power_consumption.txt", sep=";")
household_power_consumption$Date <- as.Date(strptime(household_power_consumption$Date, "%d/%m/%Y"))
hcp <- household_power_consumption[household_power_consumption$Date=="2007-02-01"|household_power_consumption$Date=="2007-02-02",]
# Combine the Date and Time columns into a single POSIXct timestamp.
# (strptime ignores trailing text, so the stray "%H:%M:%S" literal the
# original pasted into the string was harmless; it is dropped here.)
hcp$DateTime <- as.POSIXct(paste(hcp$Date, as.character(hcp$Time)), format="%Y-%m-%d %H:%M:%S")
png(file="plot3.png")
# The sub-metering columns are read as factors (the raw file encodes
# missing values as "?"), so they must be converted factor -> character ->
# numeric; passing character vectors to plot()/lines() fails in xy.coords.
plot(hcp$DateTime, as.numeric(as.character(hcp$Sub_metering_1)), type="n", xlab="", ylab="Energy sub metering")
lines(hcp$DateTime, as.numeric(as.character(hcp$Sub_metering_1)))
lines(hcp$DateTime, as.numeric(as.character(hcp$Sub_metering_2)), col="red")
lines(hcp$DateTime, as.numeric(as.character(hcp$Sub_metering_3)), col="blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, col=c("black","red","blue"))
dev.off()
|
737a81137195dce12877259907323234e39480d4
|
8533da991709da4e39cdf6e809e3c3ee00386417
|
/R/Modularity.R
|
7df25715fc7d6b32021d0d61c4f83f8ed2e17cb5
|
[] |
no_license
|
mmantho/bctR
|
7b4043de3bef3cc1105739987f32da1256b50357
|
b3c1c1bdb76b1a6d94020497b478a292241264a0
|
refs/heads/master
| 2021-06-01T04:08:56.729690
| 2016-01-17T20:39:10
| 2016-01-17T20:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,817
|
r
|
Modularity.R
|
# code for Modularity functions
#' Louvain Algorithm on Undirected Signed Graphs
#'
#' The optimal community structure is a subdivision of the
#' network into non-overlapping groups of nodes in a way that
#' maximizes the number of within-group edges, and minimizes the
#' number of between-group edges. The modularity is a statistic
#' that quantifies the degree to which the network may be
#' subdivided into clearly delineated groups.
#'
#' The Louvain algorithm is a fast and accurate community
#' detection algorithm.
#'
#' Use this function as opposed to the modularity.louvain.und()
#' only if the network contains a mix of positive and negative
#' weights. If the network contains all positive weights, the
#' output of the two functions will be equivalent.
#'
#' Note: Function is not validated/running yet.
#'
#' @param W : a Matrix - undirected weighted/binary connection
#' matrix with positive and negative weights
#' @param qtype : a string - can be 'sta' (default), 'pos', 'smp',
#' 'gja', 'neg'. See Rubinov and Sporns (2011) for a description.
#' @param gamma : a float - resolution parameter, default value = 1.
#' Values 0 <= gamma < 1 detect larger modules while gamma > 1 detects
#' smaller modules
#' @param seed : an integer - the random seed
#'
#' @return ciQ : a list - two elements where element one is 'ci',
#' a vector (refined community affiliation network), and element
#' two is 'Q', a float (optimized modularity metric)
#'
UNC.modularity.louvain.und.sign <- function(W,
                                            gamma=1,
                                            qtype='sta',
                                            seed=NA){
  # Louvain community detection on a signed, undirected weight matrix.
  # The roxygen block above documents the interface; the original author
  # explicitly marks this port as "not validated/running yet", and several
  # indexing expressions below look suspect (flagged inline).
  if (!is.na(seed)) set.seed(seed)
  n <- nrow(W)
  W0 <- W * (W>0)    # positive subnetwork
  W1 <- -W * (W<0)   # magnitudes of the negative subnetwork
  s0 <- sum(W0)      # total positive weight
  s1 <- sum(W1)      # total negative weight
  # d0/d1 rescale the positive/negative modularity contributions according
  # to the chosen variant (see Rubinov & Sporns 2011).
  d <- switch(qtype,
              'smp'=list(1/s0,1/s1),
              'gja'=list(1/(s0+s1),1/(s0+s1)),
              'sta'=list(1/s0,1/(s0+s1)),
              'pos'=list(1/s0,0),
              'neg'=list(0,1/s1)
  )
  d0 <- d[[1]]
  d1 <- d[[2]]
  h <- 2 # hierarchy index
  nh <- n # number of nodes in hierarchy level
  ci <- list(1:n) # hierarchical module assignments
  #q <- c(-1,0) # hierarchical modularity values
  q <- numeric(100)  # preallocated modularity trace (R grows it if h > 100)
  q[1:2] <- c(-1,0)
  while ( (q[h] - q[h-1]) > 1e-10 ){
    stopifnot(h < 300) # Modularity Infinite Loop Style A ??
    kn0 <- colSums(W0)  # positive node degrees
    kn1 <- colSums(W1)  # negative node degrees
    # copying is expensive...
    km0 <- kn0          # module degrees start equal to node degrees
    km1 <- kn1
    knm0 <- W0          # node-to-module degree matrices
    knm1 <- W1
    m <- 1:n # initial module assignments
    flag <- T
    it <- 0
    while (flag){
      it <- it + 1
      stopifnot(it < 1000) # Infinite Loop Detected and Stopped
      flag <- F
      # loop over nodes in random order
      for (u in sample(nh)){
        ma <- m[u]
        # NOTE(review): dQ0 and dQ1 index knm0/knm1 inconsistently
        # (`knm0[u:length(knm0)]` is a linear-slice, `knm1[u,length(knm1)]`
        # is an out-of-range column index); reference implementations use
        # the matrix row `knm[u,]` in both. Likely transcription bugs --
        # confirm against BCT's modularity_louvain_und_sign.
        dQ0 <- ((knm0[u:length(knm0)] + W0[u,u] - knm0[u,ma]) -
                  gamma * kn0[u] * (km0 + kn0[u] - km0[ma]) / s0) # positive dQ
        dQ1 <- ((knm1[u,length(knm1)] + W1[u,u] - knm1[u,ma]) -
                  gamma * kn1[u] * (km1 + kn1[u] - km1[ma]) / s1) # negative dQ
        dQ <- d0 * dQ0 - d1 * dQ1 # rescaled changes in modularity
        dQ[ma] <- 0 # no changes for same module
        max.dQ <- max(dQ) # maximal increase in modularity
        if ( max.dQ > 1e-10 ){
          flag <- T
          mb <- which.max(dQ)
          # NOTE(review): the updates below use leading linear slices
          # (`[1:mb]`, `W0[1:u]`); reference code updates whole columns
          # (`knm[,mb] + W[,u]`). Confirm before trusting results.
          # change postive node-to-module degrees
          knm0[1:mb] <- knm0[1:mb] + W0[1:u]
          knm0[1:ma] <- knm0[1:ma] - W0[1:u]
          # change negative node-to-module degrees
          knm1[1:mb] <- knm1[1:mb] + W1[1:u]
          knm1[1:ma] <- knm1[1:ma] - W1[1:u]
          km0[mb] <- km0[mb] + kn0[u] # change positive module degrees
          km0[ma] <- km0[ma] - kn0[u]
          km1[mb] <- km1[mb] + kn1[u] # change negative module degrees
          km1[ma] <- km1[ma] - kn1[u]
          m[u] <- mb # reassign module
        }
      }
    }
    # Relabel module ids to consecutive integers 1..nh.
    m <- as.factor(m)
    m <- vapply(m,function(y) which(levels(m)==y),numeric(1)) # new module assignments
    h <- h + 1
    ci[[h]] <- m[ci[[h-1]]][1:n]
    nh <- max(m) # number of new nodes
    wn0 <- matrix(nrow=nh, ncol=nh) # new positive weights matrix
    wn1 <- wn0 # copy
    # Aggregate weights between (and within) the new modules.
    # this is bad R code
    for (u in 1:nh){
      for (v in u:nh){
        wn0[u,v] <- sum(W0[m==u,m==v])
        wn1[u,v] <- sum(W1[m==u,m==v])
        wn0[v,u] <- wn0[u,v]
        wn1[v,u] <- wn1[u,v]
      }
    }
    W0 <- wn0
    W1 <- wn1
    #q <- c(q,0) # this is slow
    q[h] <- 0
    # compute modularity
    q0 <- sum(diag(W0)) - sum(W0 %*% W0) / s0
    q1 <- sum(diag(W1)) - sum(W1 %*% W1) / s1
    q[h] <- d0 * q0 - d1 * q1
    ci.ret <- vapply(ci[[length(ci)]],function(y) which(levels(as.factor(m))==y),numeric(1))
  }
  # NOTE(review): q[length(q)] is q[100] from the preallocation, which is 0
  # unless the hierarchy actually reached 100 levels; q[h] is presumably
  # what was intended -- confirm.
  return(list(ci.ret=ci.ret,q[length(q)]))
}
modularity.louvain.und.sign <- compiler::cmpfun(UNC.modularity.louvain.und.sign)
#' Louvain Modularity Algorithm on Undirected Graph
#'
#' The optimal community structure is a subdivision of the network into
#' nonoverlapping groups of nodes in a way that maximizes the number of
#' within-group edges, and minimizes the number of between-group edges.
#' The modularity is a statistic that quantifies the degree to which the
#' network may be subdivided into such clearly delineated groups.
#'
#' The Louvain algorithm is a fast and accurate community detection
#' algorithm (as of writing). The algorithm may also be used to detect
#' hierarchical community structure.
#'
#' R Microbenchmark - Fast enough..
#' Unit: milliseconds
#' expr min lq mean median uq max neval
#' fun 8.890078 11.65477 12.90705 12.62741 13.85725 19.57911 100
#'
#' WITH compile::cmpfun() - Fast!
#' Unit: milliseconds
#' expr min lq mean median uq max neval
#' fun 6.015344 7.543102 9.385713 9.529057 10.69335 13.49019 100
#'
#' Note: Function is not validated yet.
#'
#' @param W : a Matrix - undirected weighted/binary connection matrix
#' @param gamma : a float - resolution parameter. default value=1.
#' Values 0 <= gamma < 1 detect larger modules while gamma > 1
#' detects smaller modules.
#' @param hierarchy : a boolean - enables hier. output
#' @param seed : an integer - random seed
#'
#' @return ciQ : a list - two elements where element one is 'ci',
#' a vector (refined community affiliation network), and element
#' two is 'Q', a float (optimized modularity metric).If hierarchical
#' output enabled, becomes an Hx1 array of floats instead.
#'
UNC.modularity.louvain.und <- function(W,
                                       gamma=1,
                                       hierarchy=FALSE,
                                       seed=NA){
  # Louvain community detection on an undirected (weighted or binary)
  # connection matrix. See the roxygen block above for the interface; the
  # original author marks this as "not validated yet".
  if (!is.na(seed)) set.seed(seed)
  n <- nrow(W) # number of nodes
  s <- sum(W) # total edge weight
  h <- 1 # hierarchy index
  ci <- list(1:n) # hierarchical module assignments
  q <- numeric(100)  # preallocated modularity trace (R grows it if needed)
  q[1] <- -1
  #q <- c(-1) # hierarhcical modularity values
  n0 <- n
  while (TRUE){
    stopifnot(h < 300) # infinite loop
    k <- rowSums(W) # node degree
    Km <- k # module degree
    Knm <- W # node-to-module degree
    m <- 1:n # initial module assignments
    flag <- TRUE
    it <- 0
    # GOOD UP TO HERE
    while (flag){
      #cat('Iteration: ' , it, '\n')
      it <- it + 1
      stopifnot(it < 1000) # infinite loop
      flag <- FALSE
      # Greedy local-move phase: visit nodes in random order and move each
      # to the neighbouring module with the largest modularity gain.
      for (i in sample(n)){
        ma <- m[i] # module assignment
        # algorithm condition: modularity gain of moving node i into each module
        dQ <- ((Knm[i,] - Knm[i,ma] + W[i,i]) -
                 gamma * k[i] * (Km - Km[ma] + k[i]) / s)
        dQ[ma] <- 0 # change to itself is 0
        max.dQ <- max(dQ) # find maximum modularity increase
        if (max.dQ > 1e-10){
          j <- which.max(dQ)
          Knm[,j] <- Knm[,j] + W[,i] # change node-to-module degrees
          Knm[,ma] <- Knm[,ma] - W[,i]
          Km[j] <- Km[j] + k[i] # change module degrees
          Km[ma] <- Km[ma] - k[i]
          m[i] <- j # reassign module
          flag <- TRUE
        }
      }
    }
    # Relabel module ids to consecutive integers 1..max(m).
    m <- as.numeric(as.factor(m))
    h <- h + 1
    ci[[h]] <- m[ci[[h-1]]][1:n]
    n <- max(m) # new number of modules
    # Aggregation phase: collapse each module to a single node.
    W1 <- matrix(nrow=n,ncol=n) # new weighted matrix
    for (i in 1:n){
      for (j in i:n){
        # pool weights of nodes in same module
        wp <- sum(W[m==i,m==j])
        W1[i,j] <- wp
        W1[j,i] <- wp
      }
    }
    W <- W1
    q[h] <- sum(diag(W)) / s - gamma * sum((W/s) %*% (W/s))
    if ( (q[h] - q[h-1]) < 1e-10 ) break
  }
  if (hierarchy){
    # NOTE(review): `ci[[2:(length(ci)-1)]]` uses `[[` with a vector index,
    # which in R performs *recursive* extraction, not slicing; `[` was
    # presumably intended -- confirm before using hierarchy=TRUE.
    ci <- ci[[2:(length(ci)-1)]]
    q <- q[2:(length(q)-1)]
    ciq <- list(ci=ci,q=q)
  }
  else{
    # Default: return the last converged level (h-1) only.
    ciq <- list(ci=ci[[h-1]],q=q[h-1])
  }
  return(ciq)
}
modularity.louvain.und <- compiler::cmpfun(UNC.modularity.louvain.und)
|
853aacdf38792041d9a78762f02f9d7eb1ced3a3
|
37ebe117da6bd32371de418f9b29440c3765f017
|
/server.R
|
ac0154fa95b29621274447eccc0dcfbc7f30a291
|
[] |
no_license
|
Canuteson/ddp-shiny-app
|
07bdad0a81cdd9c861de355e7138d3c77756ee22
|
762198a884b3c85c2be8cb01d63b1dcbe1b3a14a
|
refs/heads/master
| 2016-09-14T09:44:24.625717
| 2016-05-01T00:44:38
| 2016-05-01T00:44:38
| 57,465,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
server.R
|
library(UsingR)
data(galton)
# Simple linear regression of child height on parent height (Galton data);
# used by predictChild() and the prediction line in the histogram plot.
lmfit <- lm(child ~ parent, data = galton)
predictChild <- function(parentHeight) {
  # Predict a child's height from the parent's height using the fitted
  # Galton regression `lmfit` (defined above at script level).
  #
  # Args:
  #   parentHeight: numeric parent height (scalar or vector), in inches.
  # Returns: the predicted child height(s) from predict.lm().
  # The newdata column must be named `parent` to match the model formula;
  # the original's unused `parent`/`new_data` locals have been removed.
  predict(lmfit, newdata = data.frame(parent = parentHeight))
}
# Mean child height; drawn as a green reference line on the histogram.
childmu <- mean(galton$child)
shinyServer(
  function(input, output) {
    # Reactive prediction recomputed whenever the height input changes.
    pred <- reactive({as.numeric(predictChild(input$height))})
    # Echo the raw input and the numeric prediction back to the UI.
    output$inputValue <- renderPrint({input$height})
    output$prediction <- renderText({pred()})
    output$newHist <- renderPlot({
      # Histogram of child heights with three vertical markers:
      # blue = entered parent height, green = mean child height,
      # red = model-predicted child height.
      hist(
        galton$child,
        xlab = "Children's Heights",
        xlim = c(60,75),
        col = 'lightblue',
        main='Height Distribution'
      )
      lines(c(input$height, input$height), c(0,200), col = 'blue', lwd=5)
      lines(c(childmu, childmu), c(0,200), col = 'green', lwd=5)
      lines(c(pred(), pred()), c(0, 200), col='red', lwd=5)
      legend("topright", c("Parent height", "Child Mean", "Predicted Height"),
             col=c("blue", "green", "red"), lwd=10)
    })
  }
)
|
80564a314c44b7c175d03d17c723e9e5b61ba076
|
59a25a98d4fa8a50374b9e8bf6259aa37f8fa2c6
|
/man/week_selection.Rd
|
d2d8abdc5bc51c072ce855cf6953e70452f122eb
|
[] |
no_license
|
claysiusd/antaresXpansion
|
1a99a72754c79891f1e2a5d385b63cde2acb0c6a
|
ef19f6bf930c7a6b831a89139baba4b5e8aeeeec
|
refs/heads/master
| 2022-02-02T15:20:46.046795
| 2019-06-13T11:55:08
| 2019-06-13T11:55:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 857
|
rd
|
week_selection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/week_selection.R
\name{week_selection}
\alias{week_selection}
\title{Smart selection of the weeks to simulate}
\usage{
week_selection(current_it, mc_years, weeks, tmp_folder, exp_options)
}
\arguments{
\item{current_it}{list of current iteration characteristics}
\item{mc_years}{vector of all the Monte Carlo years identifier}
\item{weeks}{vector of all the weeks identifier}
\item{tmp_folder}{temporary folder of the benders decomposition}
\item{exp_options}{list of options related to the expansion planning, as returned
by the function \code{\link{read_options}}}
}
\value{
updated current iteration characteristics
}
\description{
week_selection is a function which selects the MC years and
weeks which will be simulated during this iteration
}
|
c0700436cb764bf34acfdfefb2fc6b59f5c1d782
|
1066bcc4ad47e19b2fc7f98a410b81d2efacfaee
|
/cachematrix.R
|
6d9c6a399bff1818c58cd4af467e6fe7adbcaa12
|
[] |
no_license
|
jeannief/ProgrammingAssignment2
|
7f0c83578c17f16ed25024827f7d31f3a69a690d
|
a5d1fc704f26c6eba5841c5d6653ef7fee9b0dfb
|
refs/heads/master
| 2021-01-16T19:16:36.457668
| 2014-07-27T22:42:40
| 2014-07-27T22:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
r
|
cachematrix.R
|
#Coursera Data Science Specialisation. R Programming Assignment 2.
#These two functions together show how to create a matrix objet that can cache its inverse,
#The functions allow the matrix to be displayed, and to get, set and cache the inverse
#create a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a closure-based "cache matrix" object: a list of four accessor
  # functions sharing this environment. The cached inverse lives in `m`
  # and is reset to NULL whenever the wrapped matrix is replaced.
  m <- NULL

  # Replace the stored matrix, invalidating any cached inverse.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }

  # Retrieve the stored matrix.
  get <- function() {
    x
  }

  # Store a computed inverse / fetch the cached inverse (NULL if unset).
  setinverse <- function(solve) {
    m <<- solve
  }
  getinverse <- function() {
    m
  }

  # Expose the accessors by name.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
#check to see if the inverse has already been calculated.
#if it has - return it
#if not calculate the inverse and cache the value
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (an object from
  # makeCacheMatrix). If an inverse is already cached, return it
  # immediately; otherwise compute it with solve(), cache it, and return
  # it. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
8cdf795a552ce132d2b1f2e4ff39633b739606b0
|
4b79952c84a3f6b65052bccfd1f5e3e4e5b92cb9
|
/Bootstrap_CI/Bootstrap_CI_v1.R
|
d8539607bdf329196a49d3380915f591ccf03cc9
|
[
"MIT"
] |
permissive
|
ecostash/JAMS_1
|
62e12bf2f75c274e369cb3baffe96a53b4638fe3
|
f5d8be67bf3b02c11b707862f943a445a0b04cdd
|
refs/heads/master
| 2021-01-20T00:22:17.643080
| 2017-04-23T09:12:04
| 2017-04-23T09:12:04
| 89,122,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,531
|
r
|
Bootstrap_CI_v1.R
|
# Pre-analysis setup
# Clear all data from previous runs
# NOTE(review): rm(list=ls()) plus a hard-coded setwd() makes this script
# non-portable and destructive to the caller's session; prefer an RStudio
# project / here::here(). Kept as-is to preserve the original workflow.
rm(list=ls())
# Load required packages
library(dplyr)
library(tidyr)
library(lubridate)
library(zoo)
# Set working directory
setwd("C:/Users/Michael/Desktop/R_code/2oProd_code/Spider_analyses_20Mar2017")
# Import Biomass table from "data" folder in working directory.
# Expected columns (used below): SITE, DATE, HABITAT, M_HABITAT, M_VALLEY.
Biomass = read.csv("data/Spider_v1.csv", header = TRUE)
#==============================================================================================
# Step 1
# Resample_function_biomass: one nonparametric bootstrap resample.
# Within each SITE x DATE x HABITAT cell, draw n() rows with replacement
# (preserving each cell's size), then drop the grouping before returning.
Resample_function_biomass = function(Biomass)
{group_by(Biomass, SITE, DATE, HABITAT) %>%
    slice(sample(n(), replace = TRUE))%>%
    ungroup()
}
#==============================================================================================
# Step 2
# Define number of resampling events
N_resample = 1000
# Loop 1: resample, average per SITE x HABITAT, and stack the results.
# NOTE(review): growing Hab_biomass with bind_rows() inside the loop is
# O(n^2); collecting into a preallocated list and binding once at the end
# would be faster for large N_resample.
for (n in 1:N_resample)
{Resample_biomass = Resample_function_biomass(Biomass)
# Writes CSV file to "check" folder in working directory to make sure the portion of Step 1 worked correctly
# Delete # to run this line of code
# write.csv(Resample_biomass, file = paste("check/Resample_biomass.csv"), row.names = FALSE)
#==============================================================================================
# Step 3: per-replicate means of every numeric column by SITE x HABITAT
# (DATE is dropped so it is not averaged).
Hab_avg_biomass = select(Resample_biomass, -DATE) %>%
  group_by(SITE, HABITAT) %>%
  summarise_each(funs(mean))
# Writes CSV file to "check" folder in working directory to make sure the portion of Step 3 worked correctly
# Delete # to run this line of code
# write.csv(Hab_avg_biomass, file = paste("check/Hab_avg_biomass.csv"), row.names = FALSE)
#==============================================================================================
# Step 4: accumulate this replicate's means onto the running table.
if (n==1)
{Hab_biomass = Hab_avg_biomass}
else
{Hab_biomass = bind_rows(Hab_biomass, Hab_avg_biomass)}
}
# Writes CSV file to "results" folder in working directory
write.csv(Hab_biomass, file = paste("results/Hab_biomass.csv"), row.names = FALSE)
#==============================================================================================
# Step 5: bootstrap means and 95% percentile intervals (2.5%/97.5%).
# By SITE and HABITAT
Site_hab_quantile = group_by(Hab_biomass, SITE, HABITAT) %>%
  summarise("Mean_M_HABITAT" = mean(M_HABITAT),
            "M_HABITAT 95% UP" = quantile(M_HABITAT, probs = 0.975),
            "M_HABITAT 95% DW" = quantile(M_HABITAT, probs = 0.025),
            "Mean_M_VALLEY" = mean(M_VALLEY),
            "M_VALLEY 95% UP" = quantile(M_VALLEY, probs = 0.975),
            "M_VALLEY 95% DW" = quantile(M_VALLEY, probs = 0.025))
# By SITE: pair up the k-th replicate of each habitat via a per-group
# sequence ID, sum across habitats within a replicate, then take
# quantiles of those sums.
Site_quantile = group_by(Hab_biomass, SITE, HABITAT) %>%
  mutate(ID = sequence(n())) %>%
  ungroup() %>%
  group_by(SITE, ID) %>%
  summarise(Sum_M_VALLEY = sum(M_VALLEY), Sum_M_HABITAT = sum(M_HABITAT)) %>%
  summarise("Mean_M_HABITAT" = mean(Sum_M_HABITAT),
            "M_HABITAT 95% UP" = quantile(Sum_M_HABITAT, probs = 0.975),
            "M_HABITAT 95% DW" = quantile(Sum_M_HABITAT, probs = 0.025),
            "Mean_M_VALLEY" = mean(Sum_M_VALLEY),
            "M_VALLEY 95% UP" = quantile(Sum_M_VALLEY, probs = 0.975),
            "M_VALLEY 95% DW" = quantile(Sum_M_VALLEY, probs = 0.025))
# Writes CSV file to "results" folder in working directory
write.csv(Site_hab_quantile, file = paste("results/Site_hab_quantile.csv"), row.names = FALSE)
write.csv(Site_quantile, file = paste("results/Site_quantile.csv"), row.names = FALSE)
|
88de0961375c7ec3cba16c73a2f026780ba2b8b5
|
2598d11a33b60cd067b92d3bd945df06953a6348
|
/R/app_server.R
|
22a0c2e3e547a97e5bda4b23f2af9955f2844fa5
|
[] |
no_license
|
borishejblum/UBcovidsurv
|
59256a41ece5845bc47119b05ed64c31ef179288
|
4cb4e0ef1151ad846ce482449b1cb49afdda62ba
|
refs/heads/master
| 2022-07-08T12:27:35.671429
| 2021-03-25T13:45:46
| 2021-03-25T13:45:46
| 296,362,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
r
|
app_server.R
|
#' Application server-side logic.
#'
#' Wires the prior/posterior plot and the summary table to the UI inputs
#' (Beta prior parameters, observed successes/trials, credibility mass,
#' log-scale toggle, reference incidence).
#' @import shiny
app_server <- function(input, output, session) {
  # reactive(if(is.null(input$scalingfactor)){
  #   scalingfactor <- 1
  # })
  # Prior/posterior density plot, re-rendered on any input change.
  output$distPlot <- renderPlot({
    plot_prior_post(alpha = input$alpha, beta = input$beta,
                    nsuccess = input$nsuccess, ntrials = input$ntrials,
                    credibility_mass = input$credibility_mass,
                    logscale = input$logscale,
                    incid_ref = input$incid_ref)
  })
  # Summary table of posterior results.
  # NOTE(review): plot_prior_post() takes `incid_ref` while table_res()
  # takes `ref_incid` -- inconsistent argument naming in the package;
  # confirm both are intended.
  output$resTable <- renderTable({
    table_res(alpha = input$alpha, beta = input$beta,
              nsuccess = input$nsuccess, ntrials = input$ntrials,
              credibility_mass = input$credibility_mass,
              ref_incid = input$incid_ref)
  })
}
|
68c86d897b13cd8db12c0f6fe9ab50ff9c7ed2e3
|
fb21a3f5a1957bc272bab3c7b3791d81ac56f238
|
/static/individualproject2021/Chammika/AS2018577_WEERASOORIYA PAGGM_183916_assignsubmission_file_/AS2018577.R
|
3f6aa2854afbcc7b2867228d9a73e85ca84ae07f
|
[] |
no_license
|
statisticsmart/Rprogramming
|
acc99b14173f02952f36ae0a61127a66fc75ea19
|
f0e43e7b550f9b2182db932d639f6088d7b27ac2
|
refs/heads/master
| 2023-06-08T13:08:58.383614
| 2023-06-01T23:13:12
| 2023-06-01T23:13:12
| 232,999,998
| 2
| 1
| null | 2023-06-01T22:26:24
| 2020-01-10T08:21:51
|
HTML
|
UTF-8
|
R
| false
| false
| 13,360
|
r
|
AS2018577.R
|
library(devtools)
library(mice)
library(sta3262)
get_individual_project_country("AS2018577")
library(coronavirus)
data(coronavirus)
head(coronavirus)
tail(coronavirus)
unique(coronavirus$country)
library(tidyverse)
library(magrittr)
coronavirus
# Data cleaning: drop the province column, then drop rows with any NA.
coronavirus1=subset(coronavirus,select = -province)
coronavirus2=drop_na(coronavirus1)
nrow(coronavirus)
nrow(coronavirus2)
# NOTE(review): this prints a full logical matrix and its result is never
# used; probably meant anyNA(coronavirus2) or sum(is.na(coronavirus2)).
is.na(coronavirus2)
#Select my country
lebanon_corona <- coronavirus2 %>% filter(country == "Lebanon")
#recover_lebanon_corona <- lebanon_corona %>% filter(type=="confirmed")
#head(recover_lebanon_corona)
#ggplot(recover_lebanon_corona, aes(x=date, y=cases)) + geom_line() + ggtitle("Lebanon: Daily Covid-19 Recoveries")
library(ggplot2)
library(maptools)
library(tibble)
library(tidyverse)
library(ggrepel)
library(png)
library(grid)
library(sp)
data(wrld_simpl)
# Map: world polygons in gray, with Lebanon's data points overlaid in red.
p <- ggplot() +
  geom_polygon(
    data = wrld_simpl,
    aes(x = long, y = lat, group = group), fill = "gray", colour = "white"
  ) +
  coord_cartesian(xlim = c(-180, 180), ylim = c(-90, 90)) +
  scale_x_continuous(breaks = seq(-180, 180, 120)) +
  scale_y_continuous(breaks = seq(-90, 90, 100))
p +
  geom_point(
    data = lebanon_corona, aes(x = long, y = lat), color = "red", size
    = 1
  )
#confirmed Lebanon: daily series, total, summary, and peak day.
lebanon_corona_c <- lebanon_corona %>% filter(type == "confirmed")
ggplot(lebanon_corona_c, aes(x=date, y=cases)) + geom_line() + ggtitle("Lebanon: Daily Covid-19 confirmed cases")
sum(lebanon_corona_c[, 'cases'])
summary(lebanon_corona_c)
lebanon_corona_c1=lebanon_corona_c[order(lebanon_corona_c$cases),]
tail(lebanon_corona_c1)
# sum of confirmed=617662, higher value=6154 date= 2021-01-15
lebanon_corona_d <- lebanon_corona %>% filter(type == "death")
ggplot(lebanon_corona_d, aes(x=date, y=cases)) + geom_line() + ggtitle("Lebanon: Daily Covid-19 death")
sum(lebanon_corona_d[, 'cases'])
summary(lebanon_corona_d)
lebanon_corona_d1=lebanon_corona_d[order(lebanon_corona_d$cases),]
tail(lebanon_corona_d1)
# sum of death=8232, higher value=351 date= 2021-01-30
lebanon_corona_r <- lebanon_corona %>% filter(type == "recovered")
# NOTE(review): no type " active cases" exists in the data set (and the
# string has a leading space), so this filter yields an empty frame.
lebanon_corona_a <- lebanon_corona %>% filter(type == " active cases")
sum(lebanon_corona_r[, 'cases'])
summary(lebanon_corona_r)
lebanon_corona_r1=lebanon_corona_r[order(lebanon_corona_r$cases),]
tail(lebanon_corona_r1)
# sum of recivered= 537653, higher value=12635 date= 2021-02-14
# Create active cases: daily active = confirmed - (recovered + deaths),
# built by merging the three per-type series on date.
newl=merge(x= lebanon_corona_r, y= lebanon_corona_d, by= 'date', all.x= T)
newl$allc=newl$cases.x+newl$cases.y
new_a=merge(x= lebanon_corona_c, y= newl, by= 'date', all.x= T)
new_a=drop_na(new_a)
new_a$active=new_a$cases-new_a$allc
#lebanon_active= lebanon_corona_r$cases+lebanon_corona_d$cases
#q=abs(lebanon_corona_r$cases)
#lebanon_corona_r$cases1=q
#dfla=data.frame(lebanon_active)
#dfla=drop_na(dfla)
#dfla1=lebanon_corona_c$cases-dfla
#dfla1=drop_na(dfla1)
#lebanon_corona$active=dfla1
plot(new_a$active,type="l",xlab ="ID",ylab = "Values",main
     ="comfrm vs recover")
#Active cases: cumulative sum of the daily active-case changes.
# NOTE(review): `c=cumsum(...)` shadows base::c for the rest of the
# session; rename to avoid surprising breakage downstream.
data.frame(new_a$active, c=cumsum(new_a$active))
c=cumsum(new_a$active)
data.frame(c)
plot(c,type="l",xlab ="Days",ylab = "Cases",main
     ="Active Cases Lebanon")
#####
# Overlay confirmed (blue) vs recovered (red) daily series.
ggp <- ggplot(NULL, aes(x=date, y=cases)) + # Draw ggplot2 plot based on two data frames
  geom_line(data = lebanon_corona_c, col = "blue") +
  geom_line(data = lebanon_corona_r, col = "red")+
  ggtitle("Lebanon confirmed V Recovered")
ggp+theme(legend.position = "bottom")
#####
usa_corona <- coronavirus2 %>% filter(country == "US")
recover_usa_corona <- lebanon_usa %>% filter(type=="confirmed")
head(recover_usa_corona)
ggplot(recover_usa_corona, aes(x=date, y=cases)) + geom_line() + ggtitle("US: Daily Covid-19 Recoveries")
#######################
ggp <- ggplot(NULL, aes(x=date, y=cases)) + # Draw ggplot2 plot based on two data frames
geom_line(data = recover_lebanon_corona, col = "red") +
geom_line(data = recover_usa_corona, col = "blue")+
ggtitle("US v Lebanon")
ggp
####################
usa_corona_c <- usa_corona %>% filter(type == "confirmed")
usa_corona_d <- usa_corona %>% filter(type == "death")
ggplot(usa_corona_r, aes(x=date, y=cases1)) + geom_line() + ggtitle("Lebanon: Daily Covid-19 death")
usa_corona_r <- usa_corona %>% filter(type == "recovered")
#lebanon_corona_a <- lebanon_corona %>% filter(type == " active cases")
#crate active casses usa
q=abs(usa_corona_r$cases)
usa_corona_r$cases1=q
usa_active= usa_corona_r$cases+usa_corona_d$cases
dfla_usa=data.frame(usa_active)
dfla1_usa=usa_corona_c$cases-dfla_usa
usa_corona$active=dfla1_usa
plot(dfla1_usa$usa_active,type="l",xlab ="ID",ylab = "Values",main
="comform vs recover")
boxplot(usa_corona_r$cases1,xlab ="Sepal Length",ylab = "Spread",main ="Sepal
Length Distribution",col="green")
#Active cases
data.frame(dfla1_usa, c_usa=cumsum(dfla1_usa))
c_usa=cumsum(dfla1_usa)
data.frame(c_usa)
plot(c_usa$usa_active,type="l",xlab ="Days",ylab = "Cases",main
="Active Cases USA")
sum(usa_corona_c[, 'cases'])
summary(usa_corona_c)
usa_corona_c1=usa_corona_c[order(usa_corona_c$cases),]
tail(usa_corona_c1)
sum(usa_corona_r[, 'cases'])
summary(usa_corona_r)
usa_corona_r1=usa_corona_r[order(usa_corona_r$cases),]
tail(usa_corona_r1)
sum(usa_corona_d[, 'cases'])
summary(usa_corona_d)
usa_corona_d1=usa_corona_d[order(usa_corona_d$cases),]
tail(usa_corona_d1)
####israel
# Israel section: mirrors the USA workflow.
Israel_corona <- coronavirus2 %>% filter(country == "Israel")
# "recover_" prefix kept for cross-section consistency; holds CONFIRMED cases.
recover_Israel_corona <- Israel_corona %>% filter(type == "confirmed")
head(recover_Israel_corona)
# Fix: the title said "US" for Israeli data.
ggplot(recover_Israel_corona, aes(x = date, y = cases)) + geom_line() +
  ggtitle("Israel: Daily Covid-19 Confirmed Cases")
#######################
ggp <- ggplot(NULL, aes(x = date, y = cases)) +
  geom_line(data = recover_lebanon_corona, col = "red") +
  geom_line(data = recover_usa_corona, col = "blue") +
  ggtitle("US v Lebanon")
ggp
####################
Israel_corona_c <- Israel_corona %>% filter(type == "confirmed")
Israel_corona_d <- Israel_corona %>% filter(type == "death")
# Fix: the title said "Lebanon" for Israeli deaths.
ggplot(Israel_corona_d, aes(x = date, y = cases)) + geom_line() +
  ggtitle("Israel: Daily Covid-19 Deaths")
Israel_corona_r <- Israel_corona %>% filter(type == "recovered")
# Daily active cases = confirmed - (recovered + deaths).
Israel_active <- Israel_corona_r$cases + Israel_corona_d$cases
dfla_Israel <- data.frame(Israel_active)
dfla1_Israel <- Israel_corona_c$cases - dfla_Israel
Israel_corona$active <- dfla1_Israel
plot(dfla1_Israel$Israel_active, type = "l", xlab = "ID", ylab = "Values",
     main = "comform vs recover")
# Cumulative active cases.
data.frame(dfla1_Israel, c_Israel = cumsum(dfla1_Israel))
c_Israel <- cumsum(dfla1_Israel)
data.frame(c_Israel)
plot(c_Israel$Israel_active, type = "l", xlab = "Days", ylab = "Cases",
     main = "Active Cases Israel")
# Fix: the original chain mixed base graphics text(locator(), ...) into a
# ggplot pipeline, which errors at evaluation; removed. The title also said
# "US v Israel" although the plotted series are Lebanon and Israel.
ggp <- ggplot(NULL, aes(x = date, y = cases)) +
  geom_line(data = recover_lebanon_corona, col = "red") +
  geom_line(data = recover_Israel_corona, col = "blue") +
  ggtitle("Lebanon v Israel")
ggp
# Totals, five-number summaries, and peak days per series.
sum(Israel_corona_c[, "cases"])
summary(Israel_corona_c)
Israel_corona_c1 <- Israel_corona_c[order(Israel_corona_c$cases), ]
tail(Israel_corona_c1)
sum(Israel_corona_r[, "cases"])
summary(Israel_corona_r)
Israel_corona_r1 <- Israel_corona_r[order(Israel_corona_r$cases), ]
tail(Israel_corona_r1)
sum(Israel_corona_d[, "cases"])
summary(Israel_corona_d)
Israel_corona_d1 <- Israel_corona_d[order(Israel_corona_d$cases), ]
tail(Israel_corona_d1)
#####Libya
# Libya section: mirrors the USA workflow.
Libya_corona <- coronavirus2 %>% filter(country == "Libya")
# "recover_" prefix kept for cross-section consistency; holds CONFIRMED cases.
recover_Libya_corona <- Libya_corona %>% filter(type == "confirmed")
head(recover_Libya_corona)
# Fix: the title said "Recoveries" for confirmed-case data.
ggplot(recover_Libya_corona, aes(x = date, y = cases)) + geom_line() +
  ggtitle("Libya: Daily Covid-19 Confirmed Cases")
#######################
ggp <- ggplot(NULL, aes(x = date, y = cases)) +
  geom_line(data = recover_lebanon_corona, col = "red") +
  geom_line(data = recover_usa_corona, col = "blue") +
  ggtitle("US v Lebanon")
ggp
####################
Libya_corona_c <- Libya_corona %>% filter(type == "confirmed")
Libya_corona_d <- Libya_corona %>% filter(type == "death")
ggplot(Libya_corona_d, aes(x = date, y = cases)) + geom_line() +
  ggtitle("Libya: Daily Covid-19 Deaths")
Libya_corona_r <- Libya_corona %>% filter(type == "recovered")
# Daily active cases = confirmed - (recovered + deaths).
Libya_active <- Libya_corona_r$cases + Libya_corona_d$cases
dfla_Libya <- data.frame(Libya_active)
dfla1_Libya <- Libya_corona_c$cases - dfla_Libya
Libya_corona$active <- dfla1_Libya
plot(dfla1_Libya$Libya_active, type = "l", xlab = "ID", ylab = "Values",
     main = "comform vs recover")
# Cumulative active cases.
data.frame(dfla1_Libya, c_Libya = cumsum(dfla1_Libya))
c_Libya <- cumsum(dfla1_Libya)
data.frame(c_Libya)
# Fix: the plot title said "Israel" for Libyan data.
plot(c_Libya$Libya_active, type = "l", xlab = "Days", ylab = "Cases",
     main = "Active Cases Libya")
# Fix: the title said "US v Lebanon v Libya" although the plotted series are
# Lebanon, Israel and Libya.
ggp <- ggplot(NULL, aes(x = date, y = cases)) +
  geom_line(data = recover_lebanon_corona, col = "red") +
  geom_line(data = recover_Israel_corona, col = "blue") +
  geom_line(data = recover_Libya_corona, col = "green") +
  ggtitle("Lebanon v Israel v Libya")
ggp
# Totals, five-number summaries, and peak days per series.
sum(Libya_corona_c[, "cases"])
summary(Libya_corona_c)
Libya_corona_c1 <- Libya_corona_c[order(Libya_corona_c$cases), ]
tail(Libya_corona_c1)
sum(Libya_corona_r[, "cases"])
summary(Libya_corona_r)
Libya_corona_r1 <- Libya_corona_r[order(Libya_corona_r$cases), ]
tail(Libya_corona_r1)
sum(Libya_corona_d[, "cases"])
summary(Libya_corona_d)
Libya_corona_d1 <- Libya_corona_d[order(Libya_corona_d$cases), ]
tail(Libya_corona_d1)
######china comparie
# China section: mirrors the USA workflow.
China_corona <- coronavirus2 %>% filter(country == "China")
# "recover_" prefix kept for cross-section consistency; holds CONFIRMED cases.
recover_China_corona <- China_corona %>% filter(type == "confirmed")
head(recover_China_corona)
# Fix: the title said "Recoveries" for confirmed-case data.
ggplot(recover_China_corona, aes(x = date, y = cases)) + geom_line() +
  ggtitle("China: Daily Covid-19 Confirmed Cases")
#######################
# Fix: the title said "US v Lebanon" although the plotted series are
# Lebanon and China.
ggp <- ggplot(NULL, aes(x = date, y = cases)) +
  geom_line(data = recover_lebanon_corona, col = "red") +
  geom_line(data = recover_China_corona, col = "blue") +
  ggtitle("Lebanon v China")
ggp
####################
China_corona_c <- China_corona %>% filter(type == "confirmed")
China_corona_d <- China_corona %>% filter(type == "death")
ggplot(China_corona_d, aes(x = date, y = cases)) + geom_line() +
  ggtitle("China: Daily Covid-19 Deaths")
China_corona_r <- China_corona %>% filter(type == "recovered")
# Daily active cases = confirmed - (recovered + deaths).
China_active <- China_corona_r$cases + China_corona_d$cases
dfla_China <- data.frame(China_active)
dfla1_China <- China_corona_c$cases - dfla_China
China_corona$active <- dfla1_China
plot(dfla1_China$China_active, type = "l", xlab = "ID", ylab = "Values",
     main = "comform vs recover")
# Cumulative active cases.
data.frame(dfla1_China, c_China = cumsum(dfla1_China))
c_China <- cumsum(dfla1_China)
data.frame(c_China)
plot(c_China$China_active, type = "l", xlab = "Days", ylab = "Cases",
     main = "Active Cases China")
# Totals, five-number summaries, and peak days per series.
sum(China_corona_c[, "cases"])
summary(China_corona_c)
China_corona_c1 <- China_corona_c[order(China_corona_c$cases), ]
tail(China_corona_c1)
# total confirmed=107909 ,higst 14840 0n 2020-04-13
sum(China_corona_r[, "cases"])
summary(China_corona_r)
China_corona_r1 <- China_corona_r[order(China_corona_r$cases), ]
tail(China_corona_r1)
# total recovered = 99228 ,higst 3418 on 2020-02-22
sum(China_corona_d[, "cases"])
summary(China_corona_d)
# Fix: deaths were being ordered by the recovered series' case counts
# (order(China_corona_r$cases)), producing a meaningless ranking.
China_corona_d1 <- China_corona_d[order(China_corona_d$cases), ]
tail(China_corona_d1)
# Fix: the original referenced the undefined `US_corona_r`; the recovered
# series defined earlier is `usa_corona_r`.
usa_corona_r
ggplot(China_corona_r, aes(x = date, y = cases)) + geom_line() +
  ggtitle("China: Daily Covid-19 Recoveries")
# Recovered counts can be negative (data corrections); keep magnitudes.
China_corona_r$cases1 <- abs(China_corona_r$cases)
ggplot(China_corona_r, aes(x = date, y = cases1)) + geom_line() +
  ggtitle("China: Daily Covid-19 Recoveries (absolute)")
# Build one long vector of dates with one entry per confirmed case, so that
# geom_density() compares epidemic timing across countries.
# pmax(..., 0) guards rep() against negative daily corrections in the data,
# which would otherwise raise "invalid 'times' argument".
new_df1 <- data.frame(size = rep(lebanon_corona_c$date, pmax(lebanon_corona_c$cases, 0)), sample = "lebanon")
new_df2 <- data.frame(size = rep(Libya_corona_c$date, pmax(Libya_corona_c$cases, 0)), sample = "Libya")
new_df3 <- data.frame(size = rep(usa_corona_c$date, pmax(usa_corona_c$cases, 0)), sample = "USA")
new_df4 <- data.frame(size = rep(Israel_corona_c$date, pmax(Israel_corona_c$cases, 0)), sample = "Israel")
new_df5 <- data.frame(size = rep(China_corona_c$date, pmax(China_corona_c$cases, 0)), sample = "China")
all_sample <- rbind(new_df1, new_df2, new_df3, new_df4, new_df5)
ggplot(data = all_sample, aes(x = size)) + geom_density(aes(colour = sample))
|
90ac745e7c7866c0e2b7480bf2beb490aeb3401b
|
54d2268815ee1338582cc6941dae2065ea16c5cf
|
/r/biotool.R
|
e470c9fd648f9b883d878ce65cc950919db8f10c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Slugger70/biotool
|
b6261466273d70a48b1c03df5217d0190705b720
|
be259bd89a15d897cf03e29226dde45ba7419f4f
|
refs/heads/master
| 2021-01-12T11:54:19.853665
| 2016-09-25T13:02:00
| 2016-09-25T13:02:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,758
|
r
|
biotool.R
|
#!/usr/bin/env Rscript
# biotool: print summary statistics (sequence count, total/min/avg/max length)
# for one or more FASTA files as a tab-separated table on stdout.

# Version string reported by --version.
VERSION <- "1.0"
# Default for --minlen: include sequences of any length.
DEFAULT_MIN_LEN <- 0
# Default for --verbose: quiet operation.
DEFAULT_VERBOSE <- FALSE
# Load dependencies quietly so package startup messages do not pollute the
# tab-separated output stream.
suppressPackageStartupMessages({
  library(argparse, quietly = TRUE)
  library(seqinr, quietly = TRUE)
})
# Command-line interface definition.
parser <- ArgumentParser(description = "Print FASTA stats") # nolint
# One or more input files; "-" means read from standard input.
parser$add_argument("fasta_files",
                    metavar = "FASTA_FILE",
                    type = "character",
                    nargs = "+",
                    help = "Input FASTA files. Use - to read from stdin")
# Sequences shorter than this are excluded from the statistics.
parser$add_argument("--minlen",
                    metavar = "N",
                    type = "integer",
                    dest = "min_len",
                    default = DEFAULT_MIN_LEN,
                    help = paste0("Minimum length sequence to include in stats",
                                  " [default: %(default)s]"))
parser$add_argument("--verbose",
                    dest = "verbose",
                    action = "store_true",
                    default = DEFAULT_VERBOSE,
                    help = "Print more stuff about what's happening")
# Registered so --version shows up in --help; the flag itself is handled
# manually before parse_args() because fasta_files is otherwise required.
parser$add_argument("--version",
                    dest = "print_version",
                    action = "store_true",
                    help = "Print version and exit")
# Handle --version before argparse runs, since fasta_files would otherwise be
# required to get that far.
if ("--version" %in% commandArgs()) {
  # Fix: the original read commandArgs()[4], assuming the script path sits at
  # a fixed position; locate the --file= argument instead, which is how
  # Rscript actually passes the script path.
  script_path <- sub("^--file=", "", grep("^--file=", commandArgs(), value = TRUE))
  script_name <- if (length(script_path) > 0) basename(script_path[1]) else "biotool"
  cat(script_name, VERSION, "\n")
  quit(save = "no")
}
# Process command line arguments
args <- parser$parse_args()
# Map the conventional "-" argument to R's "stdin" pseudo-file.
args$fasta_files[args$fasta_files == "-"] <- "stdin"
# Get statistics of a FASTA file
get_fasta_stats <- function(filename, min_len, verbose = args$verbose) {
  # Calculate statistics of a FASTA file.
  #
  # Args:
  #   filename: The name of the input FASTA file.
  #   min_len: The minimum length of the sequence to include when calculating
  #     statistics.
  #   verbose: Warn when a file cannot be parsed. Defaults to the script-level
  #     --verbose flag, so existing callers keep their behaviour; the explicit
  #     parameter removes the previous hidden dependency on the global `args`.
  # Returns:
  #   A list containing FASTA stats (filename, numseq, total, min, avg, max).
  min_seq <- Inf
  max_seq <- 0
  num_seq <- 0
  num_bases <- 0
  # read.fasta() errors on empty or unparseable input; treat that as a file
  # with no sequences rather than aborting the whole run.
  sequences <- tryCatch(
    read.fasta(file = filename, seqtype = "AA", seqonly = TRUE),
    error = function(e) {
      if (verbose) warning(filename, " has no sequences.", call. = FALSE)
      return(NULL)
    }
  )
  for (seq in sequences) {
    this_len <- nchar(seq[1])
    if (this_len >= min_len) {
      num_seq <- num_seq + 1
      min_seq <- min(min_seq, this_len)
      max_seq <- max(max_seq, this_len)
      num_bases <- num_bases + this_len
    }
  }
  # With no qualifying sequences report min = 0 instead of Inf (avg stays
  # NaN; pretty_output() replaces it with "-"). Scalar if/else replaces the
  # original vectorized ifelse() on a length-1 condition.
  if (num_seq == 0) {
    min_seq <- 0
  }
  return(list(filename = filename, numseq = num_seq, total = num_bases,
              min = min_seq, avg = round(num_bases / num_seq), max = max_seq))
}
pretty_output <- function(stats) {
  # Format FASTA stats for display: when no sequences were counted, the
  # min/avg/max fields are meaningless, so show a dash (-) instead.
  #
  # Args:
  #   stats: The list containing FASTA stats.
  # Returns:
  #   The same list, with min, avg and max replaced by "-" when numseq is 0.
  if (stats[["numseq"]] != 0) {
    return(stats)
  }
  for (field in c("min", "avg", "max")) {
    stats[[field]] <- "-"
  }
  stats
}
# Check that all FASTA files exist; "stdin" is a pseudo-file, so exempt it.
exists <- vapply(args$fasta_files, file.exists, logical(1))
exists[args$fasta_files == "stdin"] <- TRUE
if (any(!exists)) {
  stop("Files do not exist:\n\t",
       paste(names(exists)[!exists], collapse = "\n\t"))
}
# Check that all FASTA files are readable.
# Fix: the stdin pseudo-file must be exempted here too -- file.access()
# cannot stat "stdin", so reading from "-" previously always aborted.
can_read <- file.access(args$fasta_files, mode = 4)
can_read[args$fasta_files == "stdin"] <- 0
if (any(can_read == -1)) {
  stop("Files cannot be read:\n\t",
       paste(names(can_read)[can_read == -1], collapse = "\n\t"))
}
# Compute stats for each input and format zero-sequence results for display.
results <- lapply(args$fasta_files, FUN = function(x) {
  pretty_output(get_fasta_stats(x, args$min_len))
})
# Stack the per-file stat lists into a table with upper-case headers.
results <- do.call(rbind, results)
colnames(results) <- toupper(colnames(results))
# Write the table to stdout, tab-separated and unquoted.
write.table(results, stdout(), sep = "\t", row.names = FALSE, quote = FALSE)
|
3bde585ae927521df1e02c01cfed7f92bcd0bda9
|
5e34346a0c3eb00bee01b27d31dd86b8640786ec
|
/code/lib/HMM/man/viterbiTraining.Rd
|
69c1c786014b738bfb87a809d6321d39394f3580
|
[] |
no_license
|
andermic/change-point-detection
|
b9d795f02d842658cfb54ca7ad9864044a4c3638
|
247e7144ec928327d74fc2981ab835042178e1c7
|
refs/heads/master
| 2016-09-06T17:47:56.938373
| 2013-07-11T05:41:51
| 2013-07-11T05:41:51
| 3,736,521
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,818
|
rd
|
viterbiTraining.Rd
|
\encoding{latin1}
\name{viterbiTraining}
\alias{viterbiTraining}
\title{Inferring the parameters of a Hidden Markov Model via Viterbi-training}
\description{
For an initial Hidden Markov Model (HMM) and a given sequence of observations, the
Viterbi-training algorithm infers optimal parameters for the HMM. Viterbi-training
usually converges much faster than the Baum-Welch algorithm, but the underlying
algorithm is theoretically less justified. Be careful: The algorithm converges to
a local solution which might not be the optimum.
}
\usage{
viterbiTraining(hmm, observation, maxIterations=100, delta=1E-9, pseudoCount=0)
}
\arguments{
\item{hmm }{ A Hidden Markov Model.}
\item{observation }{ A sequence of observations.}
\item{maxIterations }{ The maximum number of iterations in the Viterbi-training algorithm.}
\item{delta }{ Additional termination condition, if the transition
and emission matrices converge, before reaching the maximum
number of iterations (\code{maxIterations}). The difference
of transition and emission parameters in consecutive iterations
must be smaller than \code{delta} to terminate the algorithm.}
\item{pseudoCount }{ Adding this amount of pseudo counts in the estimation-step
of the Viterbi-training algorithm.}
}
\format{
Dimension and Format of the Arguments.
\describe{
\item{hmm }{A valid Hidden Markov Model, for example instantiated by \code{\link{initHMM}}.}
\item{observation }{A vector of observations.}
}
}
\value{
Return Values:
\item{hmm }{The inferred HMM. The representation is equivalent to the
representation in \code{\link{initHMM}}.}
\item{difference }{Vector of differences calculated from consecutive transition and emission
matrices in each iteration of the Viterbi-training.
The difference is the sum of the distances between consecutive
transition and emission matrices in the L2-Norm.}
}
\references{
For details see: Lawrence R. Rabiner: A Tutorial on Hidden Markov Models and Selected Applications
in Speech Recognition. Proceedings of the IEEE 77(2) p.257-286, 1989.
}
\examples{
# Initial HMM
hmm = initHMM(c("A","B"),c("L","R"),
transProbs=matrix(c(.9,.1,.1,.9),2),
emissionProbs=matrix(c(.5,.51,.5,.49),2))
print(hmm)
# Sequence of observation
a = sample(c(rep("L",100),rep("R",300)))
b = sample(c(rep("L",300),rep("R",100)))
observation = c(a,b)
# Viterbi-training
vt = viterbiTraining(hmm,observation,10)
print(vt$hmm)
}
\seealso{
See \code{\link{baumWelch}}.
}
\keyword{methods}
\author{Lin Himmelmann <hmm@linhi.com>, Scientific Software Development
}
|
e5b552fe3d396f66c17891130a3f7bafbcad8dd2
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/rocTree/man/rocTree-package.Rd
|
9280d93a0584b1588f5f71d123c8ef34267b8348
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,847
|
rd
|
rocTree-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rocTree_pkg.R
\docType{package}
\name{rocTree-package}
\alias{rocTree-package}
\alias{_PACKAGE}
\title{rocTree:Receiver Operating Characteristic (ROC)-Guided Classification Survival Tree and Ensemble.}
\description{
The \code{rocTree} package uses a Receiver Operating Characteristic (ROC)-guided classification
algorithm to grow and prune survival trees and ensembles.
}
\section{Introduction}{
The \code{rocTree} package provides implementations to a unified framework for
tree-structured analysis with censored survival outcomes.
Different from many existing tree building algorithms,
the \code{rocTree} package incorporates time-dependent covariates by constructing
a time-invariant partition scheme on the survivor population.
The partition-based risk prediction function is constructed using an algorithm guided by
the Receiver Operating Characteristic (ROC) curve.
The generalized time-dependent ROC curves for survival trees show that the
target hazard function yields the highest ROC curve.
The optimality of the target hazard function motivates us to use a weighted average of the
time-dependent area under the curve on a set of time points to evaluate the prediction
performance of survival trees and to guide splitting and pruning.
Moreover, the \code{rocTree} package also offers a novel ensemble algorithm,
where the ensemble is on unbiased martingale estimating equations.
}
\section{Methods}{
The package contains functions to construct ROC-guided survival trees and ensemble through
the main function \code{\link{rocTree}}.
}
\seealso{
\code{\link{rocTree}}
}
\author{
\strong{Maintainer}: Sy Han Chiou \email{schiou@utdallas.edu}
Authors:
\itemize{
\item Yifei Sun \email{ys3072@cumc.columbia.edu}
\item Mei-Cheng Wang \email{mcwang@jhu.edu}
}
}
|
abcc1430451a8f58d800f3f1d6b0316c1dbd093d
|
3d3502b01a3dbf15f0799d873c7b414bb35802fb
|
/man/subgroup_tree.Rd
|
da033f3e41ed2d96e4b074ee2a7f515fab5ed6fa
|
[] |
no_license
|
molson2/subgroupTree
|
f8b7c9477859e2b51a0e3d9d66143dd337af263f
|
4263665d8c00ca98485c5422be5cd41ce903d276
|
refs/heads/master
| 2020-08-08T15:58:02.907607
| 2019-12-03T08:29:59
| 2019-12-03T08:29:59
| 213,864,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,116
|
rd
|
subgroup_tree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subgroup_tree.R
\name{subgroup_tree}
\alias{subgroup_tree}
\title{subgroup detection tree}
\usage{
subgroup_tree(response, treated, X, direction = c("max", "min"), ...)
}
\arguments{
\item{response}{numeric outcome of interest}
\item{treated}{boolean vector of treatment assignments}
\item{X}{data.frame of predictors; must be either numeric or factor types}
\item{direction}{"max" to search for largest treatment effect, "min" to
search for smallest treatment effect}
\item{...}{additional arguments to rpart.control, such as maxdepth, etc.}
}
\description{
Fits a tree designed to aggressively seek out subsets of the data with
large (or small) average treatment effects
}
\examples{
\dontrun{
set.seed(123)
n = 500
p = 5
treated = sample(c(TRUE, FALSE), n, replace = TRUE)
X = as.data.frame(matrix(rnorm(n * p), n))
high_ate = X[, 1] > 0.5
response = rbinom(n, 1, ifelse(high_ate & treated, 0.9, 0.5))
max_tree = subgroup_tree(response, treated, X, 'max', maxdepth = 2,
minbucket = 50)
print(max_tree)
}
}
|
29ec37987afbf9a9a208f8c5c0f82a218a5d7f4e
|
7d7323289d118e11f2ca23fbe977f4555739dc28
|
/man/conTemporal.Rd
|
46bd340eae8e4e20c671d8dcd9b6e3085a96cfb9
|
[] |
no_license
|
jedalong/wildlifeDI
|
7333e67e20d1e99664fcec3c67eb0f12a6a86ae8
|
55b726212431e001a77e8369b510dacedc662e49
|
refs/heads/master
| 2022-12-22T05:09:37.676341
| 2022-12-19T14:34:47
| 2022-12-19T14:34:47
| 110,672,734
| 13
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,119
|
rd
|
conTemporal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conTemporal.R
\name{conTemporal}
\alias{conTemporal}
\title{conTemporal}
\usage{
conTemporal(traj, units = "auto")
}
\arguments{
\item{traj}{an object of the class \code{ltraj} which is output from the function \code{conPhase}.}
\item{units}{units of duration e.g., \code{'mins'} (see \code{difftime}).}
}
\value{
A data frame, with the time and duration attributes associated with contact phases.
}
\description{
Create a summary data frame of the timing and duration of contact phases.
}
\details{
This function is used to calculate the start and end times of contact phases, and their duration following use of the \code{conPhase} function.
}
\examples{
\dontrun{
data(does)
doecons <- conProcess(does,tc=15*60,dc=50)
doephas <- conPhase(doecons,pc=60*60)
conTemporal(doephas)
}
}
\references{
Long, JA, Webb, SL, Harju, SM, Gee, KL (2022) Analyzing Contacts and Behavior from High Frequency
Tracking Data Using the wildlifeDI R Package. \emph{Geographical Analysis}. \bold{54}, 648--663.
}
\seealso{
conPhase
}
\keyword{contacts}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.