content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#service_data_new_data_eda.csv
dataset <- read.csv(file.choose())
View(dataset)
str(dataset)
# ๋ฒ์ฃผํ vs ๋ฒ์ฃผํ์ ๊ฐ์ง๊ณ ๋ฐ์ดํฐ์ ๋ถํฌ๋ฅผ ํ์ธํ๋ค๋ฉด?
# 1. resident2, gender2๋ฅผ ๋ฒ์ฃผํ์ผ๋ก ๋ณํ
dataset$resident2 <- factor(dataset$resident2)
dataset$gender2 <- factor(dataset$gender2)
levels(dataset$resident2)
levels(dataset$gender2)
# 2. ๋ ๋ณ์๋ฅผ table()ํจ์๋ฅผ ์ด์ฉํ์ฌ ๋ถํฌ๋ฅผ ํ์ธํด๋ณด์
resident_gender <- table(dataset$resident2, dataset$gender2)
#๋ง๋ ๊ฐ๋ก๋ก horiz์ด์ฉ
barplot(resident_gender,horiz = T)
#ํ ๋ง๋๋ก ๋์ ๋์ด์๋๊ฑฐ๋ฅผ ๋ฉํฐ๋ฐ๋ก ๋์ด
barplot(resident_gender,horiz = T,beside = T)
#๋ฒ๋ก ์ถ๊ฐ
barplot(resident_gender,horiz = T,beside = T, legend = row.names(resident_gender))
#์ ์ง์
barplot(resident_gender,horiz = T,beside = T, legend = row.names(resident_gender), col = rainbow(5))
###์ ๊ทธ๋ํ๋ ๋น์ทํ๊ฒ ggplot์ด์ฉ
resident_gender_df <- data.frame(resident_gender)
###๊ทธ๋ฃน์ ๋ฐ์คํ๋กฏํ ๋ ํน์ ๊ทธ๋ฃน์ผ๋ก ๋ฌถ์ด์ง๋๋ฐ ์ฌ๊ธฐ์๋ fill์์ฒด์์ ๊ทธ๋ฃนํ๋์ด์ง
ggplot(data = resident_gender_df, aes(x=Freq, y = Var2, fill = Var1, group = Var1)) +
geom_bar(stat = "identity", position = 'dodge')
#๋ชจ์์ดํฌํ๋กฏ
mosaicplot(resident_gender, col = rainbow(2))
# ์ง์
์ ํ(job2) vs ๋์ด(age2)
dataset$job2 <- factor(dataset$job2)
dataset$age2 <- factor(dataset$age2)
jobage <- table(dataset$job2, dataset$age2)
barplot(jobage, beside = T,legend = row.names(jobage), col = rainbow(3))
jobage_df <- data.frame(jobage)
ggplot(data = jobage_df, aes(x=Freq, y=Var2, fill = Var1, group = Var1)) +
geom_bar(stat = "identity", position = "dodge") +
coord_flip()
# ์ซ์ํ vs ๋ฒ์ฃผํ
# ์ง์
์ ํ์ ๋ฐ๋ฅธ ๋์ด ๋น์จ
# ์นดํ
๊ณ ๋ฆฌ ์ ํ๋ณ ์๊ฐํ
install.packages("lattice")
library(lattice)
?densityplot
#density๋ x์ถ๊ฐ ๋จผ์ ์ ์ํ๊ณ ๋ฐ์ดํฐ์ ์, ๊ทผ๋ฐ ํฉํฐํ์ด์ด์ผํจ
densityplot(dataset$age2, dataset)
str(dataset)
#auto.key = T ๋ก ๋ฒ๋ก ์ค์
densityplot(dataset$age, dataset, group = dataset$job2, auto.key = T)
ggplot(data = dataset, aes(x= age, fill = job2)) +
geom_bar(width = .5, position = "dodge")
##์ค์ต3!!!
# ๋ฐ์ดํฐ ํ๋ ์์ ๋ณต์ฌ๋ณธ ์์ฑํ๊ธฐ
library(ggplot2)
midwest
midwest_raw <- data.frame(midwest)
midwest_new <- midwest_raw
str(midwest_new)
head(midwest_new)
# [๋ฌธ์ ]
# poptotal(์ ์ฒด์ธ๊ตฌ) ๋ณ์๋ฅผ total๋ก,
# popasian(์์์ ์ธ๊ตฌ) ๋ณ์๋ฅผ asian์ผ๋ก ์์ ํ์ธ์.
midwest_new <- rename(midwest_new, "total" = "poptotal")
midwest_new <- rename(midwest_new, "asian" = "popasian")
library(dplyr)
# [๋ฌธ์ ]
# total, asian ๋ณ์๋ฅผ ์ด์ฉํด '์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ' percasian ํ์๋ณ์๋ฅผ ๋ง๋ค๊ณ ,
# ํ์คํ ๊ทธ๋จ์ ๋ง๋ค์ด ๋์๋ค์ด ์ด๋ป๊ฒ ๋ถํฌํ๋์ง ์ดํด๋ณด์ธ์.
midwest_new$"percasian2" <- (midwest_new$asian / midwest_new$total)*100
hist(midwest_new$percasian2)
# [๋ฌธ์ ]
# ์์์ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ ์ ์ฒด ํ๊ท ์ ๊ตฌํ๊ณ ,
# ํ๊ท ์ ์ด๊ณผํ๋ฉด "large",
# ๊ทธ ์ธ์๋ "small"์ ๋ถ์ฌํ๋ mean ํ์๋ณ์๋ฅผ ๋ง๋ค์ด ๋ณด์ธ์.
mean(midwest_new$percasian2)
midwest_new$mean_percasian2 <- ifelse(midwest_new$percasian2 > mean(midwest_new$percasian2),"large","small")
# [๋ฌธ์ ]
# "large"์ "small"์ ํด๋นํ๋ ์ง์ญ์ด ์ผ๋ง๋ ๋๋์ง ๋น๋ํ์ ๋น๋ ๋ง๋ ๊ทธ๋ํ๋ฅผ ๋ง๋ค์ด ํ์ธํด ๋ณด์ธ์.
densityplot(midwest_new$percasian2, midwest_new, group = midwest_new$country, auto.key = T)
ggplot(data = midwest_new,
aes(x=percasian2,y=county)) +
geom_bar(stat = "identity",width = .1, position = "dodge") +
geom_point()
teacher <- as.data.frame(table(midwest_new$mean_percasian2))
ggplot(teacher,
aes(x=Var1, y=Freq)) +
geom_bar(stat = "identity", width = .2)
# ggplot2์ midwest ๋ฐ์ดํฐ๋ฅผ ์ฌ์ฉํ์ฌ ๋ฐ์ดํฐ ๋ถ์์ ์ค์ตํ๋ ๋ฌธ์ ์
๋๋ค.
# popadults๋ ํด๋น ์ง์ญ์ ์ฑ์ธ ์ธ๊ตฌ, poptotal์ ์ ์ฒด ์ธ๊ตฌ๋ฅผ ๋ํ๋
๋๋ค.
# 1๋ฒ ๋ฌธ์
# midwest ๋ฐ์ดํฐ์ '์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ' ๋ณ์๋ฅผ ์ถ๊ฐํ์ธ์.
midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" <- ((midwest_new$total - midwest_new$popadults) / midwest_new$total)*100
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(percyouth = (poptotal - popadults)/poptotal * 100)
# 2๋ฒ ๋ฌธ์
# ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ด ๊ฐ์ฅ ๋์ ์์ 5๊ฐ county(์ง์ญ)์ ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ ์ถ๋ ฅํ์์ค.
tail(sort(midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ"), n=5)
##๊ฐ์ฌ๋ ๋ต
midwest_new %>% arrange(desc(percyouth)) %>%
dplyr::select(county, percyouth) %>%
head(5)
# 3๋ฒ ๋ฌธ์
# ๋ค์๊ณผ ๊ฐ์ ๋ถ๋ฅํ์ ๊ธฐ์ค์ ๋ฐ๋ผ ๋ฏธ์ฑ๋
๋น์จ ๋ฑ๊ธ ๋ณ์๋ฅผ ์ถ๊ฐํ๊ณ , ๊ฐ ๋ฑ๊ธ์ ๋ช ๊ฐ์ ์ง์ญ์ด ์๋์ง ์์๋ณด์ธ์.
# ๋ถ๋ฅ ๊ธฐ์ค
# large 40%์ด์
# middle 30 ~ 40๋ฏธ๋ง
# small 30๋ฏธ๋ง
midwest_new$"๋ฏธ์ฑ๋
๋น์จ ๋ฑ๊ธ" <- ifelse("์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" >= 40,"large",ifelse("์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" >= 30,"middle","small"))
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(gradeyouth = ifelse(percyouth >= 40 ,"large",
ifelse(percyouth >= 30,"middle","small")))
table(midwest_new$gradeyouth)
# 4๋ฒ ๋ฌธ์
# popasian์ ํด๋น ์ง์ญ์ ์์์์ธ ์ธ๊ตฌ๋ฅผ ๋ํ๋
๋๋ค.
# '์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ' ๋ณ์๋ฅผ ์ถ๊ฐํ๊ณ ํ์ 10๊ฐ ์ง์ญ์ state(์ฃผ), county(์ง์ญ), ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ ์ถ๋ ฅํ์ธ์.
midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ" <- (midwest_new$asian / midwest_new$total)*100
head(sort(midwest_new$state), n=10)
head(sort(midwest_new$county), n=10)
head(sort(midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ"), n=10)
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(ratio_asian = popasian/poptotal*100)
midwest_new %>% arrange(ratio_asian) %>%
dplyr::select(state, county, ratio_asian) %>%
head(10)
# ์๊ณ์ด(time series)
# ๋ณ์๊ฐ์ ์๊ด์ฑ
# iris ์๊ณ์ด ๋ฐ์ดํฐ ๋ง๋ค๊ธฐ
iris
seq <- as.integer( rownames(iris) )
?cbind
irisDF <- cbind(seq = as.integer(rownames(iris)),iris)
# x์ถ์ seq
# y์ถ์ -Species
#๋๋คํ๊ฒ 4์ ์ถ์ถ
colsColor <- topo.colors(4,alpha = .4)
#2์ด๋ถํฐ 5์ด๊น์ง ์ด๋ฆ๋ถ์ฌ
names(colsColor) <- names(irisDF)[2:5]
#meltํจ์ ์ด์ฉํด์ ๊ธฐ์คseq, species
#๋๋จธ์ง ์ปฌ๋ผ์ variableํด์ wide -> long
irisDF
library(reshape2)
iris_melt <- melt(irisDF, id = c("seq","Species"))
##๋ฒ์ ผ๋๋ฌธ์ ์ค๋ฅ๊ฐ ๋๋ฉด id๋์ id.vars๋ก ํํ
library(ggplot2)
g <- ggplot(data = iris_melt,
aes(x=seq, y=value, col = variable)) +
geom_line(cex = 0.8, show.legend = T)
#์ถ๊ฐ์ ์ผ๋ก ์ ์ ์์๊ณผ ๋ฒ๋ก ๋ผ๋ฒจ๋ง
g <- g + scale_color_manual(
name = "",
values = colsColor[iris_melt$variable],
labels = c("๊ฝ๋ฐ์นจ ๊ธธ์ด", "๊ฝ๋ฐ์นจ ๋๋น", "๊ฝ์ ๊ธธ์ด", "๊ฝ์ ๋๋น")
)
# ๋ ์ง
# ๋ฌธ์๋ณ์๋ฅผ ๋ ์ง๋ณ์ ๋ณํ
# R์ ๋ ์ง ๋ฐ์ดํฐ ํ์
"POSIXct"
# as.POSIXct()
str_date <- "200730 13:40"
as.POSIXct(str_date, format = "%y%m%d %H:%M")
#2020์ด๋ผY 20์ด๋ฉดy
str_date <- "2020-07-30 13:40:01 PM"
as.POSIXct(str_date, format = "%Y-%m-%d %H:%M:%S")
str_date <- "07/30/20 13:40:01"
as.POSIXct(str_date, format = "%m/%d/%y %H:%M:%S")
cospi <- read.csv(file.choose())
#๋ ์ง๋ณ ์ฃผ๊ฐ ๋ง๋ค๊ธฐ(์๊ฐ์ด ์์ด์ ๋ฐ๋ก ์๊ณ์ด ๋ง๋คํ์ ์์)
cospi_test <- cospi
cospi_melt <- melt(cospi_test, id = c("Date","Volume"))
ggplot(data = cospi_melt, aes(x= Date,y=value,col = variable, group = variable)) +
geom_line(cex = 0.5, show.legend = T)
##์ค์ต4!!!
spanish_train <- read.csv(file.choose())
str(spanish_train)
# 1.
# ๋ฐ์ดํฐ ๋ด์ ๊ฒฐ์ธก์น ์ฌ๋ถ๋ฅผ ํ์ธํ๋ค.
# NA๊ฐ์ด 310681๊ฐ ์๋ ๊ฒ์ ํ์ธํ ์ ์๋ค.
#๊ฒฐ์ธก์น ํ์ธ๋ฐฉ๋ฒ
spanish_train[!complete.cases(spanish_train),]
str(spanish_train[!complete.cases(spanish_train),])
#๊ฒฐ์ธก์น ์ ๊ฑฐ
test_renfe2 <- spanish_train[complete.cases(spanish_train),]
str(test_renfe2)
# 2.
# filter์ !is.naํจ์๋ฅผ ํตํด ๊ฒฐ์ธก์น๋ฅผ ๋ชจ๋ ์ ๊ฑฐํ๋ค.
# 3.
# ๋ง๋๋ฆฌ๋ ์ถ๋ฐ
# ๋ง๋๋ฆฌ๋์์ ์ถ๋ฐํ๋ ์ด์ฐจ ๋ฐ์ดํฐ๋ง์ ๋ผ์ด๋ด madrid_origin์ด๋ผ๋ ๋ณ์๋ก ์ ์ฅํ๊ณ
# ์ฐ์ , ๋ง๋๋ฆฌ๋์์ ์ถ๋ฐํ๋ ์ด์ฐจ ๋ฐ์ดํฐ๋ง์ ์ด์ฉํด ๋น๊ตํด๋ณด๊ธฐ๋ก ํ๋ค.
madrid_origin <- filter(test_renfe2, test_renfe2$origin == "MADRID")
str(madrid_origin)
# 4.
# summaryํจ์๋ฅผ ํตํด ์ผ๋ฐ์ ๋ฐ์ดํฐ ์ ๋ณด๋ฅผ ๋ค์ ํ์ธํ๋ค.
summary(test_renfe2)
# 5.
# ๋ง๋๋ฆฌ๋ ์ถ๋ฐ ์ด์ฐจ์ ๋น๋ ์
# ๋ง๋๋ฆฌ๋๋ฅผ ์ถ๋ฐํ๋ ๊ธฐ์ฐจ์ ๋์ฐฉ ๋์๋ณ ์ดํ๋น๋ ์๋ฅผ ๋ฐํํ๋ก ๋ํ๋ด๋ณด์
ggplot(data = madrid_origin,
aes(x=destination,fill=origin)) +
geom_bar(width = .5, position = "dodge")
# 6.
# ๋ง๋๋ฆฌ๋๋ฐ ๋์ฐฉ์ง๋ณ ๊ฐ๊ฒฉ ๋ฐ์คํ๋กฏ์ผ๋ก ํฐ์ผ๊ฐ๊ฒฉ์ ๋์ ์์ ํ์ธํด๋ณด์
boxplot(price ~ destination, data=madrid_origin)
str(madrid_origin)
# 7.
# AVE์์ข์ ๋ฑ๊ธ๋ณ ๊ฐ๊ฒฉ๋ฐ์คํ๋กฏ์ด ์๊ฐํ
# ๋๊ฐ์ ์ด์ฐจ์ ๋๊ฐ์ ์ข์๋ฑ๊ธ, ๋๊ฐ์ ๋์ฐฉ์ง๋ผ ํ๋๋ผ๋ ๊ฐ๊ฒฉ์ด ์ฐจ์ด๊ฐ ๋๋ ๊ฒ์ ํ์ธํ ์ ์๋ค.
AVE_MADRID <- filter(madrid_origin, madrid_origin$train_type == "AVE")
boxplot(price ~ destination,data= AVE_MADRID)
# 8.
# ์ด ์ฐจ์ด๋ฅผ ์ดํดํ๊ณ ์ถ์ด ์๊ณ์ด๋ก ๋ฐ์ดํฐ๋ฅผ ๋ง๋ค์ด๋ณด์๋ค.
# 9.
#๋ ์ง ๋ฐ์ดํฐ ๋ณํ. as.POSIXct๋ factorํ์์ ๋ ์ง ์ฌ์ฉ๊ฐ๋ฅ
# 10.
# ์ปฌ๋ผ์ด๋ฆ์ง์
# colnames(a_b) <- c("preferente","Turista")
# 11.
# ๋์ฐฉ์ง๋ณ, ํธ๋ ์ธ ํด๋์ค๋ณ๋ก ๊ฐ๊ฒฉ์ ๋ฐ์คํ๋กฏํํ๋ก ๋ํ๋ผ ์๋ ์๋ค.
| /eighth_class.R | no_license | Lucidhomme/R | R | false | false | 10,122 | r | #service_data_new_data_eda.csv
dataset <- read.csv(file.choose())
View(dataset)
str(dataset)
# ๋ฒ์ฃผํ vs ๋ฒ์ฃผํ์ ๊ฐ์ง๊ณ ๋ฐ์ดํฐ์ ๋ถํฌ๋ฅผ ํ์ธํ๋ค๋ฉด?
# 1. resident2, gender2๋ฅผ ๋ฒ์ฃผํ์ผ๋ก ๋ณํ
dataset$resident2 <- factor(dataset$resident2)
dataset$gender2 <- factor(dataset$gender2)
levels(dataset$resident2)
levels(dataset$gender2)
# 2. ๋ ๋ณ์๋ฅผ table()ํจ์๋ฅผ ์ด์ฉํ์ฌ ๋ถํฌ๋ฅผ ํ์ธํด๋ณด์
resident_gender <- table(dataset$resident2, dataset$gender2)
#๋ง๋ ๊ฐ๋ก๋ก horiz์ด์ฉ
barplot(resident_gender,horiz = T)
#ํ ๋ง๋๋ก ๋์ ๋์ด์๋๊ฑฐ๋ฅผ ๋ฉํฐ๋ฐ๋ก ๋์ด
barplot(resident_gender,horiz = T,beside = T)
#๋ฒ๋ก ์ถ๊ฐ
barplot(resident_gender,horiz = T,beside = T, legend = row.names(resident_gender))
#์ ์ง์
barplot(resident_gender,horiz = T,beside = T, legend = row.names(resident_gender), col = rainbow(5))
###์ ๊ทธ๋ํ๋ ๋น์ทํ๊ฒ ggplot์ด์ฉ
resident_gender_df <- data.frame(resident_gender)
###๊ทธ๋ฃน์ ๋ฐ์คํ๋กฏํ ๋ ํน์ ๊ทธ๋ฃน์ผ๋ก ๋ฌถ์ด์ง๋๋ฐ ์ฌ๊ธฐ์๋ fill์์ฒด์์ ๊ทธ๋ฃนํ๋์ด์ง
ggplot(data = resident_gender_df, aes(x=Freq, y = Var2, fill = Var1, group = Var1)) +
geom_bar(stat = "identity", position = 'dodge')
#๋ชจ์์ดํฌํ๋กฏ
mosaicplot(resident_gender, col = rainbow(2))
# ์ง์
์ ํ(job2) vs ๋์ด(age2)
dataset$job2 <- factor(dataset$job2)
dataset$age2 <- factor(dataset$age2)
jobage <- table(dataset$job2, dataset$age2)
barplot(jobage, beside = T,legend = row.names(jobage), col = rainbow(3))
jobage_df <- data.frame(jobage)
ggplot(data = jobage_df, aes(x=Freq, y=Var2, fill = Var1, group = Var1)) +
geom_bar(stat = "identity", position = "dodge") +
coord_flip()
# ์ซ์ํ vs ๋ฒ์ฃผํ
# ์ง์
์ ํ์ ๋ฐ๋ฅธ ๋์ด ๋น์จ
# ์นดํ
๊ณ ๋ฆฌ ์ ํ๋ณ ์๊ฐํ
install.packages("lattice")
library(lattice)
?densityplot
#density๋ x์ถ๊ฐ ๋จผ์ ์ ์ํ๊ณ ๋ฐ์ดํฐ์ ์, ๊ทผ๋ฐ ํฉํฐํ์ด์ด์ผํจ
densityplot(dataset$age2, dataset)
str(dataset)
#auto.key = T ๋ก ๋ฒ๋ก ์ค์
densityplot(dataset$age, dataset, group = dataset$job2, auto.key = T)
ggplot(data = dataset, aes(x= age, fill = job2)) +
geom_bar(width = .5, position = "dodge")
##์ค์ต3!!!
# ๋ฐ์ดํฐ ํ๋ ์์ ๋ณต์ฌ๋ณธ ์์ฑํ๊ธฐ
library(ggplot2)
midwest
midwest_raw <- data.frame(midwest)
midwest_new <- midwest_raw
str(midwest_new)
head(midwest_new)
# [๋ฌธ์ ]
# poptotal(์ ์ฒด์ธ๊ตฌ) ๋ณ์๋ฅผ total๋ก,
# popasian(์์์ ์ธ๊ตฌ) ๋ณ์๋ฅผ asian์ผ๋ก ์์ ํ์ธ์.
midwest_new <- rename(midwest_new, "total" = "poptotal")
midwest_new <- rename(midwest_new, "asian" = "popasian")
library(dplyr)
# [๋ฌธ์ ]
# total, asian ๋ณ์๋ฅผ ์ด์ฉํด '์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ' percasian ํ์๋ณ์๋ฅผ ๋ง๋ค๊ณ ,
# ํ์คํ ๊ทธ๋จ์ ๋ง๋ค์ด ๋์๋ค์ด ์ด๋ป๊ฒ ๋ถํฌํ๋์ง ์ดํด๋ณด์ธ์.
midwest_new$"percasian2" <- (midwest_new$asian / midwest_new$total)*100
hist(midwest_new$percasian2)
# [๋ฌธ์ ]
# ์์์ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ ์ ์ฒด ํ๊ท ์ ๊ตฌํ๊ณ ,
# ํ๊ท ์ ์ด๊ณผํ๋ฉด "large",
# ๊ทธ ์ธ์๋ "small"์ ๋ถ์ฌํ๋ mean ํ์๋ณ์๋ฅผ ๋ง๋ค์ด ๋ณด์ธ์.
mean(midwest_new$percasian2)
midwest_new$mean_percasian2 <- ifelse(midwest_new$percasian2 > mean(midwest_new$percasian2),"large","small")
# [๋ฌธ์ ]
# "large"์ "small"์ ํด๋นํ๋ ์ง์ญ์ด ์ผ๋ง๋ ๋๋์ง ๋น๋ํ์ ๋น๋ ๋ง๋ ๊ทธ๋ํ๋ฅผ ๋ง๋ค์ด ํ์ธํด ๋ณด์ธ์.
densityplot(midwest_new$percasian2, midwest_new, group = midwest_new$country, auto.key = T)
ggplot(data = midwest_new,
aes(x=percasian2,y=county)) +
geom_bar(stat = "identity",width = .1, position = "dodge") +
geom_point()
teacher <- as.data.frame(table(midwest_new$mean_percasian2))
ggplot(teacher,
aes(x=Var1, y=Freq)) +
geom_bar(stat = "identity", width = .2)
# ggplot2์ midwest ๋ฐ์ดํฐ๋ฅผ ์ฌ์ฉํ์ฌ ๋ฐ์ดํฐ ๋ถ์์ ์ค์ตํ๋ ๋ฌธ์ ์
๋๋ค.
# popadults๋ ํด๋น ์ง์ญ์ ์ฑ์ธ ์ธ๊ตฌ, poptotal์ ์ ์ฒด ์ธ๊ตฌ๋ฅผ ๋ํ๋
๋๋ค.
# 1๋ฒ ๋ฌธ์
# midwest ๋ฐ์ดํฐ์ '์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ' ๋ณ์๋ฅผ ์ถ๊ฐํ์ธ์.
midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" <- ((midwest_new$total - midwest_new$popadults) / midwest_new$total)*100
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(percyouth = (poptotal - popadults)/poptotal * 100)
# 2๋ฒ ๋ฌธ์
# ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ด ๊ฐ์ฅ ๋์ ์์ 5๊ฐ county(์ง์ญ)์ ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ ์ถ๋ ฅํ์์ค.
tail(sort(midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ"), n=5)
##๊ฐ์ฌ๋ ๋ต
midwest_new %>% arrange(desc(percyouth)) %>%
dplyr::select(county, percyouth) %>%
head(5)
# 3๋ฒ ๋ฌธ์
# ๋ค์๊ณผ ๊ฐ์ ๋ถ๋ฅํ์ ๊ธฐ์ค์ ๋ฐ๋ผ ๋ฏธ์ฑ๋
๋น์จ ๋ฑ๊ธ ๋ณ์๋ฅผ ์ถ๊ฐํ๊ณ , ๊ฐ ๋ฑ๊ธ์ ๋ช ๊ฐ์ ์ง์ญ์ด ์๋์ง ์์๋ณด์ธ์.
# ๋ถ๋ฅ ๊ธฐ์ค
# large 40%์ด์
# middle 30 ~ 40๋ฏธ๋ง
# small 30๋ฏธ๋ง
midwest_new$"๋ฏธ์ฑ๋
๋น์จ ๋ฑ๊ธ" <- ifelse("์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" >= 40,"large",ifelse("์ ์ฒด ์ธ๊ตฌ ๋๋น ๋ฏธ์ฑ๋
์ธ๊ตฌ ๋ฐฑ๋ถ์จ" >= 30,"middle","small"))
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(gradeyouth = ifelse(percyouth >= 40 ,"large",
ifelse(percyouth >= 30,"middle","small")))
table(midwest_new$gradeyouth)
# 4๋ฒ ๋ฌธ์
# popasian์ ํด๋น ์ง์ญ์ ์์์์ธ ์ธ๊ตฌ๋ฅผ ๋ํ๋
๋๋ค.
# '์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ' ๋ณ์๋ฅผ ์ถ๊ฐํ๊ณ ํ์ 10๊ฐ ์ง์ญ์ state(์ฃผ), county(์ง์ญ), ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ์ ์ถ๋ ฅํ์ธ์.
midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ" <- (midwest_new$asian / midwest_new$total)*100
head(sort(midwest_new$state), n=10)
head(sort(midwest_new$county), n=10)
head(sort(midwest_new$"์ ์ฒด ์ธ๊ตฌ ๋๋น ์์์์ธ ์ธ๊ตฌ ๋ฐฑ๋ถ์จ"), n=10)
##๊ฐ์ฌ๋ ๋ต
midwest_new <- midwest_new %>%
mutate(ratio_asian = popasian/poptotal*100)
midwest_new %>% arrange(ratio_asian) %>%
dplyr::select(state, county, ratio_asian) %>%
head(10)
# ์๊ณ์ด(time series)
# ๋ณ์๊ฐ์ ์๊ด์ฑ
# iris ์๊ณ์ด ๋ฐ์ดํฐ ๋ง๋ค๊ธฐ
iris
seq <- as.integer( rownames(iris) )
?cbind
irisDF <- cbind(seq = as.integer(rownames(iris)),iris)
# x์ถ์ seq
# y์ถ์ -Species
#๋๋คํ๊ฒ 4์ ์ถ์ถ
colsColor <- topo.colors(4,alpha = .4)
#2์ด๋ถํฐ 5์ด๊น์ง ์ด๋ฆ๋ถ์ฌ
names(colsColor) <- names(irisDF)[2:5]
#meltํจ์ ์ด์ฉํด์ ๊ธฐ์คseq, species
#๋๋จธ์ง ์ปฌ๋ผ์ variableํด์ wide -> long
irisDF
library(reshape2)
iris_melt <- melt(irisDF, id = c("seq","Species"))
##๋ฒ์ ผ๋๋ฌธ์ ์ค๋ฅ๊ฐ ๋๋ฉด id๋์ id.vars๋ก ํํ
library(ggplot2)
g <- ggplot(data = iris_melt,
aes(x=seq, y=value, col = variable)) +
geom_line(cex = 0.8, show.legend = T)
#์ถ๊ฐ์ ์ผ๋ก ์ ์ ์์๊ณผ ๋ฒ๋ก ๋ผ๋ฒจ๋ง
g <- g + scale_color_manual(
name = "",
values = colsColor[iris_melt$variable],
labels = c("๊ฝ๋ฐ์นจ ๊ธธ์ด", "๊ฝ๋ฐ์นจ ๋๋น", "๊ฝ์ ๊ธธ์ด", "๊ฝ์ ๋๋น")
)
# ๋ ์ง
# ๋ฌธ์๋ณ์๋ฅผ ๋ ์ง๋ณ์ ๋ณํ
# R์ ๋ ์ง ๋ฐ์ดํฐ ํ์
"POSIXct"
# as.POSIXct()
str_date <- "200730 13:40"
as.POSIXct(str_date, format = "%y%m%d %H:%M")
#2020์ด๋ผY 20์ด๋ฉดy
str_date <- "2020-07-30 13:40:01 PM"
as.POSIXct(str_date, format = "%Y-%m-%d %H:%M:%S")
str_date <- "07/30/20 13:40:01"
as.POSIXct(str_date, format = "%m/%d/%y %H:%M:%S")
cospi <- read.csv(file.choose())
#๋ ์ง๋ณ ์ฃผ๊ฐ ๋ง๋ค๊ธฐ(์๊ฐ์ด ์์ด์ ๋ฐ๋ก ์๊ณ์ด ๋ง๋คํ์ ์์)
cospi_test <- cospi
cospi_melt <- melt(cospi_test, id = c("Date","Volume"))
ggplot(data = cospi_melt, aes(x= Date,y=value,col = variable, group = variable)) +
geom_line(cex = 0.5, show.legend = T)
##์ค์ต4!!!
spanish_train <- read.csv(file.choose())
str(spanish_train)
# 1.
# ๋ฐ์ดํฐ ๋ด์ ๊ฒฐ์ธก์น ์ฌ๋ถ๋ฅผ ํ์ธํ๋ค.
# NA๊ฐ์ด 310681๊ฐ ์๋ ๊ฒ์ ํ์ธํ ์ ์๋ค.
#๊ฒฐ์ธก์น ํ์ธ๋ฐฉ๋ฒ
spanish_train[!complete.cases(spanish_train),]
str(spanish_train[!complete.cases(spanish_train),])
#๊ฒฐ์ธก์น ์ ๊ฑฐ
test_renfe2 <- spanish_train[complete.cases(spanish_train),]
str(test_renfe2)
# 2.
# filter์ !is.naํจ์๋ฅผ ํตํด ๊ฒฐ์ธก์น๋ฅผ ๋ชจ๋ ์ ๊ฑฐํ๋ค.
# 3.
# ๋ง๋๋ฆฌ๋ ์ถ๋ฐ
# ๋ง๋๋ฆฌ๋์์ ์ถ๋ฐํ๋ ์ด์ฐจ ๋ฐ์ดํฐ๋ง์ ๋ผ์ด๋ด madrid_origin์ด๋ผ๋ ๋ณ์๋ก ์ ์ฅํ๊ณ
# ์ฐ์ , ๋ง๋๋ฆฌ๋์์ ์ถ๋ฐํ๋ ์ด์ฐจ ๋ฐ์ดํฐ๋ง์ ์ด์ฉํด ๋น๊ตํด๋ณด๊ธฐ๋ก ํ๋ค.
madrid_origin <- filter(test_renfe2, test_renfe2$origin == "MADRID")
str(madrid_origin)
# 4.
# summaryํจ์๋ฅผ ํตํด ์ผ๋ฐ์ ๋ฐ์ดํฐ ์ ๋ณด๋ฅผ ๋ค์ ํ์ธํ๋ค.
summary(test_renfe2)
# 5.
# ๋ง๋๋ฆฌ๋ ์ถ๋ฐ ์ด์ฐจ์ ๋น๋ ์
# ๋ง๋๋ฆฌ๋๋ฅผ ์ถ๋ฐํ๋ ๊ธฐ์ฐจ์ ๋์ฐฉ ๋์๋ณ ์ดํ๋น๋ ์๋ฅผ ๋ฐํํ๋ก ๋ํ๋ด๋ณด์
ggplot(data = madrid_origin,
aes(x=destination,fill=origin)) +
geom_bar(width = .5, position = "dodge")
# 6.
# ๋ง๋๋ฆฌ๋๋ฐ ๋์ฐฉ์ง๋ณ ๊ฐ๊ฒฉ ๋ฐ์คํ๋กฏ์ผ๋ก ํฐ์ผ๊ฐ๊ฒฉ์ ๋์ ์์ ํ์ธํด๋ณด์
boxplot(price ~ destination, data=madrid_origin)
str(madrid_origin)
# 7.
# AVE์์ข์ ๋ฑ๊ธ๋ณ ๊ฐ๊ฒฉ๋ฐ์คํ๋กฏ์ด ์๊ฐํ
# ๋๊ฐ์ ์ด์ฐจ์ ๋๊ฐ์ ์ข์๋ฑ๊ธ, ๋๊ฐ์ ๋์ฐฉ์ง๋ผ ํ๋๋ผ๋ ๊ฐ๊ฒฉ์ด ์ฐจ์ด๊ฐ ๋๋ ๊ฒ์ ํ์ธํ ์ ์๋ค.
AVE_MADRID <- filter(madrid_origin, madrid_origin$train_type == "AVE")
boxplot(price ~ destination,data= AVE_MADRID)
# 8.
# ์ด ์ฐจ์ด๋ฅผ ์ดํดํ๊ณ ์ถ์ด ์๊ณ์ด๋ก ๋ฐ์ดํฐ๋ฅผ ๋ง๋ค์ด๋ณด์๋ค.
# 9.
#๋ ์ง ๋ฐ์ดํฐ ๋ณํ. as.POSIXct๋ factorํ์์ ๋ ์ง ์ฌ์ฉ๊ฐ๋ฅ
# 10.
# ์ปฌ๋ผ์ด๋ฆ์ง์
# colnames(a_b) <- c("preferente","Turista")
# 11.
# ๋์ฐฉ์ง๋ณ, ํธ๋ ์ธ ํด๋์ค๋ณ๋ก ๊ฐ๊ฒฉ์ ๋ฐ์คํ๋กฏํํ๋ก ๋ํ๋ผ ์๋ ์๋ค.
|
#Access data from the openFEC api using R wrapper from Stephen Holzman
#https://stephenholzman.github.io/tidyusafec/articles/intro.html
library(tidyverse)
library(tidyusafec)
library(tigris)
library(leaflet)
#signup for api key at https://api.open.fec.gov/developers/. Save a one line file called "data.gov.key" in the root project folder, that one line assigning the key to a variable like the next line:
save_datagov_apikey(key = "n3BB27dCbHpsI0BAIyYi5i4nMa3xJk9AXF7cG2Hc", install = TRUE)
#I want to do the opposite of in, later as part of my filter, so I'm definining the %notin% function here
`%notin%` <- Negate(`%in%`)
#select all candidates running for Senate, unnest the data, deliver it in a df, and make sure they raised money
#For North Carolina races
#senate <- search_candidates(state = "NC", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for alaska races (this is useful because the file is somewhat small..... See my sampling script at the bottom for a way to take a radom sample to make testing much speedier)
#senate <- search_candidates(state = "AK", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for colorado races
#senate <- search_candidates(state = "CO", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for arizona races
#### Here's the queries I've been using for counties
#senate <- search_candidates(name = c("MCSALLY, MARTHA"), election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#I've split these into two queries, because my key currently only allows for 1,000 queries in an hour. This is the one for Kelly. When we make the maps, we can address this.
senate <- search_candidates(name = c("KELLY, MARK"), election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#get all their itemized contributions
get_itemized_contributions(data_structure = "tidy") %>%
#unnests the pesky committee column and creates unique columns for each of the nested list items.
#here's a great tutorial on how to get rid of nested lists https://cfss.uchicago.edu/notes/simplify-nested-lists/ Thanks Andrew Tran for pointing me to this
#something else he suggested. I didn't need it, but am putting it here for safe keeping https://jennybc.github.io/purrr-tutorial/ls00_inspect-explore.html
unnest_wider(committee, names_repair = "unique") %>%
#select only the fields I want.
select(schedule_type_full, report_type, line_number, line_number_label, contributor_state, is_individual, report_year, contributor_city, contribution_receipt_date, contributor_aggregate_ytd, two_year_transaction_period, contribution_receipt_amount, contributor_zip, contributor_name, state, party, committee_type_full, state_full, name, fec_election_type_desc, memo_code) %>%
# itemized individual contributions are recorded on line 11ai & we want to get the contributions given in the last 2-year cycle. We also want to exclude a double-count that happens with win red and act blue contributions. By excluding the x memo_code, we can eliminate those double-counts
filter(report_year > 2018 & line_number %in% "11AI" & memo_code %notin% "X" )
#preserve the zeroes
senate$contributor_zip <- as.character(senate$contributor_zip)
#Rural/urban codes were downloaded from https://www.ers.usda.gov/data-products/rural-urban-continuum-codes.aspx
#fec data accessed from https://classic.fec.gov/disclosurep/PDownload.do
#subsets the first 5 numbers of the zipcodes row https://www.rdocumentation.org/packages/base/versions/3.6.0/topics/substr
senate$contributor_zip <- substr(senate$contributor_zip, 1, 5)
#FYI, if you want the last 5, use https://www.rdocumentation.org/packages/FedData/versions/1.1.0/topics/substrRight
##########
#let's see what and where these senators are getting their funding. This query is just for Tillis donations... and I need to go back and make sure there's no pacs in it. Right now it's set up just to make the map work.
totals <- senate %>%
#this filter changes based on the race
group_by(contributor_zip) %>%
summarise( total_raised = sum(contribution_receipt_amount))
#returns tigris query file as a shapefile (if not, it's a weird list that doesn't join)
options(tigris_class = "sf")
#Download a Zip Code Tabulation Area (ZCTA) shapefile into R
#read in our shapefile
options(tigris_use_cache = TRUE)
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "NC")
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "AK")
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "CO")
code_shapefile <- zctas(cb = FALSE, year = 2010, state = "AZ")
#join democratic fundraising numbers with shapefile
#make sure the zip column is numeric and then join the two, so we can make the shapefile and contributions data work together like happy friends in leaflet
cong_tot <- left_join(code_shapefile, totals , by = c("ZCTA5CE10"= "contributor_zip" ))
#and there's two counties that have a total of 0 (their populations are a total of 27k residents), so we're going to replace those values with zeroes.We'll create a new column just in case
cong_tot <- cong_tot %>%
mutate(total_raised_no_na = replace_na(total_raised, 0))
##########
#########works, but is so slow because of all the shapefiles to plot.
#ggplot(cong_tot) +
# geom_sf(data = cong_tot) +
# aes(fill= cut(total_raised,breaks = c(-1, 10000, 30000, 60000, 120000), labels= c("0-9k", "10k-29k", "30k-59k", "60k-120k") )) +
# geom_sf(color="black") +
# scale_fill_manual(values = c("#FFFFFF", "#C9C5DB", "#938CB8", "#3E386D")) +
# theme_void() +
# labs(title="Funds raised by Thom Tillis in NC zip codes", caption="Source: Federal Elections Commission", color='legend', fill='legend title')
#leaflet works much faster and it gives us a more interactive graphic, which i'm partial to.
bins <- c(0, 100, 1000, 10000, 100000, 200000, Inf)
pal1 <- colorBin(palette = c("#FFFFFF", "#C9C5DB","#05B69C", "#F9A51A", "#C73D49"), domain = cong_tot$total_raised_no_na, bins = bins)
map <- leaflet(cong_tot) %>% addTiles()
state_popup1 <- paste0("<strong> District: </strong>",
cong_tot$ZCTA5CE10,
"<br><strong>Total Raised: </strong>",
cong_tot$total_raised_no_na)
leaflet(data = cong_tot) %>%
addProviderTiles("CartoDB.Positron") %>%
addPolygons(fillColor = ~pal1(total_raised_no_na),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = state_popup1) %>%
addLegend("bottomright", pal = pal1, values = ~total_raised_no_na,
title = "Total raised",
labFormat = labelFormat(prefix = " "))
#helpful for running code in a smaller subset of data for testing purposes
dt <-
#df %>%
#slices off number of rows from 1:xx in df
#slice(1:4)
#slices off random number of rows from data
sample_n(senate, 10)
#from zip code and noncensus packages. Was used in original code. And Supposedly cleans up zip codes, gets me FIPS codes. Cran has been rudely pulled down the orphan packages which I found rude. It's useful. But if it's gone, it's gone.
#library(zipcode)
#library(noncensus)
#senate$contributor_zip <- clean.zipcodes(senate$contributor_zip)
#conjures up the zip_codes table from the noncensus library. Which is a great resource for this project. Because, unlike the zip codes data(zipcodes) table. It has fips codes.
#data(zip_codes)
#however, there was a lack of zeroes in the fips code fields. And since the clean.zipcodes puts zeroes in front of some four-digit I'm using it here to match up the Census database.
#zip_codes$fips <- clean.zipcodes(zip_codes$fips)
#############This will be useful if we need to group by county. Not just by zip. Until then, it shall be noted and sit idle.
#counties <- read_csv("ZIP_COUNTY.csv")
#props to Dhmontgomery in newsnerdery. to helping me eliminate the excess dupes by using a combination of rank and filter...basically what top_n does. But with the added ability of adding the ties.method element. #I was able to select the highest ratios and then, in the case of ties, R randomly chose one.
#nodupes <- counties %>%
# group_by(zip) %>%
# mutate(rank = rank(tot_ratio, ties.method = "random")) %>%
# filter(rank < 2)
######################
#USEFUL DOCUMENTATION #
#https://s3.amazonaws.com/ire16/campaign-finance/MiningFECData.pdf | /old or unused scripts/vulnerable_senators_zips.R | no_license | ChrisCioffi/districts | R | false | false | 8,643 | r | #Access data from the openFEC api using R wrapper from Stephen Holzman
#https://stephenholzman.github.io/tidyusafec/articles/intro.html
library(tidyverse)
library(tidyusafec)
library(tigris)
library(leaflet)
#signup for api key at https://api.open.fec.gov/developers/. Save a one line file called "data.gov.key" in the root project folder, that one line assigning the key to a variable like the next line:
save_datagov_apikey(key = "n3BB27dCbHpsI0BAIyYi5i4nMa3xJk9AXF7cG2Hc", install = TRUE)
#I want to do the opposite of in, later as part of my filter, so I'm definining the %notin% function here
`%notin%` <- Negate(`%in%`)
#select all candidates running for Senate, unnest the data, deliver it in a df, and make sure they raised money
#For North Carolina races
#senate <- search_candidates(state = "NC", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for alaska races (this is useful because the file is somewhat small..... See my sampling script at the bottom for a way to take a radom sample to make testing much speedier)
#senate <- search_candidates(state = "AK", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for colorado races
#senate <- search_candidates(state = "CO", election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#for arizona races
#### Here's the queries I've been using for counties
#senate <- search_candidates(name = c("MCSALLY, MARTHA"), election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#I've split these into two queries, because my key currently only allows for 1,000 queries in an hour. This is the one for Kelly. When we make the maps, we can address this.
senate <- search_candidates(name = c("KELLY, MARK"), election_year = "2020", office = "S", candidate_status = "C" , has_raised_funds = TRUE, unnest_committees = TRUE ) %>%
#get all their itemized contributions
get_itemized_contributions(data_structure = "tidy") %>%
#unnests the pesky committee column and creates unique columns for each of the nested list items.
#here's a great tutorial on how to get rid of nested lists https://cfss.uchicago.edu/notes/simplify-nested-lists/ Thanks Andrew Tran for pointing me to this
#something else he suggested. I didn't need it, but am putting it here for safe keeping https://jennybc.github.io/purrr-tutorial/ls00_inspect-explore.html
unnest_wider(committee, names_repair = "unique") %>%
#select only the fields I want.
select(schedule_type_full, report_type, line_number, line_number_label, contributor_state, is_individual, report_year, contributor_city, contribution_receipt_date, contributor_aggregate_ytd, two_year_transaction_period, contribution_receipt_amount, contributor_zip, contributor_name, state, party, committee_type_full, state_full, name, fec_election_type_desc, memo_code) %>%
# itemized individual contributions are recorded on line 11ai & we want to get the contributions given in the last 2-year cycle. We also want to exclude a double-count that happens with win red and act blue contributions. By excluding the x memo_code, we can eliminate those double-counts
filter(report_year > 2018 & line_number %in% "11AI" & memo_code %notin% "X" )
#preserve the zeroes
senate$contributor_zip <- as.character(senate$contributor_zip)
#Rural/urban codes were downloaded from https://www.ers.usda.gov/data-products/rural-urban-continuum-codes.aspx
#fec data accessed from https://classic.fec.gov/disclosurep/PDownload.do
#subsets the first 5 numbers of the zipcodes row https://www.rdocumentation.org/packages/base/versions/3.6.0/topics/substr
senate$contributor_zip <- substr(senate$contributor_zip, 1, 5)
#FYI, if you want the last 5, use https://www.rdocumentation.org/packages/FedData/versions/1.1.0/topics/substrRight
##########
#let's see what and where these senators are getting their funding. This query is just for Tillis donations... and I need to go back and make sure there's no pacs in it. Right now it's set up just to make the map work.
totals <- senate %>%
#this filter changes based on the race
group_by(contributor_zip) %>%
summarise( total_raised = sum(contribution_receipt_amount))
#returns tigris query file as a shapefile (if not, it's a weird list that doesn't join)
options(tigris_class = "sf")
#Download a Zip Code Tabulation Area (ZCTA) shapefile into R
#read in our shapefile
options(tigris_use_cache = TRUE)
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "NC")
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "AK")
#code_shapefile <- zctas(cb = FALSE, year = 2010, state = "CO")
code_shapefile <- zctas(cb = FALSE, year = 2010, state = "AZ")
#join democratic fundraising numbers with shapefile
#make sure the zip column is numeric and then join the two, so we can make the shapefile and contributions data work together like happy friends in leaflet
cong_tot <- left_join(code_shapefile, totals , by = c("ZCTA5CE10"= "contributor_zip" ))
#and there's two counties that have a total of 0 (their populations are a total of 27k residents), so we're going to replace those values with zeroes.We'll create a new column just in case
cong_tot <- cong_tot %>%
mutate(total_raised_no_na = replace_na(total_raised, 0))
##########
#########works, but is so slow because of all the shapefiles to plot.
#ggplot(cong_tot) +
# geom_sf(data = cong_tot) +
# aes(fill= cut(total_raised,breaks = c(-1, 10000, 30000, 60000, 120000), labels= c("0-9k", "10k-29k", "30k-59k", "60k-120k") )) +
# geom_sf(color="black") +
# scale_fill_manual(values = c("#FFFFFF", "#C9C5DB", "#938CB8", "#3E386D")) +
# theme_void() +
# labs(title="Funds raised by Thom Tillis in NC zip codes", caption="Source: Federal Elections Commission", color='legend', fill='legend title')
#leaflet works much faster and it gives us a more interactive graphic, which i'm partial to.
bins <- c(0, 100, 1000, 10000, 100000, 200000, Inf)
pal1 <- colorBin(palette = c("#FFFFFF", "#C9C5DB","#05B69C", "#F9A51A", "#C73D49"), domain = cong_tot$total_raised_no_na, bins = bins)
map <- leaflet(cong_tot) %>% addTiles()
state_popup1 <- paste0("<strong> District: </strong>",
cong_tot$ZCTA5CE10,
"<br><strong>Total Raised: </strong>",
cong_tot$total_raised_no_na)
leaflet(data = cong_tot) %>%
addProviderTiles("CartoDB.Positron") %>%
addPolygons(fillColor = ~pal1(total_raised_no_na),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = state_popup1) %>%
addLegend("bottomright", pal = pal1, values = ~total_raised_no_na,
title = "Total raised",
labFormat = labelFormat(prefix = " "))
#helpful for running code in a smaller subset of data for testing purposes
# NOTE: the assignment below deliberately spans the commented-out lines --
# R keeps parsing past comments until the expression after `<-` is
# complete, so `dt` ends up holding sample_n(senate, 10).
dt <-
#df %>%
#slices off number of rows from 1:xx in df
#slice(1:4)
#slices off random number of rows from data
sample_n(senate, 10)
#from the zipcode and noncensus packages; used in the original code. It supposedly cleans up zip codes and gets me FIPS codes. CRAN has rudely pulled down these orphaned packages, which I found rude. They're useful. But if they're gone, they're gone.
#library(zipcode)
#library(noncensus)
#senate$contributor_zip <- clean.zipcodes(senate$contributor_zip)
#conjures up the zip_codes table from the noncensus library. Which is a great resource for this project. Because, unlike the zip codes data(zipcodes) table. It has fips codes.
#data(zip_codes)
#however, there was a lack of leading zeroes in the fips code fields. And since clean.zipcodes puts zeroes in front of some four-digit codes, I'm using it here to match up with the Census database.
#zip_codes$fips <- clean.zipcodes(zip_codes$fips)
#############This will be useful if we need to group by county. Not just by zip. Until then, it shall be noted and sit idle.
#counties <- read_csv("ZIP_COUNTY.csv")
#props to Dhmontgomery in newsnerdery for helping me eliminate the excess dupes by using a combination of rank and filter...basically what top_n does, but with the added ability of setting the ties.method element. #I was able to select the highest ratios and then, in the case of ties, R randomly chose one.
#nodupes <- counties %>%
# group_by(zip) %>%
# mutate(rank = rank(tot_ratio, ties.method = "random")) %>%
# filter(rank < 2)
######################
#USEFUL DOCUMENTATION #
#https://s3.amazonaws.com/ire16/campaign-finance/MiningFECData.pdf |
# load necessary libraries
library("shiny")
# intro page
# Overview tab: project purpose, data-source descriptions, and a banner
# image. Rendered inside tabPanel("Overview", ...) by `ui` below.
intro <-
mainPanel(
h1("Reproductive Health and Resources"),
strong("Purpose and Importance of the Project"),
p("As a group, we believe that women should have reproductive rights. We
think this would serve as an opportunity to delve into learning which
reproductive resources are provided in the United States and the
differences between in each geographic region. We also find it
interesting how there are many different sub-categories that fall under
reproductive health such as teen health, resources, infant health,
abortion, etc."),
strong("Origin of Sources"),
p(
"The", a("Guttmacher Institute", href = "https://data.guttmacher.org/states/table?dataset=data&state=AL+AK+AZ+AR+CA+CO+CT+DE+DC+FL+GA+HI+ID+IL+IN+IA+KS+KY+LA+ME+MD+MA+MI+MN+MS+MO+MT+NE+NV+NH+NJ+NM+NY+NC+ND+OH+OK+OR+PA+RI+SC+SD+TN+TX+UT+VT+VA+WA+WV+WI+WY&topics=57+283+65"),
" dataframe has information on number of abortions, number of abortion
clinics, percent of women taking contraceptive, etc. The",
a("Kaggle", href = "https://www.kaggle.com/omer2040/usa-states-to-region"),
" dataframe labels each of the United States into the four
different geographic regions. We then joined these dataframes to create
our new dataset that's grouped by regions."
),
# FIX: shiny::img() takes attributes only -- an unnamed first argument
# becomes (invalid) element content inside the <img> tag. Pass the
# caption as the alt attribute instead, which also aids accessibility.
img(alt = "We support women's reproductive health!", src = "https://www.plannedparenthood.org/uploads/filer_public_thumbnails/filer_public/cb/c4/cbc45653-b657-4b15-97fd-490aeefcd262/damore_ppla_da_7867.jpg__1200x675_q75_crop_subsampling-2.jpg")
)
# Interactive page one
# Bar-chart tab: number of abortion clinics per region, with a dropdown
# that highlights the selected region in the plotly output "bar_graph".
interactive_one <- sidebarLayout(
sidebarPanel(
h4("Choose which region you want to highlight"),
# Dropdown choices come from the pre-computed region_grouped data frame
# (defined elsewhere in the app); selection is read as input$chart_regions.
selectInput(
inputId = "chart_regions",
label = "Pick a geographic region below:",
choices = region_grouped$Region
)
),
mainPanel(
h1("Number of Abortion Clinics Per Region"),
plotlyOutput("bar_graph"),
tags$p(
id = "graph_two_paragraph",
# FIX: repaired the mis-encoded (mojibake) apostrophe in "aren't".
"This bar graph shows the number of abortion clinics in each region.
It attempts to show the general differences while attempting to
highlight specific regions upon user selection on the left. A bar graph
is ideal because the numerical data points aren't closely related. As
shown, the West region has the most abortion clinics - more than 250,
with the midwest region having the least - less than 100. When hovering
with the cursor over each bar, the specific number of abortion clinics
is displayed."
)
)
)
# Interactive page two
# Scatterplot tab: abortion clinics vs. abortion rates, with two sliders
# that set the y-axis limits of the plotly output "clin_rates".
interactive_two <-
sidebarLayout(# creates a side bar and provides a widget to adjust Y axis
sidebarPanel(
h4("Zoom in or out on the Abortion Rates Axis"),
sliderInput(
inputId = "Max_y", "Pick a Max y Value",
min = 0, max = 300, value = 300
),
sliderInput(
inputId = "Min_y", "Pick a Min y Value",
min = 0, max = 300, value = 80
)
),
mainPanel(
h1("Abortion Clinics x Abortion Rates"), # page title
plotlyOutput("clin_rates"), # ID
tags$p(
id = "graph_two_paragraph", # page paragraph definition
# FIX: user-facing typos repaired ("demostrates" -> "demonstrates",
# "the more about of" -> "the more amount of").
"This point chart attempts to show the relationship between the total
number of abortion clinics and the total abortion rate in each region.
I used a scatterplot to show this correlation, however from the
results it demonstrates there is a small correlation between the two.
However, the results disprove our hypothesis that the more
amount of abortion clinics the more amount of abortion rates. The
graph displays (with one outlier) that the more abortion clinics
the less total abortion rates in that region. This implies that
less people need abortions in regions that have more abortion
clinics available. This could be due to the fact that abortion
clinics also provide other health care resources such as
contraceptive, family planning, Plan B, health education and more,
which can decrease the need for an unwanted pregnancy
and therefore lessens the need for an abortion. "
)
)
)
# Interactive page three
# Bubble-chart tab: abortion clinics vs. contraceptive usage, with two
# sliders bounding the y axis of the plotly output "clin_con".
interactive_three <-
sidebarLayout(
sidebarPanel(
h4("Zoom in or out on the Percent of Contraceptives Axis"),
sliderInput(
inputId = "Max", "Pick a Max y Value",
min = 75, max = 115, value = 80
),
sliderInput(
inputId = "Min", "Pick a Min y Value",
min = 20, max = 60, value = 60
)
),
mainPanel(
h1("Abortion Clinics x Contraceptives"),
plotlyOutput("clin_con"),
tags$p(
id = "graph_three_paragraph",
# FIX: user-facing typo repaired ("clincs" -> "clinics", twice).
"This bubble chart attempts to show the relationship, if any, between
the number of abortion clinics in a region and the percentage of women
aged 18 to 49 that use contraceptives. If there is a relationship then
one could argue that abortion clinics being around might sway the number
of women on contraceptives. As you can see this chart shows little to no
correlation between the number of abortion clinics and the percentage of
women on contraceptives. This implies that it doesn't matter if you have
an easy option or not to abort, because either way most women are going
to be on contraceptives."
)
)
)
# Conclusion Page
# Summary tab: three takeaway tables, each followed by a prose explanation.
# NOTE(review): this binding masks base::summary() in the app's global
# environment -- consider renaming (the `ui` definition below references it).
summary <- mainPanel(
h1("Summary Information and Data"),
h3("Takeaway #1"),
h4("Table"),
tableOutput("table_1"),
h4("Explanation"),
tags$p(
# NOTE(review): id is misspelled ("Expanation_1") and inconsistent with
# Explanation_2/3 below; left unchanged in case style.css targets it.
id = "Expanation_1",
"When looking at the total number of abortion clinics
throughout the four different regions of the United States we
found that the West had the most recorded number of
clinics - 277 clinics - and the least being the Midwest - 91
clinics. We can see that the West had over three times the
number of abortion clinics compared to Midwest. This could
imply that there's a lower need for demand in the Midwest than
in the other regions, hence the lower number of clinics.
Higher population regions such as the West and Northeast
may have more clinics to serve more people. Beliefs and feeling
towards abortion in specific regions may also be a large
contributor to the significant difference in how many clinics
that are opened."
),
h3("Takeaway #2"),
h4("Table"),
tableOutput("table_clin_rates"),
h4("Explanation"),
tags$p(
id = "Explanation_2",
"As we took a look at the total number of abortion clinics to
the total abortion rate in each of the four regions we found
that there was no clear correlations in the data. While the
West had the most number of abortion clinics, it had the 2nd
lowest abortion rate. However the South, which had the 2nd
least number of clinics, surprisingly had the highest recorded
abortion rates. Meanwhile the Midwest had the lowest number
of abortion clinics and rates. These insights inform us that
the abortion rate can vary region to region regardless of the
number of clinics provided."
),
h3("Takeaway #3"),
h4("Table"),
tableOutput("table_clin_con"),
h4("Explanation"),
tags$p(
id = "Explanation_3",
"As we pulled and analyzed the information on the total
abortion clinics and the percentage of contraceptive usage
in each region, we found that data for contraceptive was fairly
the same falling within the bracket of 71.86% - 68.68 %. This
is very interesting to observe as that tells us the rate of
contraceptive usage isn't directly impacted by the total number
of clinics provided in the region."
)
)
# UI
# Top-level page definition: pulls in the external stylesheet and lays the
# five panels defined above out as navbar tabs.
ui <- fluidPage(
includeCSS("style.css"),
navbarPage(
# FIX: added the missing apostrophe in the user-facing title.
"Women's Reproductive Resources",
tabPanel("Overview", intro),
tabPanel("Abortion Clinics/Region", interactive_one),
tabPanel("Abortion Clinics x Abortion Rates", interactive_two),
tabPanel("Abortion Clinics x Contraceptives", interactive_three),
tabPanel("Summary Information", summary)
)
)
| /Shiny Website/app_ui.R | permissive | info-201a-sp20/final-project-achenn98 | R | false | false | 8,461 | r | # load necessary libraries
library("shiny")
# intro page
intro <-
mainPanel(
h1("Reproductive Health and Resources"),
strong("Purpose and Importance of the Project"),
p("As a group, we believe that women should have reproductive rights. We
think this would serve as an opportunity to delve into learning which
reproductive resources are provided in the United States and the
differences between in each geographic region. We also find it
interesting how there are many different sub-categories that fall under
reproductive health such as teen health, resources, infant health,
abortion, etc."),
strong("Origin of Sources"),
p(
"The", a("Guttmacher Institute", href = "https://data.guttmacher.org/states/table?dataset=data&state=AL+AK+AZ+AR+CA+CO+CT+DE+DC+FL+GA+HI+ID+IL+IN+IA+KS+KY+LA+ME+MD+MA+MI+MN+MS+MO+MT+NE+NV+NH+NJ+NM+NY+NC+ND+OH+OK+OR+PA+RI+SC+SD+TN+TX+UT+VT+VA+WA+WV+WI+WY&topics=57+283+65"),
" dataframe has information on number of abortions, number of abortion
clinics, percent of women taking contraceptive, etc. The",
a("Kaggle", href = "https://www.kaggle.com/omer2040/usa-states-to-region"),
" dataframe labels each of the United States into the four
different geographic regions. We then joined these dataframes to create
our new dataset that's grouped by regions."
),
img("We support women's reproductive health!", src = "https://www.plannedparenthood.org/uploads/filer_public_thumbnails/filer_public/cb/c4/cbc45653-b657-4b15-97fd-490aeefcd262/damore_ppla_da_7867.jpg__1200x675_q75_crop_subsampling-2.jpg")
)
# Interactive page one
interactive_one <- sidebarLayout(
sidebarPanel(
h4("Choose which region you want to highlight"),
selectInput(
inputId = "chart_regions",
label = "Pick a geographic region below:",
choices = region_grouped$Region
)
),
mainPanel(
h1("Number of Abortion Clinics Per Region"),
plotlyOutput("bar_graph"),
tags$p(
id = "graph_two_paragraph",
"This bar graph shows the number of abortion clinics in each region.
It attempts to show the general differences while attempting to
highlight specific regions upon user selection on the left. A bar graph
is ideal because the numerical data points arenโt closely related. As
shown, the West region has the most abortion clinics - more than 250,
with the midwest region having the least - less than 100. When hovering
with the cursor over each bar, the specific number of abortion clinics
is displayed."
)
)
)
# Interactive page two
interactive_two <-
sidebarLayout(# creates a side bar and provides a wdiget to adjust Y axis
sidebarPanel(
h4("Zoom in or out on the Abortion Rates Axis"),
sliderInput(
inputId = "Max_y", "Pick a Max y Value",
min = 0, max = 300, value = 300
),
sliderInput(
inputId = "Min_y", "Pick a Min y Value",
min = 0, max = 300, value = 80
)
),
mainPanel(
h1("Abortion Clinics x Abortion Rates"), # page title
plotlyOutput("clin_rates"), # ID
tags$p(
id = "graph_two_paragraph", # page paragraph definition
"This point chart attempts to show the relationship between the total
number of abortion clinics and the total abortion rate in each region.
I used a scatterplot to show this correlation, however from the
results it demostrates there is a small correlation between the two.
However, the results disprove our hypothesis that the more
amount of abortion clinics the more about of abortion rates. The
graph displays (with one outlier) that the more abortion clinics
the less total abortion rates in that region. This implies that
less people need abortions in regions that have more abortion
clinics available. This could be due to the fact that abortion
clinics also provide other health care resources such as
contraceptive, family planning, Plan B, health education and more,
which can decrease the need for an unwanted pregnancy
and therefore lessens the need for an abortion. "
)
)
)
# Interactive page three
interactive_three <-
sidebarLayout(
sidebarPanel(
h4("Zoom in or out on the Percent of Contraceptives Axis"),
sliderInput(
inputId = "Max", "Pick a Max y Value",
min = 75, max = 115, value = 80
),
sliderInput(
inputId = "Min", "Pick a Min y Value",
min = 20, max = 60, value = 60
)
),
mainPanel(
h1("Abortion Clinics x Contraceptives"),
plotlyOutput("clin_con"),
tags$p(
id = "graph_three_paragraph",
"This bubble chart attempts to show the relationship, if any, between
the number of abortion clinics in a region and the percentage of women
aged 18 to 49 that use contraceptives. If there is a relationship then
one could argue that abortion clincs being around might sway the number
of women on contraceptives. As you can see this chart shows little to no
correlation between the number of abortion clincs and the percentage of
women on contraceptives. This implies that it doesn't matter if you have
an easy option or not to abort, because either way most women are going
to be on contraceptives."
)
)
)
# Conclusion Page
summary <- mainPanel(
h1("Summary Information and Data"),
h3("Takeaway #1"),
h4("Table"),
tableOutput("table_1"),
h4("Explanation"),
tags$p(
id = "Expanation_1",
"When looking at the total number of abortion clinics
throughout the four different regions of the United States we
found that the West had the most recorded number of
clinics - 277 clinics - and the least being the Midwest - 91
clinics. We can see that the West had over three times the
number of abortion clinics compared to Midwest. This could
imply that there's a lower need for demand in the Midwest than
in the other regions, hence the lower number of clinics.
Higher population regions such as the West and Northeast
may have more clinics to serve more people. Beliefs and feeling
towards abortion in specific regions may also be a large
contributor to the significant difference in how many clinics
that are opened."
),
h3("Takeaway #2"),
h4("Table"),
tableOutput("table_clin_rates"),
h4("Explanation"),
tags$p(
id = "Explanation_2",
"As we took a look at the total number of abortion clinics to
the total abortion rate in each of the four regions we found
that there was no clear correlations in the data. While the
West had the most number of abortion clinics, it had the 2nd
lowest abortion rate. However the South, which had the 2nd
least number of clinics, surprisingly had the highest recorded
abortion rates. Meanwhile the Midwest had the lowest number
of abortion clinics and rates. These insights inform us that
the abortion rate can vary region to region regardless of the
number of clinics provided."
),
h3("Takeaway #3"),
h4("Table"),
tableOutput("table_clin_con"),
h4("Explanation"),
tags$p(
id = "Explanation_3",
"As we pulled and analyzed the information on the total
abortion clinics and the percentage of contraceptive usage
in each region, we found that data for contraceptive was fairly
the same falling within the bracket of 71.86% - 68.68 %. This
is very interesting to observe as that tells us the rate of
contraceptive usage isn't directly impacted by the total number
of clinics provided in the region."
)
)
# UI
ui <- fluidPage(
includeCSS("style.css"),
navbarPage(
"Womens Reproductive Resources",
tabPanel("Overview", intro),
tabPanel("Abortion Clinics/Region", interactive_one),
tabPanel("Abortion Clinics x Abortion Rates", interactive_two),
tabPanel("Abortion Clinics x Contraceptives", interactive_three),
tabPanel("Summary Information", summary)
)
)
|
#' Base class
#'
#' @export
SimpleQuestion <- R6::R6Class(
"SimpleQuestion",
public = list(
type = "abstract",
quiz = NULL,
initialize = function(text,
data = quote({}),
hidden_data = quote({}),
seed = NULL,
hidden_seed = NULL,
feedback = NULL,
answer = NULL,
tags = NULL,
header = NULL) {
private$.text <- text
private$.data <- data
private$.hidden_data <- hidden_data
private$.seed <- seed
private$.hidden_seed <- hidden_seed
private$.answer <- answer
private$.feedback <- feedback
private$.tags <- tags
private$.header <- header
# Default placeholders
private$placeholders <- list(
TITLE = "get_title",
REC_DATA_CHUNK = "get_rec_data_chunk",
DATA_CHUNK = "get_data_chunk",
ANSWER_INFO = "get_answer_info",
INST_TEXT = "get_inst_text",
SEED_CHUNK = "get_seed_chunk",
EVALUATED_ANSWER = "get_evaluated_answer",
FEEDBACK = "get_feedback",
FEEDBACK_ANSWER = "get_feedback_answer",
ANSWER_STRING = "get_answer_string")
# Specific placeholders for XML export
private$xml_placeholders <- list(
TYPE = "get_type",
TITLE = "get_title",
XML_QUESTION_TEXT = "get_XML_question_text",
XML_GENERALFEEDBACK = "get_XML_generalfeedback",
XML_ANSWER = "get_XML_answer"
)
},
# Repeatedly scans TEMPLATE for @PLACEHOLDER@ tokens and replaces each
# with the result of the corresponding method listed in PLACEHOLDERS
# (a name -> method-name map), called as self[[method]](opts, info).
# A NULL result removes the placeholder's entire line when it stands
# alone; returns NULL when the fully-substituted template is empty.
instantiate_placeholders = function(template, placeholders, opts, info) {
placeholders_regex <- paste0("@", names(placeholders), "@", collapse = "|")
repeat {
match <- stringi::stri_locate_first_regex(template, placeholders_regex)
if(any(is.na(match))) # breaking if no match
break
if(nrow(match) == 0)
break
# Computing replacement string
id <- substring(template, match[1] + 1, match[2] - 1)
funcname <- placeholders[[id]]
if (is.null(funcname))
stop("Unable to find corresponding function for ", sQuote(id))
replacement <- self[[funcname]](opts, info)
# If replacement is NULL, try to replace whole line
if (is.null(replacement)) {
template <- stringi::stri_replace_first_regex(template, paste0("^ *@", id, "@ *\n"), "", opts_regex = list(multiline = TRUE))
template <- stringi::stri_replace_first_regex(template, paste0("@", id, "@"), "")
} else {
# Escape backslash and dollar in replacement string
# (so they survive stri_replace's regex replacement syntax).
replacement <- gsub("\\\\", "\\\\\\\\", replacement)
replacement <- gsub("\\$", "\\\\$", replacement)
template <- stringi::stri_replace_first_regex(template, paste0("@", id, "@"), replacement)
}
}
if(nchar(template) == 0)
NULL
else
template
},
update_quiz = function(quiz) {
self$quiz <- quiz
self$invalidate_hidden_data_list()
},
get_data_and_environment = function() {
if(is.null(self$quiz))
data <- self$recursive_instantiated_data()
else
data <- self$quiz$recursive_instantiated_data()
env <- empty_env()
eval(data, env)
list(data = data, env = env)
},
validate_data = function(parent_data = NULL) {},
get_markdown_from_template = function(template, opts = list(), info = list()) {
self$instantiate_placeholders(template, private$placeholders, opts, info)
},
to_markdown = function(opts = list(), info = list()) {
opts <- update_list(private$default_options, opts)
opts$export <- "markdown"
private$validate_options(opts, info)
template <- "@INST_TEXT@\n\n@FEEDBACK@\n"
self$instantiate_placeholders(template, private$placeholders, opts, info)
},
#' Feedback options if none is provided
get_default_feedback = function() {
list(text = self$answer)
},
#' Sanitize provided feedback
get_feedback_from_field = function(feedback) {
feedback <-
if(is.null(feedback)) {
self$get_default_feedback()
} else if(is.character(feedback)) {
list(text = feedback)
} else if(is.numeric(feedback)) {
list(text = feedback)
} else if(is.language(feedback)) {
list(text = feedback)
} else if(is.list(feedback)) {
feedback
}
update_list(private$default_feedback_options, feedback)
},
#' Template for full feedback
get_feedback = function(opts, info) {
# No feedback
if (!is.null(opts$feedback) && !opts$feedback)
return(NULL)
"@ANSWER_INFO@\n\n@ANSWER_STRING@@EVALUATED_ANSWER@\n\n@FEEDBACK_ANSWER@\n"
},
get_answer_string = function(opts, info) {
if(is.null(info$answer_string))
"**R\u00E9ponse :** "
else
info$answer_string
},
#' Feedback itself giving the right answer
get_feedback_answer = function(opts, info) {
# Get feedback as a proper named list with default arguments
feedback_opts <- self$instantiated_feedback
# Check for spurious arguments in feedback_options
unknown_opts <- setdiff(names(feedback_opts),
names(private$default_feedback_options))
if (length(unknown_opts) > 0) {
stop("Unknown options: ", paste0(unknown_opts))
}
# Override feedback_options with upstream opts
feedback_opts <- update_list(feedback_opts, opts)
# Check that options are coherent
private$validate_feedback_options(feedback_opts, info)
feedback <-
if (feedback_opts$eval) {
feedback_opts$text
} else {
if (is.null(feedback_opts$noeval_text))
feedback_opts$text
else
feedback_opts$noeval_text
}
feedback <- if (is.character(feedback)) {
feedback
} else if (is.language(feedback))
sprintf("```{r}\n%s\n```\n", answerstr(feedback))
else if (is.numeric(feedback))
sprintf("```{r}\n%s\n```\n", answerstr(feedback))
else stop("Unsupported feedback type: ", sQuote(feedback))
# Maybe indent the feedback
if(is.null(opts$indent))
feedback
else
add_spaces_left(feedback, 4)
},
get_XML_question_text = function(opts, info) {
# md_question <- self$get_XML_question_markdown(opts, info)
md_question <- self$get_inst_text(opts, info)
HTML_question <- render_HTML(md_question, opts, info)
trimws(HTML_question) # pandoc seems to add some leading newlines
},
# Return XML "generalfeedback" node with feedback in HTML as CDATA
get_XML_generalfeedback = function(opts, info) {
if(!opts$feedback) return(NULL)
# Generate HTML for feedback
placeholders <- update_list(private$placeholders, private$xml_placeholders)
tmpl <- self$get_feedback(opts, info)
md_feedback <- self$instantiate_placeholders(tmpl, placeholders, opts, info)
HTML_feedback <- render_HTML(md_feedback, opts, info)
HTML_feedback0 <- trimws(HTML_feedback) # pandoc seems to add some leading newlines
# Return XML with inner HTML
tmpl <-"<generalfeedback format=\"html\">
<text><![CDATA[%s]]></text>
</generalfeedback>"
tmpl0 <- add_spaces_left(tmpl, opts$indent)
sprintf(tmpl0, HTML_feedback0)
},
get_XML_answer = function(opts, info) {
as.character(self$get_evaluated_answer2(opts, info))
},
#' Export Question as XML
to_XML = function(opts = NULL, info = NULL) {
# Set up environment for evaluating data if not already
if (is.null(info$env)) {
info0 <- self$get_data_and_environment()
info <- update_list(info, info0)
}
# Setting options
default_opts <- update_list(private$default_options, private$xml_default_options)
opts <- update_list(default_opts, opts)
opts$export <- "xml"
private$validate_options(opts, info)
template <- add_spaces_left(private$xml_question_template, opts$indent)
placeholders <- update_list(private$placeholders, private$xml_placeholders)
# Answers might modify datasets stored in info$env
self$instantiate_placeholders(template, placeholders, opts, info)
},
invalidate_all = function() {
self$invalidate_text()
self$invalidate_answer()
self$invalidate_feedback()
self$invalidate_hidden_data()
self$invalidate_data()
self$invalidate_hidden_data_list()
},
invalidate_ancestor = function() {
if (is.null(self$ancestor))
self$invalidate_hidden_data()
else
self$ancestor$invalidate_hidden_data()
},
invalidate_data = function() {
private$is_data_available <- FALSE
self$invalidate_inst_data()
},
invalidate_hidden_data = function() {
private$is_hidden_data_available <- FALSE
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
self$invalidate_hidden_data_list()
},
invalidate_hidden_data_list = function() {
private$is_hidden_data_list_available <- FALSE
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
},
invalidate_inst_data = function() {
private$is_data_instantiated <- FALSE
},
invalidate_inst_text = function() {
private$is_text_instantiated <- FALSE
},
invalidate_inst_answer = function() {
private$is_answer_instantiated <- FALSE
},
invalidate_inst_feedback = function() {
private$is_feedback_instantiated <- FALSE
},
invalidate_header = function() {
self$invalidate_text()
},
invalidate_text = function() {
private$is_text_available <- FALSE
self$invalidate_inst_text()
},
invalidate_answer = function() {
private$is_answer_available <- FALSE
self$invalidate_inst_answer()
},
invalidate_feedback = function() {
private$is_feedback_available <- FALSE
self$invalidate_inst_feedback()
},
# Returns this question's instantiated data expression, prefixed with a
# set.seed() call when a seed is defined so that evaluation is
# reproducible. SEED_INIT = TRUE skips the "data without seed" check.
recursive_instantiated_data = function(seed_init = FALSE) {
# FIX: use short-circuit && in this scalar `if` condition instead of the
# elementwise & (R >= 4.3 errors on length > 1 operands of &&, and &&
# is the idiomatic operator for scalar conditions).
if (!seed_init && !is_empty_language(self$data) && is.null(self$seed))
stop("Some data but no seed to initialize")
if (is.null(self$seed))
self$instantiated_data
else
merge_languages(
instantiate_data_list(
bquote(set.seed(.(self$seed))),
self$hidden_data_list),
self$instantiated_data)
},
get_type = function(opts, info) {
self$type
},
get_title = function(opts, info) {
self$title
},
get_data_chunk = function(opts, info) {
l <- sanitize_language(self$local_instantiated_data)
if (is.null(l))
NULL
else
sprintf("```{r include = FALSE}\n%s\n```", answerstr(l))
},
get_rec_data_chunk = function(opts, info) {
if (is.null(self$quiz))
stop("No defined Quiz in question ", sQuote(self$title))
l <- sanitize_language(self$quiz$recursive_instantiated_data())
if (is.null(l))
NULL
else
sprintf("```{r include = FALSE}\n%s\n```", answerstr(l))
},
# Data chunk for formatting answer
get_answer_info = function(opts, info) {
sprintf("```{r, include = FALSE}\n%s\nanswer <- {\n%s}\n```",
answerstr(quote(cquote <- function(s) {
if (is.character(s))
"`"
else if (is.numeric(s))
"$"
else stop("Argument is not character or numeric ", sQuote(s))
})),
answerstr(self$instantiated_answer))
},
get_evaluated_answer = function(opts, info) {
"`r cquote(answer)``r answer``r cquote(answer)`"
},
get_evaluated_answer2 = function(opts, info) {
# Set up environment for evaluating data if not already
if (is.null(info$env)) {
info0 <- self$get_data_and_environment()
info <- update_list(info, info0)
}
eval(self$instantiated_answer, info$env)
},
get_seed_chunk = function(opts, info) {
if (is.null(self$seed))
NULL
else
sprintf("```{r}\nset.seed(%d)\n```", self$seed)
},
get_text = function(opts, info) {
self$text
},
get_inst_text = function(opts, info) {
if (!is.null(opts$numbered) && opts$numbered) {
paste0("**Question ", info$num, " :** ", self$instantiated_text)
} else
self$instantiated_text
},
get_inst_cookie = function(opts, info) {
stop("Abstract method")
},
get_inst_text_and_cookie = function(opts, info) {
sprintf("%s\n\n%s", self$get_inst_text(opts, info), self$get_inst_cookie(opts, info))
},
get_inst_text_and_number = function(opts, info) {
stopifnot(is.numeric(info$index))
sprintf("%s (%d)", self$get_inst_text(opts, info), info$index)
},
get_guess = function() {
},
get_is_correct_icon = function() {
},
# Evaluates the hidden-data expression in a fresh environment pre-loaded
# with VAR_LIST (seeded with hidden_seed when present) and stores the
# resulting variables, merged over VAR_LIST, in self$hidden_data_list.
# SEED_INIT = TRUE skips the "hidden data without seed" sanity check.
instantiate_hidden_data_list = function(var_list = NULL, seed_init = FALSE) {
# FIX: use short-circuit && in this scalar `if` condition instead of the
# elementwise & (matches recursive_instantiated_data; && is idiomatic
# for scalar conditions and required for R >= 4.3 strictness).
if (!seed_init && !is_empty_language(self$hidden_data) && is.null(self$hidden_seed))
stop("Some hidden data but no seed to instantiate them")
# VAR_LIST must be a named list for list2env to work
if (is.null(var_list)) var_list <- list()
env <- list2env(var_list, envir = empty_env())
# Set seed if any and eval hidden data
if (!is.null(self$hidden_seed))
eval(bquote(set.seed(.(self$hidden_seed))), envir = env)
eval(self$hidden_data, envir = env)
# Update variables in VAR_LIST with ENV
new_var_list <- update_list(var_list, as.list(env, all.names = TRUE))
self$hidden_data_list <- new_var_list
## private$.hidden_data_list <- new_var_list
## private$is_hidden_data_list_available <- TRUE
},
instantiate_feedback_list = function(feedback, var_list) {
text <- instantiate_object(feedback$text, self$hidden_data_list)
noeval_text <- instantiate_object(feedback$noeval_text, self$hidden_data_list)
feedback$text <- text
feedback$noeval_text <- noeval_text
feedback
},
# Names of all variables created by evaluating `hidden_data` in a fresh
# environment (seeded with hidden_seed when present). Used as the default
# name set for the rename_* methods below.
hidden_data_names = function() {
env <- empty_env()
if (!is.null(self$hidden_seed))
eval(bquote(set.seed(.(self$hidden_seed))), envir = env)
eval(self$hidden_data, envir = env)
# all.names = TRUE so dot-prefixed variables are included
ls(envir = env, all.names = TRUE)
},
rename = function(prefix, names = self$hidden_data_names()) {
self$rename_text(prefix, names)
self$rename_header(prefix, names)
self$rename_answer(prefix, names)
self$rename_feedback(prefix, names)
self$rename_data(prefix, names)
self$rename_hidden_data(prefix, names)
self$invalidate_text()
self$invalidate_answer()
self$invalidate_hidden_data()
self$invalidate_hidden_data_list()
self$invalidate_feedback()
self$invalidate_data()
self
},
rename_header = function(prefix, names = self$hidden_data_names()) {
self$header <- prefix_object(prefix, names, self$header)
self
},
rename_text = function(prefix, names = self$hidden_data_names()) {
self$text <- prefix_object(prefix, names, private$.text)
self$invalidate_text()
self
},
rename_answer = function(prefix, names = self$hidden_data_names()) {
self$answer <- prefix_object(prefix, names, self$answer)
self$invalidate_answer()
self
},
rename_feedback = function(prefix, names = self$hidden_data_names()) {
feedback <- self$feedback
text <- prefix_object(prefix, names, feedback$text)
noeval_text <- prefix_object(prefix, names, feedback$noeval_text)
if (!is.null(text))
feedback$text <- text
if (!is.null(noeval_text))
feedback$noeval_text <- noeval_text
self$feedback <- feedback
self$invalidate_feedback()
self
},
rename_data = function(prefix, names = self$hidden_data_names()) {
self$data <- prefix_object(prefix, names, self$data)
self$invalidate_data()
self
},
rename_hidden_data = function(prefix, names = self$hidden_data_names()) {
self$hidden_data <- prefix_object(prefix, names, self$hidden_data)
self$invalidate_hidden_data()
self
},
copy = function() {
Question(self$text,
type = self$type,
seed = self$seed,
hidden_seed = self$hidden_seed,
hidden_data = self$hidden_data,
data = self$data,
answer = self$answer,
feedback = self$feedback)
}
),
active = list(
title = function(title) {
if (missing(title)) {
if (is.null(private$.title)) {
if (nchar(self$instantiated_text) > 60)
paste0(substr(self$instantiated_text, 0, 57), "...")
else
self$instantiated_text
} else private$.title
} else {
private$.title <- title
}
},
header = function(header) {
if (missing(header)) {
private$.header
} else {
private$.header <- header
self$invalidate_header()
private$.header
}
},
ancestor = function(ancestor) {
if (missing(ancestor)) {
private$.ancestor
} else {
private$.ancestor <- ancestor
self$invalidate_ancestor()
private$.ancestor
}
},
text = function(text) {
if (missing(text)) {
paste(c(trimws(self$header), trimws(private$.text)), collapse = "\n\n")
} else {
private$.text <- text
self$invalidate_inst_text()
}
},
instantiated_text = function() {
if (private$is_text_instantiated)
private$.instantiated_text
else {
private$.instantiated_text <- instantiate_text_list(
self$text,
self$hidden_data_list)
private$is_text_instantiated <- TRUE
private$.instantiated_text
}
},
answer = function(answer) {
if (missing(answer)) {
private$.answer
} else {
private$.answer <- answer
self$invalidate_inst_answer()
}
},
instantiated_answer = function() {
if (private$is_answer_instantiated)
private$.instantiated_answer
else {
private$.instantiated_answer <- instantiate_object(
self$answer,
self$hidden_data_list)
private$is_answer_instantiated <- TRUE
private$.instantiated_answer
}
},
feedback = function(feedback) {
if (missing(feedback)) {
if (private$is_feedback_available)
private$.feedback
else {
private$.feedback <- self$get_feedback_from_field(private$.feedback)
private$is_feedback_available <- TRUE
private$.feedback
}
} else {
private$.feedback <- self$get_feedback_from_field(feedback)
self$invalidate_inst_feedback()
private$is_feedback_available <- TRUE
}
},
instantiated_feedback = function() {
if (private$is_feedback_instantiated)
private$.instantiated_feedback
else {
inst_feedback <- self$instantiate_feedback_list(self$feedback, self$hidden_data_list)
private$is_feedback_instantiated <- TRUE
private$.instantiated_feedback <- inst_feedback
}
},
hidden_seed = function(seed) {
if (missing(seed))
private$.hidden_seed
else {
private$.hidden_seed <- seed
self$invalidate_ancestor()
}
},
hidden_data = function(hidden_data) {
if (missing(hidden_data)) {
private$.hidden_data
} else {
private$.hidden_data <- hidden_data
self$invalidate_ancestor()
}
},
hidden_data_list = function(hidden_data_list) {
if (missing(hidden_data_list)) {
if (private$is_hidden_data_list_available)
private$.hidden_data_list
else {
root <- self
while (!is.null(root$ancestor))
root <- root$ancestor
root$instantiate_hidden_data_list()
private$is_hidden_data_list_available <- TRUE
private$.hidden_data_list
}
} else {
private$is_hidden_data_list_available <- TRUE
private$.hidden_data_list <- hidden_data_list
## Need to be recomputed
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
}
},
seed = function(seed) {
if (missing(seed))
private$.seed
else {
private$.seed <- seed
}
},
data = function(data) {
if (missing(data)) {
private$.data
} else {
private$.data <- data
self$invalidate_inst_data()
}
},
instantiated_data = function() {
if (private$is_data_instantiated)
private$.instantiated_data
else {
private$.instantiated_data <- instantiate_data_list(
private$.data,
self$hidden_data_list)
private$is_data_instantiated <- TRUE
private$.instantiated_data
}
},
local_instantiated_data = function() {
if(is.null(self$seed))
self$instantiated_data
else
merge_languages(
instantiate_data_list(
bquote(set.seed(.(self$seed))),
self$hidden_data_list),
self$instantiated_data)
},
tags = function(tags) {
if (missing(tags)) {
private$.tags
} else {
private$.tags <- tags
}
}
),
private = list(
.title = NULL,
.header = NULL,
.ancestor = NULL,
.text = NULL,
is_text_available = FALSE,
.answer = NULL,
is_answer_available = FALSE,
.feedback = NULL,
is_feedback_available = FALSE,
.data = NULL,
is_data_available = FALSE,
.seed = NULL,
.hidden_seed = NULL,
.hidden_data = NULL,
is_hidden_data_available = FALSE,
.hidden_data_list = NULL,
is_hidden_data_list_available = FALSE,
.instantiated_text = NULL,
is_text_instantiated = FALSE,
.instantiated_answer = NULL,
is_answer_instantiated = FALSE,
.instantiated_feedback = NULL,
is_feedback_instantiated = FALSE,
.instantiated_data = NULL,
is_data_instantiated = FALSE,
.tags = NULL,
default_feedback_options = list(text = "", noeval_text = NULL, eval = TRUE, indent = 2),
default_options = list(numbered = TRUE,
export = "markdown",
feedback = TRUE,
quiet = FALSE),
xml_default_options = list(
numbered = FALSE,
indent = 0
),
feedback_options = NULL,
placeholders = NULL,
placeholders_regex = NULL,
validate_options = function(opts, info) {
if (!is.null(opts$numbered) && opts$numbered && is.null(info$num))
stop("`numbered` option is enabled but no `num` available")
},
validate_feedback_options = function(opts, info) {
## super$validate_feedback_options(opts, info)
},
xml_question_template = trimws("
<question type=\"@TYPE@\">
<name>
<text><![CDATA[@TITLE@]]></text>
</name>
<questiontext format=\"html\">
<text><![CDATA[@XML_QUESTION_TEXT@]]></text>
</questiontext>
<answer fraction=\"100\" format=\"plain_text\">
<text>@XML_ANSWER@</text>
</answer>
@XML_GENERALFEEDBACK@
<hidden>0</hidden>
</question>
"),
xml_placeholders = NULL))
| /R/simple_question.R | no_license | thisirs/quizR | R | false | false | 27,951 | r | #' Base class
#'
#' @export
SimpleQuestion <- R6::R6Class(
"SimpleQuestion",
public = list(
type = "abstract",
quiz = NULL,
initialize = function(text,
data = quote({}),
hidden_data = quote({}),
seed = NULL,
hidden_seed = NULL,
feedback = NULL,
answer = NULL,
tags = NULL,
header = NULL) {
private$.text <- text
private$.data <- data
private$.hidden_data <- hidden_data
private$.seed <- seed
private$.hidden_seed <- hidden_seed
private$.answer <- answer
private$.feedback <- feedback
private$.tags <- tags
private$.header <- header
# Default placeholders
private$placeholders <- list(
TITLE = "get_title",
REC_DATA_CHUNK = "get_rec_data_chunk",
DATA_CHUNK = "get_data_chunk",
ANSWER_INFO = "get_answer_info",
INST_TEXT = "get_inst_text",
SEED_CHUNK = "get_seed_chunk",
EVALUATED_ANSWER = "get_evaluated_answer",
FEEDBACK = "get_feedback",
FEEDBACK_ANSWER = "get_feedback_answer",
ANSWER_STRING = "get_answer_string")
# Specific placeholders for XML export
private$xml_placeholders <- list(
TYPE = "get_type",
TITLE = "get_title",
XML_QUESTION_TEXT = "get_XML_question_text",
XML_GENERALFEEDBACK = "get_XML_generalfeedback",
XML_ANSWER = "get_XML_answer"
)
},
# Repeatedly substitute @NAME@ placeholders in TEMPLATE.
# `placeholders` maps a placeholder id to the name of a method on `self`;
# that method is called as self[[funcname]](opts, info) and its return value
# replaces the first occurrence of @id@. A NULL return value removes the
# placeholder instead (together with the whole line when the placeholder
# sits alone on it). Returns NULL when the instantiated template is empty.
instantiate_placeholders = function(template, placeholders, opts, info) {
  # One alternation matching any known placeholder, e.g. "@TITLE@|@FEEDBACK@".
  placeholders_regex <- paste0("@", names(placeholders), "@", collapse = "|")
  repeat {
    match <- stringi::stri_locate_first_regex(template, placeholders_regex)
    if(any(is.na(match))) # breaking if no match
      break
    if(nrow(match) == 0)
      break
    # Computing replacement string: strip the surrounding @ delimiters.
    id <- substring(template, match[1] + 1, match[2] - 1)
    funcname <- placeholders[[id]]
    if (is.null(funcname))
      stop("Unable to find corresponding function for ", sQuote(id))
    replacement <- self[[funcname]](opts, info)
    # If replacement is NULL, try to replace whole line first so no empty
    # line is left behind; fall back to removing just the placeholder.
    if (is.null(replacement)) {
      template <- stringi::stri_replace_first_regex(template, paste0("^ *@", id, "@ *\n"), "", opts_regex = list(multiline = TRUE))
      template <- stringi::stri_replace_first_regex(template, paste0("@", id, "@"), "")
    } else {
      # Escape backslash and dollar in replacement string so stringi does
      # not interpret them as capture-group references.
      replacement <- gsub("\\\\", "\\\\\\\\", replacement)
      replacement <- gsub("\\$", "\\\\$", replacement)
      template <- stringi::stri_replace_first_regex(template, paste0("@", id, "@"), replacement)
    }
  }
  if(nchar(template) == 0)
    NULL
  else
    template
},
update_quiz = function(quiz) {
self$quiz <- quiz
self$invalidate_hidden_data_list()
},
get_data_and_environment = function() {
if(is.null(self$quiz))
data <- self$recursive_instantiated_data()
else
data <- self$quiz$recursive_instantiated_data()
env <- empty_env()
eval(data, env)
list(data = data, env = env)
},
validate_data = function(parent_data = NULL) {},
get_markdown_from_template = function(template, opts = list(), info = list()) {
self$instantiate_placeholders(template, private$placeholders, opts, info)
},
to_markdown = function(opts = list(), info = list()) {
opts <- update_list(private$default_options, opts)
opts$export <- "markdown"
private$validate_options(opts, info)
template <- "@INST_TEXT@\n\n@FEEDBACK@\n"
self$instantiate_placeholders(template, private$placeholders, opts, info)
},
#' Feedback options if none is provided
get_default_feedback = function() {
list(text = self$answer)
},
#' Sanitize provided feedback
get_feedback_from_field = function(feedback) {
  # Normalise user-supplied feedback into a named option list:
  # NULL falls back to the default feedback, atomic/language values are
  # wrapped as the `text` entry, a list is taken as-is, and anything else
  # yields NULL so only the default options survive the merge below.
  normalised <-
    if (is.null(feedback)) {
      self$get_default_feedback()
    } else if (is.character(feedback) || is.numeric(feedback) || is.language(feedback)) {
      list(text = feedback)
    } else if (is.list(feedback)) {
      feedback
    }
  # Fill in any option the caller did not provide.
  update_list(private$default_feedback_options, normalised)
},
#' Template for full feedback
get_feedback = function(opts, info) {
# No feedback
if (!is.null(opts$feedback) && !opts$feedback)
return(NULL)
"@ANSWER_INFO@\n\n@ANSWER_STRING@@EVALUATED_ANSWER@\n\n@FEEDBACK_ANSWER@\n"
},
get_answer_string = function(opts, info) {
if(is.null(info$answer_string))
"**R\u00E9ponse :** "
else
info$answer_string
},
#' Feedback itself giving the right answer
get_feedback_answer = function(opts, info) {
# Get feedback as a proper named list with default arguments
feedback_opts <- self$instantiated_feedback
# Check for spurious arguments in feedback_options
unknown_opts <- setdiff(names(feedback_opts),
names(private$default_feedback_options))
if (length(unknown_opts) > 0) {
stop("Unknown options: ", paste0(unknown_opts))
}
# Override feedback_options with upstream opts
feedback_opts <- update_list(feedback_opts, opts)
# Check that options are coherent
private$validate_feedback_options(feedback_opts, info)
feedback <-
if (feedback_opts$eval) {
feedback_opts$text
} else {
if (is.null(feedback_opts$noeval_text))
feedback_opts$text
else
feedback_opts$noeval_text
}
feedback <- if (is.character(feedback)) {
feedback
} else if (is.language(feedback))
sprintf("```{r}\n%s\n```\n", answerstr(feedback))
else if (is.numeric(feedback))
sprintf("```{r}\n%s\n```\n", answerstr(feedback))
else stop("Unsupported feedback type: ", sQuote(feedback))
# Maybe indent the feedback
if(is.null(opts$indent))
feedback
else
add_spaces_left(feedback, 4)
},
get_XML_question_text = function(opts, info) {
# md_question <- self$get_XML_question_markdown(opts, info)
md_question <- self$get_inst_text(opts, info)
HTML_question <- render_HTML(md_question, opts, info)
trimws(HTML_question) # pandoc seems to add some leading newlines
},
# Return XML "generalfeedback" node with feedback in HTML as CDATA
get_XML_generalfeedback = function(opts, info) {
if(!opts$feedback) return(NULL)
# Generate HTML for feedback
placeholders <- update_list(private$placeholders, private$xml_placeholders)
tmpl <- self$get_feedback(opts, info)
md_feedback <- self$instantiate_placeholders(tmpl, placeholders, opts, info)
HTML_feedback <- render_HTML(md_feedback, opts, info)
HTML_feedback0 <- trimws(HTML_feedback) # pandoc seems to add some leading newlines
# Return XML with inner HTML
tmpl <-"<generalfeedback format=\"html\">
<text><![CDATA[%s]]></text>
</generalfeedback>"
tmpl0 <- add_spaces_left(tmpl, opts$indent)
sprintf(tmpl0, HTML_feedback0)
},
get_XML_answer = function(opts, info) {
as.character(self$get_evaluated_answer2(opts, info))
},
#' Export Question as XML
to_XML = function(opts = NULL, info = NULL) {
# Set up environment for evaluating data if not already
if (is.null(info$env)) {
info0 <- self$get_data_and_environment()
info <- update_list(info, info0)
}
# Setting options
default_opts <- update_list(private$default_options, private$xml_default_options)
opts <- update_list(default_opts, opts)
opts$export <- "xml"
private$validate_options(opts, info)
template <- add_spaces_left(private$xml_question_template, opts$indent)
placeholders <- update_list(private$placeholders, private$xml_placeholders)
# Answers might modify datasets stored in info$env
self$instantiate_placeholders(template, placeholders, opts, info)
},
invalidate_all = function() {
self$invalidate_text()
self$invalidate_answer()
self$invalidate_feedback()
self$invalidate_hidden_data()
self$invalidate_data()
self$invalidate_hidden_data_list()
},
invalidate_ancestor = function() {
if (is.null(self$ancestor))
self$invalidate_hidden_data()
else
self$ancestor$invalidate_hidden_data()
},
invalidate_data = function() {
private$is_data_available <- FALSE
self$invalidate_inst_data()
},
invalidate_hidden_data = function() {
private$is_hidden_data_available <- FALSE
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
self$invalidate_hidden_data_list()
},
invalidate_hidden_data_list = function() {
private$is_hidden_data_list_available <- FALSE
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
},
invalidate_inst_data = function() {
private$is_data_instantiated <- FALSE
},
invalidate_inst_text = function() {
private$is_text_instantiated <- FALSE
},
invalidate_inst_answer = function() {
private$is_answer_instantiated <- FALSE
},
invalidate_inst_feedback = function() {
private$is_feedback_instantiated <- FALSE
},
invalidate_header = function() {
self$invalidate_text()
},
invalidate_text = function() {
private$is_text_available <- FALSE
self$invalidate_inst_text()
},
invalidate_answer = function() {
private$is_answer_available <- FALSE
self$invalidate_inst_answer()
},
invalidate_feedback = function() {
private$is_feedback_available <- FALSE
self$invalidate_inst_feedback()
},
# Instantiated data for this question, prefixed with its set.seed() call
# when a seed is set. `seed_init = TRUE` skips the no-seed sanity check
# (used when a caller has already arranged for reproducibility).
recursive_instantiated_data = function(seed_init = FALSE) {
  # `&&` (scalar, short-circuiting) is the correct operator inside `if`;
  # the elementwise `&` evaluated every operand unconditionally.
  if (!seed_init && !is_empty_language(self$data) && is.null(self$seed))
    stop("Some data but no seed to initialize")
  if (is.null(self$seed))
    self$instantiated_data
  else
    merge_languages(
      instantiate_data_list(
        bquote(set.seed(.(self$seed))),
        self$hidden_data_list),
      self$instantiated_data)
},
get_type = function(opts, info) {
self$type
},
get_title = function(opts, info) {
self$title
},
get_data_chunk = function(opts, info) {
l <- sanitize_language(self$local_instantiated_data)
if (is.null(l))
NULL
else
sprintf("```{r include = FALSE}\n%s\n```", answerstr(l))
},
get_rec_data_chunk = function(opts, info) {
if (is.null(self$quiz))
stop("No defined Quiz in question ", sQuote(self$title))
l <- sanitize_language(self$quiz$recursive_instantiated_data())
if (is.null(l))
NULL
else
sprintf("```{r include = FALSE}\n%s\n```", answerstr(l))
},
# Data chunk for formatting answer
get_answer_info = function(opts, info) {
sprintf("```{r, include = FALSE}\n%s\nanswer <- {\n%s}\n```",
answerstr(quote(cquote <- function(s) {
if (is.character(s))
"`"
else if (is.numeric(s))
"$"
else stop("Argument is not character or numeric ", sQuote(s))
})),
answerstr(self$instantiated_answer))
},
get_evaluated_answer = function(opts, info) {
"`r cquote(answer)``r answer``r cquote(answer)`"
},
get_evaluated_answer2 = function(opts, info) {
# Set up environment for evaluating data if not already
if (is.null(info$env)) {
info0 <- self$get_data_and_environment()
info <- update_list(info, info0)
}
eval(self$instantiated_answer, info$env)
},
get_seed_chunk = function(opts, info) {
if (is.null(self$seed))
NULL
else
sprintf("```{r}\nset.seed(%d)\n```", self$seed)
},
get_text = function(opts, info) {
self$text
},
get_inst_text = function(opts, info) {
if (!is.null(opts$numbered) && opts$numbered) {
paste0("**Question ", info$num, " :** ", self$instantiated_text)
} else
self$instantiated_text
},
get_inst_cookie = function(opts, info) {
stop("Abstract method")
},
get_inst_text_and_cookie = function(opts, info) {
sprintf("%s\n\n%s", self$get_inst_text(opts, info), self$get_inst_cookie(opts, info))
},
get_inst_text_and_number = function(opts, info) {
stopifnot(is.numeric(info$index))
sprintf("%s (%d)", self$get_inst_text(opts, info), info$index)
},
get_guess = function() {
},
get_is_correct_icon = function() {
},
# Evaluate the hidden-data expression on top of VAR_LIST and publish the
# resulting variables through the `hidden_data_list` active binding
# (which also invalidates every cached instantiation that depends on it).
instantiate_hidden_data_list = function(var_list = NULL, seed_init = FALSE) {
  # `&&` short-circuits and is the idiomatic scalar operator for `if`;
  # the original elementwise `&` evaluated all three operands every time.
  if (!seed_init && !is_empty_language(self$hidden_data) && is.null(self$hidden_seed))
    stop("Some hidden data but no seed to instantiate them")
  # VAR_LIST must be a named list for list2env to work
  if (is.null(var_list)) var_list <- list()
  env <- list2env(var_list, envir = empty_env())
  # Set seed if any and eval hidden data
  if (!is.null(self$hidden_seed))
    eval(bquote(set.seed(.(self$hidden_seed))), envir = env)
  eval(self$hidden_data, envir = env)
  # Update variables in VAR_LIST with the (possibly dot-prefixed) names
  # created in ENV.
  new_var_list <- update_list(var_list, as.list(env, all.names = TRUE))
  self$hidden_data_list <- new_var_list
  ## private$.hidden_data_list <- new_var_list
  ## private$is_hidden_data_list_available <- TRUE
},
instantiate_feedback_list = function(feedback, var_list) {
text <- instantiate_object(feedback$text, self$hidden_data_list)
noeval_text <- instantiate_object(feedback$noeval_text, self$hidden_data_list)
feedback$text <- text
feedback$noeval_text <- noeval_text
feedback
},
hidden_data_names = function() {
  # Evaluate the hidden-data expression in a throwaway environment and
  # report every name it defines (including dot-prefixed ones).
  sandbox <- empty_env()
  if (!is.null(self$hidden_seed)) {
    eval(bquote(set.seed(.(self$hidden_seed))), envir = sandbox)
  }
  eval(self$hidden_data, envir = sandbox)
  ls(envir = sandbox, all.names = TRUE)
},
rename = function(prefix, names = self$hidden_data_names()) {
self$rename_text(prefix, names)
self$rename_header(prefix, names)
self$rename_answer(prefix, names)
self$rename_feedback(prefix, names)
self$rename_data(prefix, names)
self$rename_hidden_data(prefix, names)
self$invalidate_text()
self$invalidate_answer()
self$invalidate_hidden_data()
self$invalidate_hidden_data_list()
self$invalidate_feedback()
self$invalidate_data()
self
},
rename_header = function(prefix, names = self$hidden_data_names()) {
self$header <- prefix_object(prefix, names, self$header)
self
},
rename_text = function(prefix, names = self$hidden_data_names()) {
self$text <- prefix_object(prefix, names, private$.text)
self$invalidate_text()
self
},
rename_answer = function(prefix, names = self$hidden_data_names()) {
self$answer <- prefix_object(prefix, names, self$answer)
self$invalidate_answer()
self
},
rename_feedback = function(prefix, names = self$hidden_data_names()) {
feedback <- self$feedback
text <- prefix_object(prefix, names, feedback$text)
noeval_text <- prefix_object(prefix, names, feedback$noeval_text)
if (!is.null(text))
feedback$text <- text
if (!is.null(noeval_text))
feedback$noeval_text <- noeval_text
self$feedback <- feedback
self$invalidate_feedback()
self
},
rename_data = function(prefix, names = self$hidden_data_names()) {
self$data <- prefix_object(prefix, names, self$data)
self$invalidate_data()
self
},
rename_hidden_data = function(prefix, names = self$hidden_data_names()) {
self$hidden_data <- prefix_object(prefix, names, self$hidden_data)
self$invalidate_hidden_data()
self
},
# Build a detached duplicate of this question via the Question() factory.
# NOTE(review): `title`, `header` and `tags` are not carried over to the
# copy, and construction goes through `Question()` rather than the concrete
# class -- confirm both are intentional.
copy = function() {
  Question(self$text,
           type = self$type,
           seed = self$seed,
           hidden_seed = self$hidden_seed,
           hidden_data = self$hidden_data,
           data = self$data,
           answer = self$answer,
           feedback = self$feedback)
}
),
active = list(
title = function(title) {
if (missing(title)) {
if (is.null(private$.title)) {
if (nchar(self$instantiated_text) > 60)
paste0(substr(self$instantiated_text, 0, 57), "...")
else
self$instantiated_text
} else private$.title
} else {
private$.title <- title
}
},
header = function(header) {
if (missing(header)) {
private$.header
} else {
private$.header <- header
self$invalidate_header()
private$.header
}
},
ancestor = function(ancestor) {
if (missing(ancestor)) {
private$.ancestor
} else {
private$.ancestor <- ancestor
self$invalidate_ancestor()
private$.ancestor
}
},
text = function(text) {
if (missing(text)) {
paste(c(trimws(self$header), trimws(private$.text)), collapse = "\n\n")
} else {
private$.text <- text
self$invalidate_inst_text()
}
},
instantiated_text = function() {
if (private$is_text_instantiated)
private$.instantiated_text
else {
private$.instantiated_text <- instantiate_text_list(
self$text,
self$hidden_data_list)
private$is_text_instantiated <- TRUE
private$.instantiated_text
}
},
answer = function(answer) {
if (missing(answer)) {
private$.answer
} else {
private$.answer <- answer
self$invalidate_inst_answer()
}
},
instantiated_answer = function() {
if (private$is_answer_instantiated)
private$.instantiated_answer
else {
private$.instantiated_answer <- instantiate_object(
self$answer,
self$hidden_data_list)
private$is_answer_instantiated <- TRUE
private$.instantiated_answer
}
},
feedback = function(feedback) {
if (missing(feedback)) {
if (private$is_feedback_available)
private$.feedback
else {
private$.feedback <- self$get_feedback_from_field(private$.feedback)
private$is_feedback_available <- TRUE
private$.feedback
}
} else {
private$.feedback <- self$get_feedback_from_field(feedback)
self$invalidate_inst_feedback()
private$is_feedback_available <- TRUE
}
},
instantiated_feedback = function() {
if (private$is_feedback_instantiated)
private$.instantiated_feedback
else {
inst_feedback <- self$instantiate_feedback_list(self$feedback, self$hidden_data_list)
private$is_feedback_instantiated <- TRUE
private$.instantiated_feedback <- inst_feedback
}
},
hidden_seed = function(seed) {
if (missing(seed))
private$.hidden_seed
else {
private$.hidden_seed <- seed
self$invalidate_ancestor()
}
},
hidden_data = function(hidden_data) {
if (missing(hidden_data)) {
private$.hidden_data
} else {
private$.hidden_data <- hidden_data
self$invalidate_ancestor()
}
},
hidden_data_list = function(hidden_data_list) {
if (missing(hidden_data_list)) {
if (private$is_hidden_data_list_available)
private$.hidden_data_list
else {
root <- self
while (!is.null(root$ancestor))
root <- root$ancestor
root$instantiate_hidden_data_list()
private$is_hidden_data_list_available <- TRUE
private$.hidden_data_list
}
} else {
private$is_hidden_data_list_available <- TRUE
private$.hidden_data_list <- hidden_data_list
## Need to be recomputed
self$invalidate_inst_text()
self$invalidate_inst_answer()
self$invalidate_inst_feedback()
self$invalidate_inst_data()
}
},
seed = function(seed) {
if (missing(seed))
private$.seed
else {
private$.seed <- seed
}
},
data = function(data) {
if (missing(data)) {
private$.data
} else {
private$.data <- data
self$invalidate_inst_data()
}
},
instantiated_data = function() {
if (private$is_data_instantiated)
private$.instantiated_data
else {
private$.instantiated_data <- instantiate_data_list(
private$.data,
self$hidden_data_list)
private$is_data_instantiated <- TRUE
private$.instantiated_data
}
},
local_instantiated_data = function() {
if(is.null(self$seed))
self$instantiated_data
else
merge_languages(
instantiate_data_list(
bquote(set.seed(.(self$seed))),
self$hidden_data_list),
self$instantiated_data)
},
tags = function(tags) {
if (missing(tags)) {
private$.tags
} else {
private$.tags <- tags
}
}
),
private = list(
.title = NULL,
.header = NULL,
.ancestor = NULL,
.text = NULL,
is_text_available = FALSE,
.answer = NULL,
is_answer_available = FALSE,
.feedback = NULL,
is_feedback_available = FALSE,
.data = NULL,
is_data_available = FALSE,
.seed = NULL,
.hidden_seed = NULL,
.hidden_data = NULL,
is_hidden_data_available = FALSE,
.hidden_data_list = NULL,
is_hidden_data_list_available = FALSE,
.instantiated_text = NULL,
is_text_instantiated = FALSE,
.instantiated_answer = NULL,
is_answer_instantiated = FALSE,
.instantiated_feedback = NULL,
is_feedback_instantiated = FALSE,
.instantiated_data = NULL,
is_data_instantiated = FALSE,
.tags = NULL,
default_feedback_options = list(text = "", noeval_text = NULL, eval = TRUE, indent = 2),
default_options = list(numbered = TRUE,
export = "markdown",
feedback = TRUE,
quiet = FALSE),
xml_default_options = list(
numbered = FALSE,
indent = 0
),
feedback_options = NULL,
placeholders = NULL,
placeholders_regex = NULL,
validate_options = function(opts, info) {
if (!is.null(opts$numbered) && opts$numbered && is.null(info$num))
stop("`numbered` option is enabled but no `num` available")
},
validate_feedback_options = function(opts, info) {
## super$validate_feedback_options(opts, info)
},
xml_question_template = trimws("
<question type=\"@TYPE@\">
<name>
<text><![CDATA[@TITLE@]]></text>
</name>
<questiontext format=\"html\">
<text><![CDATA[@XML_QUESTION_TEXT@]]></text>
</questiontext>
<answer fraction=\"100\" format=\"plain_text\">
<text>@XML_ANSWER@</text>
</answer>
@XML_GENERALFEEDBACK@
<hidden>0</hidden>
</question>
"),
xml_placeholders = NULL))
|
# 30DayChartChallenge Day 27 - Educational
# Animated illustration of the Datasaurus Dozen: four datasets with
# (nearly) identical summary statistics that look completely different.

# load packages
library(tidyverse)
library(gganimate)

# load data
datasaurus <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-10-13/datasaurus.csv')

# compute summary statistics per dataset
dino_stats <- datasaurus %>%
  group_by(dataset) %>%
  summarize(
    mean_x = mean(x),
    mean_y = mean(y),
    std_dev_x = sd(x),
    std_dev_y = sd(y),
    corr_x_y = cor(x, y)
  )

# merge raw data & summary stats
d <- datasaurus %>%
  left_join(dino_stats, by = "dataset")

# plot data
d %>%
  filter(dataset %in% c("dino", "star", "circle", "x_shape")) %>%
  ggplot(aes(x = x, y = y, colour = dataset)) +
  geom_point(size = 2) +
  xlim(0, 100) + ylim(0, 100) +
  coord_fixed(clip = "off") +
  scale_colour_manual(values = c("#298fca", "#4C5F28", "#ffe12b", "#E3242B")) +
  labs(title = "Same stats, different graph",
       subtitle = "The importance of visually inspecting your data",
       caption = "Source: datasauRus package / TidyTuesday",
       x = "",
       y = "") +
  # annotate() draws the label once per panel; the original geom_text()
  # call inherited the full data frame and redrew the same string once per
  # row (thousands of overplotted copies).
  # NOTE(review): corr_x_y[1] is the first row's correlation only -- fine
  # here because all displayed datasets share r, but confirm if data change.
  annotate("text", x = 10, y = 98,
           label = paste0("r = ", signif(d$corr_x_y[1], 3)),
           colour = "black", hjust = 0.5, size = 6) +
  theme_bw() +
  theme(legend.position = "none",
        plot.title = element_text(size = 22, hjust = 0.5, face = "bold"),
        plot.subtitle = element_text(size = 16, hjust = 0.5),
        plot.caption = element_text(size = 12, face = "italic"),
        plot.background = element_rect(fill = "#ffffff"),
        panel.background = element_rect(fill = "#ffffff"),
        axis.text = element_text(size = 16)) +
  transition_states(dataset, state_length = 1)

# save gif
anim_save("Day27_Educational/Day27.gif")
| /Day27_Educational/Day27_Educational.R | no_license | timschoof/30DayChartChallenge | R | false | false | 1,793 | r | # 30DayChartChallenge Day 27 - Educational
# load packages
library(tidyverse)
library(gganimate)
# load data
datasaurus <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-10-13/datasaurus.csv')
# compute summary statistics
dino_stats <- datasaurus %>% #datasauRus::datasaurus_dozen %>%
group_by(dataset) %>%
summarize(
mean_x = mean(x),
mean_y = mean(y),
std_dev_x = sd(x),
std_dev_y = sd(y),
corr_x_y = cor(x, y)
)
# merge raw data & summary stats
d <- datasaurus %>% #datasauRus::datasaurus_dozen %>%
left_join(dino_stats, by = "dataset")
# plot data
d %>%
filter(dataset %in% c("dino", "star", "circle", "x_shape")) %>%
ggplot(aes(x = x, y = y, colour = dataset)) +
geom_point(size = 2) +
xlim(0,100) + ylim(0,100) +
coord_fixed(clip = "off") +
scale_colour_manual(values = c("#298fca", "#4C5F28", "#ffe12b", "#E3242B")) +
labs(title = "Same stats, different graph",
subtitle = "The importance of visually inspecting your data",
caption = "Source: datasauRus package / TidyTuesday",
x = "",
y = "") +
geom_text(x = 10, y = 98, label = paste("r = ", signif(d$corr_x_y[1], 3), sep = ""), colour = "black", hjust = 0.5, size = 6) +
theme_bw() +
theme(legend.position = "none",
plot.title = element_text(size = 22, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 16, hjust = 0.5),
plot.caption = element_text(size = 12, face = "italic"),
plot.background = element_rect(fill = "#ffffff"),
panel.background = element_rect(fill = "#ffffff"),
axis.text = element_text(size = 16)) +
transition_states(dataset, state_length = 1)
# save gif
anim_save("Day27_Educational/Day27.gif")
|
# Source every package R file, then build an IMaGES object from the
# autism adjacency matrices under test/steve.
library(sfsmisc)

# Load all package functions from R/ into the current session.
sapply(list.files(pattern = "[.]R$", path = "R/", full.names = TRUE), source)

# One whitespace-delimited matrix per subject; skip the header line.
# NOTE(review): "autism*" is treated as a regex by list.files(), where it
# matches "autis" followed by zero or more "m"s -- confirm the intent.
filenames <- list.files("test/steve", pattern = "autism*", full.names = TRUE)

# lapply replaces the index-growing for loop; it also handles the
# zero-files case, where the original `1:length(filenames)` iterated over
# c(1, 0) and errored on the empty vector.
matrices <- lapply(filenames, function(f) as.matrix(read.table(f, skip = 1)))

images <- new("IMaGES", matrices = matrices, penalty = 1.5)
| /call_gies.R | no_license | noahfl/pcalg | R | false | false | 362 | r | library(sfsmisc)
sapply(list.files(pattern="[.]R$", path="R/", full.names=TRUE), source);
filenames <- list.files("test/steve", pattern="autism*", full.names=TRUE)
matrices = list()
#for (i in 1:3) {
for (i in 1:length(filenames)) {
matrices[[i]] <- as.matrix(read.table(filenames[[i]], skip=1))
}
images = new("IMaGES", matrices = matrices, penalty=1.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tally.R
\name{prop}
\alias{count}
\alias{perc}
\alias{prop}
\alias{prop1}
\title{Compute proportions, percents, or counts for a single level}
\usage{
prop(x, data = parent.frame(), useNA = "no", ..., level = NULL,
long.names = TRUE, sep = ".", format = "proportion", quiet = TRUE,
pval.adjust = FALSE)
prop1(..., pval.adjust = TRUE)
count(x, data = parent.frame(), ..., format = "count")
perc(x, data = parent.frame(), ..., format = "percent")
}
\arguments{
\item{x}{an R object, usually a formula}
\item{data}{a data frame in which \code{x} is to be evaluated}
\item{useNA}{an indication of how NA's should be handled. By default, they are
ignored.}
\item{level}{the level for which counts, proportions or percents are
calculated}
\item{long.names}{a logical indicating whether long names should be used
when there is a conditioning variable}
\item{sep}{a character used to separate portions of long names}
\item{format}{one of \code{proportion}, \code{percent}, or \code{count},
possibly abbreviated}
\item{quiet}{a logical indicating whether messages regarding the
target level should be suppressed.}
\item{pval.adjust}{a logical indicating whether the "p-value" adjustment should be
applied. This adjustment adds 1 to the numerator and denominator counts.}
\item{\dots}{arguments passed through to \code{\link{tally}}}
}
\description{
Compute proportions, percents, or counts for a single level
}
\details{
\code{prop1} is intended for the computation of p-values from randomization
distributions and differs from \code{prop} only in its default value of
\code{pval.adjust}.
}
\note{
For 0-1 data, level is set to 1 by default since that is a standard
coding scheme for success and failure.
}
\examples{
if (require(mosaicData)) {
prop( ~sex, data=HELPrct)
prop( ~sex, data=HELPrct, level='male')
count( ~sex | substance, data=HELPrct)
prop( ~sex | substance, data=HELPrct)
perc( ~sex | substance, data=HELPrct)
}
}
| /man/prop.Rd | no_license | aaronbaggett/mosaic | R | false | true | 2,014 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tally.R
\name{prop}
\alias{count}
\alias{perc}
\alias{prop}
\alias{prop1}
\title{Compute proportions, percents, or counts for a single level}
\usage{
prop(x, data = parent.frame(), useNA = "no", ..., level = NULL,
long.names = TRUE, sep = ".", format = "proportion", quiet = TRUE,
pval.adjust = FALSE)
prop1(..., pval.adjust = TRUE)
count(x, data = parent.frame(), ..., format = "count")
perc(x, data = parent.frame(), ..., format = "percent")
}
\arguments{
\item{x}{an R object, usually a formula}
\item{data}{a data frame in which \code{x} is to be evaluated}
\item{useNA}{an indication of how NA's should be handled. By default, they are
ignored.}
\item{level}{the level for which counts, proportions or percents are
calculated}
\item{long.names}{a logical indicating whether long names should be used
when there is a conditioning variable}
\item{sep}{a character used to separate portions of long names}
\item{format}{one of \code{proportion}, \code{percent}, or \code{count},
possibly abbreviated}
\item{quiet}{a logical indicating whether messages regarding the
target level should be suppressed.}
\item{pval.adjust}{a logical indicating whether the "p-value" adjustment should be
applied. This adjustment adds 1 to the numerator and denominator counts.}
\item{\dots}{arguments passed through to \code{\link{tally}}}
}
\description{
Compute proportions, percents, or counts for a single level
}
\details{
\code{prop1} is intended for the computation of p-values from randomization
distributions and differs from \code{prop} only in its default value of
\code{pval.adjust}.
}
\note{
For 0-1 data, level is set to 1 by default since that is a standard
coding scheme for success and failure.
}
\examples{
if (require(mosaicData)) {
prop( ~sex, data=HELPrct)
prop( ~sex, data=HELPrct, level='male')
count( ~sex | substance, data=HELPrct)
prop( ~sex | substance, data=HELPrct)
perc( ~sex | substance, data=HELPrct)
}
}
|
# A242310: Ilya Lopatin and Juri-Stepan Gerasimov, May 10 2014
# A000056: _N. J. A. Sloane_
# A051419: Paul L. Chessin (pchess(AT)ix.netcom.com)
# A000045: _N. J. A. Sloane, 1964_
# A169890: _David Applegate_, _Marc LeBrun_ and _N. J. A. Sloane_, Jul 06 2010
# A169888: _N. J. A. Sloane_, Jul 07 2010, based on a letter from _Jean-Claude
# Babois_.
id <- "A169890"
test_that("OEIS sequence A169890 has three authors", {
testthat::expect_equal(id %>%
OEIS_author %>%
length, 3)
})
| /tests/testthat/test_OEIS_author.R | permissive | EnriquePH/OEIS.R | R | false | false | 539 | r | # A242310: Ilya Lopatin and Juri-Stepan Gerasimov, May 10 2014
# A000056: _N. J. A. Sloane_
# A051419: Paul L. Chessin (pchess(AT)ix.netcom.com)
# A000045: _N. J. A. Sloane, 1964_
# A169890: _David Applegate_, _Marc LeBrun_ and _N. J. A. Sloane_, Jul 06 2010
# A169888: _N. J. A. Sloane_, Jul 07 2010, based on a letter from _Jean-Claude
# Babois_.
id <- "A169890"
test_that("OEIS sequence A169890 has three authors", {
testthat::expect_equal(id %>%
OEIS_author %>%
length, 3)
})
|
## Wrap a square invertible matrix in a cache-enabled object.
## The returned list exposes four closures sharing one environment:
##   set(y)        -- replace the stored matrix (and invalidate the cache)
##   get()         -- return the stored matrix
##   setinverse(m) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if none yet
## Intended to be consumed by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
cached_inverse <- NULL
set <- function(y) {
x <<- y
# a new matrix invalidates any previously cached inverse
cached_inverse <<- NULL
}
get <- function() x
setinverse <- function(solve) cached_inverse <<- solve
getinverse <- function() cached_inverse
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Return the inverse of a cache-matrix built by makeCacheMatrix().
## On a cache hit, the stored inverse is returned immediately (with a
## message); otherwise the inverse is computed via solve(), stored back
## on `x`, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
cached <- x$getinverse()
if (!is.null(cached)) {
# cache hit: skip the (potentially expensive) solve() call
message("getting cached data")
return(cached)
}
# cache miss: compute, memoize on x, and return
inverse <- solve(x$get(), ...)
x$setinverse(inverse)
inverse
}
## Running Example
# > matriz <- matrix(runif(9,1,100),3,3)
# Show "matriz"
# > matriz
# [,1] [,2] [,3]
# [1,] 90.15773 26.488523 72.41762
# [2,] 60.19124 9.766337 91.25337
# [3,] 96.55614 47.140896 67.54101
# Generating the cache matrix
# > matrixExample <- makeCacheMatrix(matriz)
# Finally calculate or retrieve the value of
# Inverted matrix using cacheSolve:
# > cacheSolve(matrixExample)
# [,1] [,2] [,3]
# [1,] -0.162284059 -0.12840965 0.22513611
# [2,] 0.194560788 -0.05779459 -0.08093354
# [3,] 0.004915707 0.20579781 -0.04886617
| /LexicalScoping.R | no_license | fsaavedraolmos/RProgramming | R | false | false | 1,706 | r | ## This following function takes a square invertible matrix
## and return a list of functions to:
## 1.- Set the matrix.
## 2.- Get the matrix.
## 3.- Set the inverse of matrix.
## 4.- Get the inverse of matrix.
## Finally, this is used as input in cacheSolve function.
## NOTE(review): duplicate record of makeCacheMatrix() above; code unchanged.
## Wraps matrix x with closures sharing a cached inverse `m`.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
## Set the matrix
set <- function(y){
x <<- y
## invalidate any previously cached inverse
m <<- NULL
}
## Get the matrix
get <- function() x
## Set the inverse of matrix.
## NOTE(review): the parameter name 'solve' shadows base::solve in this scope.
setinverse <- function(solve) m <<- solve
## Get the inverse of matrix.
getinverse <- function() m
## Return the list of functions.
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## In cacheSolve function, we take the matrix and
## calculate the inverse.
## Finally we return it.
## NOTE(review): duplicate record of cacheSolve() above; code unchanged.
## Returns the inverse of cache-matrix `x`, computing and caching on first use.
cacheSolve <- function(x, ...) {
## Get
m <- x$getinverse()
if(!is.null(m)) {
## If the value already exists, returns m.
message("getting cached data")
return(m)
}
## Else
## compute the inverse, store it on x, and return it
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
return(m)
}
## Running Example
# > matriz <- matrix(runif(9,1,100),3,3)
# Show "matriz"
# > matriz
# [,1] [,2] [,3]
# [1,] 90.15773 26.488523 72.41762
# [2,] 60.19124 9.766337 91.25337
# [3,] 96.55614 47.140896 67.54101
# Generating the cache matrix
# > matrixExample <- makeCacheMatrix(matriz)
# Finally calculate or retrieve the value of
# Inverted matrix using cacheSolve:
# > cacheSolve(matrixExample)
# [,1] [,2] [,3]
# [1,] -0.162284059 -0.12840965 0.22513611
# [2,] 0.194560788 -0.05779459 -0.08093354
# [3,] 0.004915707 0.20579781 -0.04886617
|
#
# IKI Bangladesh (MIOASI): S6 Exceedance Probabilities
#
# Make a netCDF file of gust-speed threshold exceedance probabilities.
# Posterior predictive draws ("preds": one row per simulation, one column per
# grid cell, produced upstream) are loaded from disk; for each threshold the
# exceedance probability at a grid cell is the fraction of draws at or above
# it -- the Monte Carlo approximation to the posterior exceedance integral.
#
#
# Author: HS
# Created: Jan 2020
# QA: TE 17/3/20
library(RNetCDF)
library(abind)
library(doParallel)
registerDoParallel(cores = 10)
# Define Bangladesh tropical cyclone categories in knots converted to m/s
# WINDTHRESHOLDS <- c(17, 22, 28, 34, 48, 64, 120) * 0.514444
# Currently a regular 0-100 m/s ladder of thresholds is used instead.
WINDTHRESHOLDS <- seq(0, 100, 1)
INDIR <- ""
VAR <- 'fg.T1Hmax'
RES <- '4p4'
# Load base netcdf to get lat/lon variables
filename <- paste('fp.', VAR, '.AILA.', RES, 'km.nc', sep = '')
stormsnc <- open.nc(paste(INDIR, filename, sep = '/'))
# Extract netcdf coordinate variables
lat <- var.get.nc(stormsnc, 'latitude')
lon <- var.get.nc(stormsnc, 'longitude')
nLon <- length(lon)
nLat <- length(lat)
nCells <- nLon*nLat
# Make lon-lat grid (lon varies fastest, matching the prediction columns)
lonlat <- expand.grid(lon = lon, lat = lat)
# Load predictions from RData object -- expected to define `preds`
# (draws x grid cells); TODO confirm against the upstream script.
predsfile <- "grand_fp_preds.Rdata"
load(predsfile)
# Calculate exceedance probabilities from preds, one threshold per iteration.
# Done in parallel with %dopar%; results collected in list plist.
plist <- foreach(i = seq_along(WINDTHRESHOLDS)) %dopar% {
# Boolean matrix: TRUE where a draw meets or exceeds the threshold
bool <- preds >= WINDTHRESHOLDS[i]
# Column means = fraction of TRUE draws per grid cell.
# N.B. Here the mean is the Monte Carlo approximation to the integral --
# so unrelated to the mean of any distribution.
p <- apply(bool, 2, mean)
matrix(p, nrow = nLon, ncol = nLat)
}
# Convert list to a 3D array: threshold x lon x lat
exceedm <- abind(plist, along=0)
# Write out netCDF file
outfile <- paste('pgrand.exceedance_tmp.', VAR, '.', RES, 'km.nc', sep = '')
outnc <- create.nc(outfile, large=T)
# Dimensions
dim.def.nc(outnc, 'latitude', nLat)
dim.def.nc(outnc, 'longitude', nLon)
dim.def.nc(outnc, 'gust_threshold', length(WINDTHRESHOLDS))
# Variables
var.def.nc(outnc, 'latitude', 'NC_FLOAT', 'latitude')
var.def.nc(outnc, 'longitude', 'NC_FLOAT', 'longitude')
var.def.nc(outnc, 'gust_threshold', 'NC_INT', 'gust_threshold')
var.def.nc(outnc, 'latitude_longitude', 'NC_CHAR', NA)
var.def.nc(outnc, 'probability_of_exceedance', 'NC_FLOAT', c('gust_threshold', 'longitude', 'latitude'))
# Define attributes (CF-style metadata)
att.put.nc(outnc, "latitude", "axis", "NC_CHAR", "Y")
att.put.nc(outnc, "latitude", "units", "NC_CHAR", "degrees_north")
att.put.nc(outnc, "latitude", "standard_name", "NC_CHAR", "latitude")
att.put.nc(outnc, "longitude", "axis", "NC_CHAR", "X")
att.put.nc(outnc, "longitude", "units", "NC_CHAR", "degrees_east")
att.put.nc(outnc, "longitude", "standard_name", "NC_CHAR", "longitude")
att.put.nc(outnc, "gust_threshold", "long_name", "NC_CHAR", "Gust speed classification threshold")
att.put.nc(outnc, "gust_threshold", "units", "NC_CHAR", "m s-1")
att.put.nc(outnc, "probability_of_exceedance", "long_name", "NC_CHAR", "posterior wind_speed_of_gust probability_of_exceedance")
# NOTE(review): the units attribute is written as integer 1 (dimensionless);
# confirm downstream tools accept a numeric rather than character units attr.
att.put.nc(outnc, "probability_of_exceedance", "units", "NC_INT", 1)
att.put.nc(outnc, "probability_of_exceedance", "grid_mapping", "NC_CHAR", "latitude_longitude")
att.put.nc(outnc, "latitude_longitude", "grid_mapping_name", "NC_CHAR", "latitude_longitude")
att.put.nc(outnc, "latitude_longitude", "longitude_of_prime_meridian", "NC_INT", 0)
att.put.nc(outnc, "latitude_longitude", "earth_radius", "NC_INT", 6371229.)
att.put.nc(outnc, "latitude_longitude", "proj4", "NC_CHAR", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
att.put.nc(outnc, "NC_GLOBAL", "comment", "NC_CHAR", "Supported by the International Climate Initiative (IKI) and the Federal Ministry for the Environment, Nature Conservation and Nuclear Safety, based on a decision of the Germany Bundestag")
att.put.nc(outnc, "NC_GLOBAL", "contact", "NC_CHAR", "enquiries@metoffice.gov.uk")
att.put.nc(outnc, "NC_GLOBAL", "data_type", "NC_CHAR", "grid")
att.put.nc(outnc, "NC_GLOBAL", "date_created", "NC_CHAR", format(Sys.time(), "%Y%m%dT%H:%M:%S"))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_max", "NC_FLOAT", max(lat))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_min", "NC_FLOAT", min(lat))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_resolution", "NC_FLOAT", 0.04)
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_units", "NC_CHAR", "degrees_north")
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_max", "NC_FLOAT", max(lon))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_min", "NC_FLOAT", min(lon))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_resolution", "NC_FLOAT", 0.04)
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_units", "NC_CHAR", "degrees_east")
att.put.nc(outnc, "NC_GLOBAL", "history", "NC_CHAR", "(1.0) Initial release")
# NOTE(review): the "id" attribute ("fpgrandw...") does not match the output
# filename ("pgrand.exceedance_tmp..."); confirm which identifier is intended.
att.put.nc(outnc, "NC_GLOBAL", "id", "NC_CHAR", paste("fpgrandw.", VAR, ".", RES, "km.nc", sep=""))
att.put.nc(outnc, "NC_GLOBAL", "institution", "NC_CHAR", "Met Office, UK")
att.put.nc(outnc, "NC_GLOBAL", "licence", "NC_CHAR", "Creative Commons Attribution 4.0 International (CC BY 4.0)")
att.put.nc(outnc, "NC_GLOBAL", "product_version", "NC_CHAR", "v1.0")
# NOTE(review): the "โ" below looks like a mis-encoded dash in the original;
# kept byte-identical pending confirmation of the intended character.
att.put.nc(outnc, "NC_GLOBAL", "project", "NC_CHAR", "Oasis Platform for Climate and Catastrophe Risk Assessment โ Asia")
# Removed dead stores present in the original: a second identical
# "institution" write and an earlier "keywords" write that the final
# "keywords" call below overwrote (att.put.nc replaces existing attributes).
att.put.nc(outnc, "NC_GLOBAL", "source", "NC_CHAR", "Met Office UM RA2T CON")
att.put.nc(outnc, "NC_GLOBAL", "spatial_resolution", "NC_CHAR", "4.4km")
att.put.nc(outnc, "NC_GLOBAL", "summary", "NC_CHAR", "Tropical cyclone gust speed threshold exceedence probabilities over Bangladesh")
att.put.nc(outnc, "NC_GLOBAL", "keywords", "NC_CHAR", "Bangladesh, footprint, posterior, exceedence probabilities, Met Office")
# Write in data
var.put.nc(outnc, 'latitude', lat)
var.put.nc(outnc, 'longitude', lon)
var.put.nc(outnc, 'gust_threshold', WINDTHRESHOLDS)
var.put.nc(outnc, 'probability_of_exceedance', exceedm)
close.nc(outnc) | /r/s6_exceedence_probability.R | permissive | MetOffice/IKI-Oasis-Bangladesh | R | false | false | 6,191 | r | #
# IKI Bangladesh (MIOASI): S6 Exceedence Probabilities
#
# NOTE(review): exact duplicate record of the script above; code unchanged.
# Make netcdf of estimates of mean, and low and upper credible intervals of mean gust speed.
# This process fits a GAM to the ensemble data, estimates a posterior distribution,
# and samples them to find the mean and credible interval based on a set of quantiles.
#
#
# Author: HS
# Created: Jan 2020
# QA: TE 17/3/20
library(RNetCDF)
library(abind)
library(doParallel)
registerDoParallel(cores = 10)
# Define Bangladesh tropical cyclon categories in knots converted to m/s
# WINDTHRESHOLDS <- c(17, 22, 28, 34, 48, 64, 120) * 0.514444
WINDTHRESHOLDS <- seq(0, 100, 1)
INDIR <- ""
VAR <- 'fg.T1Hmax'
RES <- '4p4'
# Load base netcdf to get lat/lon variables
filename <- paste('fp.', VAR, '.AILA.', RES, 'km.nc', sep = '')
stormsnc <- open.nc(paste(INDIR, filename, sep = '/'))
# Extract netcdf variables
lat <- var.get.nc(stormsnc, 'latitude')
lon <- var.get.nc(stormsnc, 'longitude')
nLon <- length(lon)
nLat <- length(lat)
nCells <- nLon*nLat
# Make lon-lat-ens grid
lonlat <- expand.grid(lon = lon, lat = lat)
# Load predictions data from RData object -- expected to define `preds`
predsfile <- "grand_fp_preds.Rdata"
load(predsfile)
# Calculate posterior stats from preds
# Use the simulated values at each gridpoint to calculate exceedend probabilities
# Do in parallel with %dopar% and write out to list qlist
plist <- foreach(i = seq_along(WINDTHRESHOLDS)) %dopar% {
# Make bool array of T/F values based on wind thresholds
bool <- preds >= WINDTHRESHOLDS[i]
# Find threshold probabilities based on total True values / total number of preds -> mean
# N.B. Here the mean is the Monte Carlo approximation to the integral -- so unrelated to the mean of any distribution
p <- apply(bool, 2, mean)
matrix(p, nrow = nLon, ncol = nLat)
}
# Convert list to 3D matrix
exceedm <- abind(plist, along=0)
# Write out netCDF file
outfile <- paste('pgrand.exceedance_tmp.', VAR, '.', RES, 'km.nc', sep = '')
outnc <- create.nc(outfile, large=T)
# Dimensions
dim.def.nc(outnc, 'latitude', nLat)
dim.def.nc(outnc, 'longitude', nLon)
dim.def.nc(outnc, 'gust_threshold', length(WINDTHRESHOLDS))
# Variables
var.def.nc(outnc, 'latitude', 'NC_FLOAT', 'latitude')
var.def.nc(outnc, 'longitude', 'NC_FLOAT', 'longitude')
var.def.nc(outnc, 'gust_threshold', 'NC_INT', 'gust_threshold')
var.def.nc(outnc, 'latitude_longitude', 'NC_CHAR', NA)
var.def.nc(outnc, 'probability_of_exceedance', 'NC_FLOAT', c('gust_threshold', 'longitude', 'latitude'))
# Define some attributes
att.put.nc(outnc, "latitude", "axis", "NC_CHAR", "Y")
att.put.nc(outnc, "latitude", "units", "NC_CHAR", "degrees_north")
att.put.nc(outnc, "latitude", "standard_name", "NC_CHAR", "latitude")
att.put.nc(outnc, "longitude", "axis", "NC_CHAR", "X")
att.put.nc(outnc, "longitude", "units", "NC_CHAR", "degrees_east")
att.put.nc(outnc, "longitude", "standard_name", "NC_CHAR", "longitude")
att.put.nc(outnc, "gust_threshold", "long_name", "NC_CHAR", "Gust speed classification threshold")
att.put.nc(outnc, "gust_threshold", "units", "NC_CHAR", "m s-1")
att.put.nc(outnc, "probability_of_exceedance", "long_name", "NC_CHAR", "posterior wind_speed_of_gust probability_of_exceedance")
att.put.nc(outnc, "probability_of_exceedance", "units", "NC_INT", 1)
att.put.nc(outnc, "probability_of_exceedance", "grid_mapping", "NC_CHAR", "latitude_longitude")
att.put.nc(outnc, "latitude_longitude", "grid_mapping_name", "NC_CHAR", "latitude_longitude")
att.put.nc(outnc, "latitude_longitude", "longitude_of_prime_meridian", "NC_INT", 0)
att.put.nc(outnc, "latitude_longitude", "earth_radius", "NC_INT", 6371229.)
att.put.nc(outnc, "latitude_longitude", "proj4", "NC_CHAR", "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
att.put.nc(outnc, "NC_GLOBAL", "comment", "NC_CHAR", "Supported by the International Climate Initiative (IKI) and the Federal Ministry for the Environment, Nature Conservation and Nuclear Safety, based on a decision of the Germany Bundestag")
att.put.nc(outnc, "NC_GLOBAL", "contact", "NC_CHAR", "enquiries@metoffice.gov.uk")
att.put.nc(outnc, "NC_GLOBAL", "data_type", "NC_CHAR", "grid")
att.put.nc(outnc, "NC_GLOBAL", "date_created", "NC_CHAR", format(Sys.time(), "%Y%m%dT%H:%M:%S"))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_max", "NC_FLOAT", max(lat))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_min", "NC_FLOAT", min(lat))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_resolution", "NC_FLOAT", 0.04)
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lat_units", "NC_CHAR", "degrees_north")
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_max", "NC_FLOAT", max(lon))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_min", "NC_FLOAT", min(lon))
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_resolution", "NC_FLOAT", 0.04)
att.put.nc(outnc, "NC_GLOBAL", "geospatial_lon_units", "NC_CHAR", "degrees_east")
att.put.nc(outnc, "NC_GLOBAL", "history", "NC_CHAR", "(1.0) Initial release")
att.put.nc(outnc, "NC_GLOBAL", "id", "NC_CHAR", paste("fpgrandw.", VAR, ".", RES, "km.nc", sep=""))
att.put.nc(outnc, "NC_GLOBAL", "institution", "NC_CHAR", "Met Office, UK")
att.put.nc(outnc, "NC_GLOBAL", "licence", "NC_CHAR", "Creative Commons Attribution 4.0 International (CC BY 4.0)")
att.put.nc(outnc, "NC_GLOBAL", "product_version", "NC_CHAR", "v1.0")
att.put.nc(outnc, "NC_GLOBAL", "project", "NC_CHAR", "Oasis Platform for Climate and Catastrophe Risk Assessment โ Asia")
# NOTE(review): "institution" is written twice (same value) and "keywords"
# twice (the later call overwrites this one) -- att.put.nc replaces existing
# attributes, so only the later values survive in the output file.
att.put.nc(outnc, "NC_GLOBAL", "institution", "NC_CHAR", "Met Office, UK")
att.put.nc(outnc, "NC_GLOBAL", "keywords", "NC_CHAR", "Bangladesh, footprint, quantiles, Met Office")
att.put.nc(outnc, "NC_GLOBAL", "source", "NC_CHAR", "Met Office UM RA2T CON")
att.put.nc(outnc, "NC_GLOBAL", "spatial_resolution", "NC_CHAR", "4.4km")
att.put.nc(outnc, "NC_GLOBAL", "summary", "NC_CHAR", "Tropical cyclone gust speed threshold exceedence probabilities over Bangladesh")
att.put.nc(outnc, "NC_GLOBAL", "keywords", "NC_CHAR", "Bangladesh, footprint, posterior, exceedence probabilities, Met Office")
# Write in data
var.put.nc(outnc, 'latitude', lat)
var.put.nc(outnc, 'longitude', lon)
var.put.nc(outnc, 'gust_threshold', WINDTHRESHOLDS)
var.put.nc(outnc, 'probability_of_exceedance', exceedm)
close.nc(outnc) |
#' Returns information on how to partition y_data
#'
#' This function returns information on how to partition (randomly or paired) y_data.
#'
#' @importFrom magrittr "%>%"
#' @param data_object argument is the output produced by as.MLinput, which contains a single x data frame or a list of x data frames, a y data frames and attributes
#' @param partition_style one of 'random' or 'paired' (character string) indicating type of partition
#' @param folds integer of k for k-fold cross validation
#' @param repeats integer of number of iterations to repeat cross validation
#' @param holdout_perc numeric between 0 and 1 indicating the percentage of data to withold for the holdout validation set
#' @return \code{data_object} with "partition_info", "extra_partitions" and
#'   "foldrep" attributes attached; each partition attribute is a list with
#'   \code{train} and \code{test} elements, split by (Fold, Rep).
#' @export
dataPartitioning = function(data_object, partition_style = "random", folds = 4, repeats = 100, holdout_perc = 0.25) {
    # extract y_data and the identifying column names from data_object
    y_data = data_object$Y
    group_identifier = attr(data_object, "cnames")$outcome_cname
    pair_identifier = attr(data_object, "cnames")$pair_cname
    # initial checks
    if (!(partition_style %in% c("random", "paired")))
        stop("'partition_style' must be one of, 'random' or 'paired'")
    if (!inherits(y_data, "data.frame"))
        stop("'y_data' must be of class 'data.frame'")
    if (partition_style == "paired" && is.null(pair_identifier))
        stop("'pair_identifier' is required for a 'paired' partition_style")
    # 50% additional 'extra' repeats are generated alongside the requested ones.
    # NOTE(review): repeats * 0.5 is fractional for odd `repeats` -- confirm
    # callers always pass an even value.
    repeats_new = repeats * 1.5
    repeats_extra = repeats_new - repeats
    if (partition_style == "random") {
        # requested partitions (holdout_perc is forwarded; see
        # pureRandomTestingTraining for its handling)
        partition_result = pureRandomTestingTraining(data = y_data, group_identifier = group_identifier, folds = folds,
            repeats = repeats, holdout_perc = holdout_perc)
        final_result = assemble_fold_rep_partitions(partition_result, folds, repeats)
        # extra partitions
        partition_result_extra = pureRandomTestingTraining(data = y_data, group_identifier = group_identifier, folds = folds,
            repeats = repeats_extra, holdout_perc = holdout_perc)
        final_result_extra = assemble_fold_rep_partitions(partition_result_extra, folds, repeats_extra)
    } else if (partition_style == "paired") {
        # requested partitions
        partition_result = pairedCaseControlTestingTraining(data = y_data, group_identifier = group_identifier,
            pair_identifier = pair_identifier, folds = folds, repeats = repeats)
        final_result = assemble_fold_rep_partitions(partition_result, folds, repeats)
        # extra partitions
        # BUG FIX: this call previously passed `repeats = repeats`, so the
        # paired branch generated `repeats` extra partitions while labelling
        # them with `repeats_extra` fold/rep tags; it now matches the random
        # branch and uses repeats_extra.
        partition_result_extra = pairedCaseControlTestingTraining(data = y_data, group_identifier = group_identifier,
            pair_identifier = pair_identifier, folds = folds, repeats = repeats_extra)
        final_result_extra = assemble_fold_rep_partitions(partition_result_extra, folds, repeats_extra)
    }
    attr(data_object, "partition_info") = final_result
    attr(data_object, "extra_partitions") = final_result_extra
    attr(data_object, "foldrep") = list(folds = folds, repeats = repeats)
    return(data_object)
}

# Internal helper: reshape a list of train/test index partitions (one element
# per fold x repeat, each holding $train and $test index vectors) into a list
# with `train` and `test` elements, each split into one data.table per
# (Fold, Rep) combination. Replaces four identical copies of this assembly
# code that previously lived inline in dataPartitioning().
assemble_fold_rep_partitions = function(partition_result, folds, repeats) {
    # separate the train and test index vectors
    train_list = lapply(partition_result, function(item) {
        item$train
    })
    test_list = lapply(partition_result, function(item) {
        item$test
    })
    # fold labels cycle fastest (1..folds repeated), repeat labels slowest --
    # matching the ordering of caret::createMultiFolds output
    fld = as.list(rep(1:folds, repeats))
    reps = as.list(rep(1:repeats, each = folds))
    # tag every index vector with its fold/repeat, then stack into one data frame
    train_df = mapply(fold_rep_train, train_list, fld, reps)
    train_df = as.data.frame(train_df)
    train_df = lapply(train_df, as.data.frame, stringsAsFactors = F)
    train_df = do.call(rbind, train_df)
    test_df = mapply(fold_rep_test, test_list, fld, reps)
    test_df = as.data.frame(test_df)
    test_df = lapply(test_df, as.data.frame, stringsAsFactors = F)
    test_df = do.call(rbind, test_df)
    final_result = list(train = train_df, test = test_df)
    # split back out into one data.table per (Fold, Rep) combination
    lapply(final_result, function(x) {
        split(data.table::as.data.table(x), by = c("Fold", "Rep"))
    })
}
# Build random k-fold x repeats train/test index partitions.
# Each element of the returned list has $train (row indices selected by
# caret::createMultiFolds) and $test (the remaining row indices).
# NOTE: holdout_perc is accepted for interface compatibility but is not
# used inside this function.
pureRandomTestingTraining <- function(data, group_identifier, folds = 4, repeats = 100, holdout_perc = 0.25) {
    outcome <- data[, group_identifier]
    seed <- 42
    all_rows <- 1:nrow(data)
    # fixed seed so the fold assignment is reproducible
    set.seed(42)
    fold_train_indices <- caret::createMultiFolds(y = outcome, k = folds, times = repeats)
    # pair every training index set with its complement as the test set
    lapply(fold_train_indices, function(train_idx) {
        list(train = train_idx, test = setdiff(all_rows, train_idx))
    })
}
# Build k-fold x repeats train/test partitions that keep case-control pairs
# together: folds are drawn over one representative row per pair, then every
# row sharing that pair id is assigned to the same side of the split.
# Returns a list of lists, each with $train and $test vectors of original row
# indices ("old_index").
# NOTE(review): `tuning_set` and `seed` are assigned but never used in this
# function; unlike pureRandomTestingTraining, no set.seed() call is made here.
pairedCaseControlTestingTraining <- function(data, group_identifier, pair_identifier, folds = 4, repeats = 100, holdout_set = NULL) {
tuning_set <- FALSE
seed <- 42
#------- create a map to keep orignal indexing in tact -----#
data$old_index <- 1:nrow(data)
#------- remove the holdout set if there is one ---------#
if (length(holdout_set) > 0) {
data <- data[-which(data[, pair_identifier] %in% holdout_set), ]
}
#------- use map to find unique pairs -----#
# NOTE(review): arrange_()/distinct_() are deprecated dplyr underscore verbs;
# arrange()/distinct() with tidy-eval would be the modern equivalents.
singled_pair_subset <- data %>% dplyr::arrange_(group_identifier) %>% dplyr::distinct_(pair_identifier, .keep_all = TRUE) #keep only unique identifiers for training subset
# NOTE(review): `:` binds tighter than `/`, so 1:nrow(x)/2 is (1:nrow(x))/2 --
# fractional indices that truncate to rows 1..floor(n/2); `1:(nrow(x)/2)` was
# probably intended (the selected row set happens to be the same).
singled_pair_subset[1:nrow(singled_pair_subset)/2, group_identifier] <- 1 # convert half the labes to zeros for caret's balancing act
index_map <- data.frame(old = singled_pair_subset$old_index, new = 1:nrow(singled_pair_subset), pair = singled_pair_subset[, pair_identifier])
rownames(index_map) <- index_map$new
#------ vector of group labels to split ------#
group <- singled_pair_subset[, group_identifier]
#------ determine training indeces ----------#
# returns a k x repeats size list of training indeces
training_ind_list <- caret::createMultiFolds(y = group, k = folds, times = repeats)
#------- match case-control pairs --------#
# expand each fold of representative rows back to all rows sharing the pair id
ind_list <- lapply(training_ind_list, function(x) {
pairs <- index_map[x, "pair"]
partition_list <- list()
partition_list$train <- data[which(data[, pair_identifier] %in% pairs), "old_index"]
partition_list$test <- data[which(!data[, pair_identifier] %in% pairs), "old_index"]
return(partition_list)
})
# NOTE(review): the assignment above is the last evaluated expression, so
# ind_list is returned (invisibly); an explicit return(ind_list) would be clearer.
#------ diff data and train to return testing indeces -----#
# returns a k x repeats size list of grouped testing and training indeces ind_list <- lapply(training_ind_list_mapped,
# function(training_inds){ pairs <- index_map[x, 'pair'] partition_list <- list() partition_list$train <- training_inds
# partition_list$test <- data[which(!data[,pair_identifier] %in% pairs), 'old_index'] return(partition_list) })
}
# folds repeats helper functions for training data and test data
# Tag a vector of training row indices with its fold and repeat number.
# Returns a data.frame with columns Fold, Rep and Train (one row per index).
fold_rep_train = function(train_vec, fold, repp) {
    n_idx = length(train_vec)
    data.frame(Fold = rep(fold, n_idx), Rep = rep(repp, n_idx), Train = train_vec)
}
# Tag a vector of test row indices with its fold and repeat number.
# Returns a data.frame with columns Fold, Rep and Test (one row per index).
# (The first parameter is named train_vec only for symmetry with fold_rep_train.)
fold_rep_test = function(train_vec, fold, repp) {
    n_idx = length(train_vec)
    data.frame(Fold = rep(fold, n_idx), Rep = rep(repp, n_idx), Test = train_vec)
}
| /R/dataPartitioning.R | permissive | pmartR/peppuR | R | false | false | 11,323 | r | #' Returns information on how to partition y_data
#'
#' This function returns information on how to partition (randomly or paired) y_data
#' importFrom magrittr "%>%"
#' @param data_object argument is the output produced by as.MLinput, which contains a single x data frame or a list of x data frames, a y data frames and attributes
#' @param partition_style one of 'random' or 'paired' (character string) indicating type of partition
#' @param folds integer of k for k-fold cross validation
#' @param repeats integer of number of iterations to repeat cross validation
#' @param holdout_perc numeric between 0 and 1 indicating the percentage of data to withold for the holdout validation set
#' @export
dataPartitioning = function(data_object, partition_style = "random", folds = 4, repeats = 100, holdout_perc = 0.25) {
  # extract the outcome data frame and identifier column names stored on the
  # object by as.MLinput
  y_data <- data_object$Y
  group_identifier <- attr(data_object, "cnames")$outcome_cname
  pair_identifier <- attr(data_object, "cnames")$pair_cname
  # input checks
  if (!(partition_style %in% c("random", "paired")))
    stop("'partition_style' must be one of, 'random' or 'paired'")
  if (!inherits(y_data, "data.frame"))
    stop("'y_data' must be of class 'data.frame'")
  if (partition_style == "paired" && is.null(pair_identifier))
    stop("'pair_identifier' is required for a 'paired' partition_style")
  # 50% additional repeats are generated and stored separately under the
  # "extra_partitions" attribute (repeats * 1.5 - repeats)
  repeats_extra <- repeats * 1.5 - repeats
  # Local helper: rearrange a list of train/test index pairs (one element per
  # fold x repeat, folds cycling fastest) into a list with elements 'train'
  # and 'test', each a list of data.tables split by (Fold, Rep).
  # This replaces four near-identical copies of the same pipeline.
  reshape_partitions <- function(partition_result, n_repeats) {
    train_list <- lapply(partition_result, function(item) item$train)
    test_list <- lapply(partition_result, function(item) item$test)
    # fold / repeat labels parallel to partition_result: folds cycle fastest
    fld <- as.list(rep(1:folds, n_repeats))
    reps <- as.list(unlist(lapply(as.list(1:n_repeats), rep, folds)))
    train_df <- mapply(fold_rep_train, train_list, fld, reps)
    train_df <- as.data.frame(train_df)
    train_df <- lapply(train_df, as.data.frame, stringsAsFactors = FALSE)
    train_df <- do.call(rbind, train_df)
    test_df <- mapply(fold_rep_test, test_list, fld, reps)
    test_df <- as.data.frame(test_df)
    test_df <- lapply(test_df, as.data.frame, stringsAsFactors = FALSE)
    test_df <- do.call(rbind, test_df)
    lapply(list(train = train_df, test = test_df), function(x) {
      split(data.table::as.data.table(x), by = c("Fold", "Rep"))
    })
  }
  if (partition_style == "random") {
    partition_result <- pureRandomTestingTraining(data = y_data, group_identifier = group_identifier, folds = folds, repeats = repeats,
      holdout_perc = holdout_perc)
    final_result <- reshape_partitions(partition_result, repeats)
    # extra partitions for the additional repeats
    partition_result_extra <- pureRandomTestingTraining(data = y_data, group_identifier = group_identifier, folds = folds, repeats = repeats_extra,
      holdout_perc = holdout_perc)
    final_result_extra <- reshape_partitions(partition_result_extra, repeats_extra)
  } else if (partition_style == "paired") {
    partition_result <- pairedCaseControlTestingTraining(data = y_data, group_identifier = group_identifier, pair_identifier = pair_identifier,
      folds = folds, repeats = repeats)
    final_result <- reshape_partitions(partition_result, repeats)
    # BUG FIX: the extra paired partitions were previously generated with
    # 'repeats' instead of 'repeats_extra', so the partition list length did
    # not match the Fold/Rep labels built from repeats_extra
    partition_result_extra <- pairedCaseControlTestingTraining(data = y_data, group_identifier = group_identifier, pair_identifier = pair_identifier,
      folds = folds, repeats = repeats_extra)
    final_result_extra <- reshape_partitions(partition_result_extra, repeats_extra)
  }
  # attach partition bookkeeping to the object and return it
  attr(data_object, "partition_info") <- final_result
  attr(data_object, "extra_partitions") <- final_result_extra
  attr(data_object, "foldrep") <- list(folds = folds, repeats = repeats)
  return(data_object)
}
# Build stratified random train/test index partitions.
#
# data: data.frame of outcomes; one row per sample
# group_identifier: name of the outcome column used to stratify the folds
# folds: k for k-fold cross validation
# repeats: number of times the k-fold split is repeated
# holdout_perc: NOTE(review) -- accepted for interface compatibility but not
#   used anywhere in this function; confirm whether a holdout split was
#   intended here.
# Returns a (folds x repeats)-length list; each element is a list with
# 'train' and 'test' integer index vectors into 'data'.
pureRandomTestingTraining <- function(data, group_identifier, folds = 4, repeats = 100, holdout_perc = 0.25) {
  groups <- data[, group_identifier]
  # fixed seed so the fold assignments are reproducible
  seed <- 42
  set.seed(seed)
  # k x repeats list of stratified training indices
  training_ind_list <- caret::createMultiFolds(y = groups, k = folds, times = repeats)
  # the test set is the complement of each training set over all row indices
  ind_list <- lapply(training_ind_list, function(training_inds) {
    partition_list <- list()
    partition_list$train <- training_inds
    partition_list$test <- setdiff(seq_len(nrow(data)), training_inds)
    return(partition_list)
  })
  return(ind_list)
}
# Build train/test index partitions that keep case-control pairs together.
#
# data: data.frame of outcomes; one row per sample
# group_identifier: name of the outcome column
# pair_identifier: name of the column identifying case-control pairs
# folds: k for k-fold cross validation
# repeats: number of times the k-fold split is repeated
# holdout_set: optional vector of pair ids to exclude before partitioning
# Returns a (folds x repeats)-length list; each element is a list with
# 'train' and 'test' index vectors into the ORIGINAL row order of 'data'.
pairedCaseControlTestingTraining <- function(data, group_identifier, pair_identifier, folds = 4, repeats = 100, holdout_set = NULL) {
  # NOTE(review): unlike pureRandomTestingTraining, set.seed() is never
  # called here, so paired fold assignment is not reproducible -- confirm
  # whether that is intentional.
  seed <- 42
  #------- keep a map back to the original row indexing -----#
  data$old_index <- seq_len(nrow(data))
  #------- drop the holdout pairs if a holdout set was supplied ---------#
  if (length(holdout_set) > 0) {
    data <- data[-which(data[, pair_identifier] %in% holdout_set), ]
  }
  #------- one row per pair, so caret splits whole pairs, not samples -----#
  singled_pair_subset <- data %>% dplyr::arrange_(group_identifier) %>% dplyr::distinct_(pair_identifier, .keep_all = TRUE) #keep only unique identifiers for training subset
  # relabel the first half of the rows so caret balances the split
  # (was '1:nrow(x)/2', which parses as (1:n)/2 and only worked because
  # fractional indices are truncated -- the explicit form below assigns the
  # exact same rows 1..floor(n/2))
  singled_pair_subset[seq_len(floor(nrow(singled_pair_subset) / 2)), group_identifier] <- 1
  index_map <- data.frame(old = singled_pair_subset$old_index, new = seq_len(nrow(singled_pair_subset)), pair = singled_pair_subset[, pair_identifier])
  rownames(index_map) <- index_map$new
  #------ vector of (rebalanced) group labels to split ------#
  group <- singled_pair_subset[, group_identifier]
  #------ k x repeats list of training indices over the pair subset -------#
  training_ind_list <- caret::createMultiFolds(y = group, k = folds, times = repeats)
  #------- expand pair-level indices back to every member row of each pair,
  #------- reporting indices in the original row order --------#
  ind_list <- lapply(training_ind_list, function(x) {
    pairs <- index_map[x, "pair"]
    partition_list <- list()
    partition_list$train <- data[which(data[, pair_identifier] %in% pairs), "old_index"]
    partition_list$test <- data[which(!data[, pair_identifier] %in% pairs), "old_index"]
    return(partition_list)
  })
  # explicit return (the original relied on the invisible value of the
  # final assignment)
  return(ind_list)
}
# folds repeats helper functions for training data and test data
# Tag a vector of training indices with its fold and repeat number.
#
# train_vec: integer vector of training row indices for one fold/repeat
# fold: fold number attached to every row
# repp: repeat (iteration) number attached to every row
# Returns a data.frame with columns Fold, Rep and Train, one row per index.
fold_rep_train <- function(train_vec, fold, repp) {
  n <- length(train_vec)
  # rep() keeps the result well-defined (0 rows) when train_vec is empty,
  # which plain scalar recycling in data.frame() would reject
  data.frame(Fold = rep(fold, n), Rep = rep(repp, n), Train = train_vec)
}
# Tag a vector of test indices with its fold and repeat number.
#
# train_vec: integer vector of test row indices for one fold/repeat
#            (name kept for backward compatibility with existing callers)
# fold: fold number attached to every row
# repp: repeat (iteration) number attached to every row
# Returns a data.frame with columns Fold, Rep and Test, one row per index.
fold_rep_test <- function(train_vec, fold, repp) {
  n <- length(train_vec)
  # rep() keeps the result well-defined (0 rows) when train_vec is empty
  data.frame(Fold = rep(fold, n), Rep = rep(repp, n), Test = train_vec)
}
|
library(Hmisc)
library(caret)
library(dplyr)
library(ggplot2)
library(data.table)
# ํ์ผ ๋ถ๋ฌ์ค๊ธฐ
test <- read.csv("test.csv")
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
# ๋ฐ์ดํฐ ํ์ธ
head(train)
describe(train)
str(train)
table(train$TripType)
table(train$Upc)
table(train$DepartmentDescription)
summary(train)
boxplot(train$ScanCount)
table(train$ScanCount)
hist(train$VisitNumber)
table(train$VisitNumber)
train %>% filter(TripType == 39) %>% select(VisitNumber)
train %>% filter(ScanCount < -4)
train %>% filter(is.na(Upc))
train %>% filter(VisitNumber == 8)
train %>% filter(DepartmentDescription == 'PHARMACY RX')
train %>% filter(ScanCount > 19) %>% arrange(TripType, VisitNumber, ScanCount)
train %>% filter(FinelineNumber == 9998)
train %>% filter(TripType == 999)
train %>% filter(FinelineNumber == 1000)
train %>% filter(floor(log10(train$Upc))+1 == 12)
# DepartmentDescription๊ณผ Weekday์์ ๊ด๊ณ ํ์ธ
week_depart <- list()
wd_data <- tapply(train$DepartmentDescription, train$Weekday, table)
for(i in 1 : NROW(unique(train$Weekday))){
week_depart <- c(week_depart, list(sort(wd_data[[i]], decreasing = T)))
}
names(week_depart) <- names(wd_data)
sort_week_depart <- c(week_depart['Monday'], week_depart['Tuesday'],
week_depart['Wednesday'], week_depart['Thursday'],
week_depart['Friday'], week_depart['Saturday'],
week_depart['Sunday'])
# DepartmentDescription๊ณผ TripType๊ณผ์ ๊ด๊ณ
triptype_depart <- list()
trde_data <- tapply(train$DepartmentDescription, train$TripType, table)
for(i in 1 : NROW(unique(train$TripType))){
triptype_depart <- c(triptype_depart, list(sort(trde_data[[i]], decreasing = T)))
}
names(triptype_depart) <- names(trde_data)
trde_top10 <- list()
for(i in 1 : NROW(unique(train$TripType))){
trde_top10 <- c(trde_top10, list(triptype_depart[[i]][1 : 10]))
}
names(trde_top10) <- names(trde_data)
trde_top10_plot_data <- train %>%
group_by(TripType, DepartmentDescription) %>%
summarise(n = n()) %>%
arrange(TripType, desc(n))
trde_top5_plot_result <- c()
for(i in unique(trde_top10_plot_data$TripType)){
trde_top5_plot_result <- rbind(trde_top5_plot_result,
trde_top10_plot_data %>% filter(TripType == i) %>% head(5))
}
mosaicplot(~ DepartmentDescription + TripType, data = trde_top5_plot_result, las = 1)
# Read Data File
test <- read.csv("test.csv")
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
options(scipen = 100)
# UPC 12์๋ฆฌ ์ฑ์ฐ๊ธฐ
# First : 11์๋ฆฌ๊น์ง ์ฑ์ฐ๊ธฐ(0์ผ๋ก)
Upc12 <- matrix(0, nrow = nrow(train), ncol = 1)
# Left-pad a numeric UPC to 11 digits with leading zeros.
#
# x: a positive numeric UPC value (callers guard that it has <= 10 digits)
# y: unused; kept with a default for backward compatibility with callers
#    that pass a second argument
# Returns the UPC as an 11-character string.
ZeroFillFunc <- function(x, y = NULL) {
  n_digits <- floor(log10(x)) + 1
  # max(0, ...) guards inputs that already have 11+ digits; the original
  # 'for(j in 1:zeroc)' loop wrongly appended two zeros when zeroc == 0
  pad <- strrep("0", max(0, 11 - n_digits))
  paste0(pad, x)
}
for(i in 1 : nrow(train)){
Upc12[i] <- ifelse(floor(log10(train$Upc[i])) > 9, train$Upc[i],
ZeroFillFunc(train$Upc[i]))
}
Upc12 <- as.vector(Upc12)
# Second : CheckSumDigit ๋ง๋ค๊ธฐ
# Append the UPC-A check digit to an 11-digit UPC string.
#
# The check digit is (10 - (3 * sum(odd positions) + sum(even positions))
# %% 10) %% 10, with positions counted 1-based from the left.
#
# x: a string whose first 11 characters are digits
# Returns a 12-character UPC string (the 11 digits plus the check digit).
MakeCheckSumDigitFunc <- function(x) {
  digit11 <- substr(x, 1, 11)
  digits <- as.numeric(strsplit(digit11, "")[[1]])
  # vectorized sums replace the original character-by-character loop
  odds_sum <- sum(digits[seq(1, 11, by = 2)])
  evens_sum <- sum(digits[seq(2, 10, by = 2)])
  total_sum <- odds_sum * 3 + evens_sum
  # (10 - r) %% 10 maps the r == 0 case to 0 without a special branch
  sum_result <- (10 - total_sum %% 10) %% 10
  paste0(digit11, sum_result)
}
Upc12 <- sapply(Upc12, MakeCheckSumDigitFunc)
names(Upc12) <- NULL
# Final : Train๋ฐ์ดํฐ์ ํฉ์น๊ธฐ
train <- data.frame(train, Upc12)
head(train, 20)
colnames(train)[8] <- "UPC"
train$Upc <- NULL
# train Copy / Divide Company Code / Divide Product Code
cptrain <- train
# Extract the 6-character company prefix from a UPC code.
#
# x: a UPC code (coerced to character); expected to have at least 6 characters
# Returns the first 6 characters as a string.
MakeCompanyCodeFunc <- function(x) {
  # substr() replaces the original character-by-character paste loop
  substr(as.character(x), 1, 6)
}
Upc6 <- sapply(cptrain$UPC, MakeCompanyCodeFunc)
cptrain <- data.frame(cptrain, Upc6)
colnames(cptrain)[8] <- "CompanyCode"
cptrain$UPC <- NULL
cptrain$CompanyCode <- as.character(cptrain$CompanyCode)
write.csv(cptrain, "htrain.csv", row.names = F)
# Build 5 progressively shorter product-code columns from UPC positions
# (7 + i - 1)..11; column i holds that suffix of each UPC.
# NOTE(review): cptrain$UPC was set to NULL a few lines above (right after
# the company code was extracted), so this sapply iterates over NULL and
# pUpc is never filled -- this block was presumably meant to run before
# UPC was dropped; confirm against the intended pipeline order.
pUpc <- matrix(0, nrow = nrow(cptrain), ncol = 5)
for(i in 1 : 5){
pUpc[, i] <- sapply(cptrain$UPC, function(x){
pcode <- ""
upc <- strsplit(as.character(x), "")[[1]]
for(j in (7 + (i - 1)) : 11){
pcode <- paste0(pcode, upc[j])
}
return (pcode)
})
}
cptrain <- data.frame(cptrain, pUpc)
colnames(cptrain)[8 : 12] <- c("ProductCode5", "ProductCode4", "ProductCode3",
"ProductCode2", "ProductCode1")
for(i in 8 : 12){
cptrain[, i] <- as.character(cptrain[, i])
}
write.csv(cptrain, "ptrain.csv", row.names = F)
write.table(cptrain, "ptrain.txt", sep = ",", col.names = T)
tt <- fread("ptrain.csv")
str(cptrain)
head(tt)
# Read Refined Data & Set Company Code len 6
library(data.table)
library(dplyr)
library(caret)
rtrain <- fread("htrain.csv", stringsAsFactors = T)
rtrain <- as.data.frame(rtrain)
rtrain <- rtrain %>% filter(TripType %in% c(3 : 20))
rtrain$TripType <- as.factor(rtrain$TripType)
rtrain <- rtrain %>% arrange(DepartmentDescription, FinelineNumber)
t <- rtrain %>% select(DepartmentDescription, FinelineNumber) %>%
arrange(DepartmentDescription, FinelineNumber) %>% distinct()
t$defi <- 1 : nrow(t)
rtrain <- merge(rtrain, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
# Divide Train / Validation
sample_idx <- rtrain %>% sample_frac(0.7) %>% rownames() %>% as.numeric()
rtrain_train <- rtrain[sample_idx, ]
rtrain_val <- rtrain[-sample_idx, ]
# Multinomial Logistic Regression
library(nnet)
mlr_model <- multinom(TripType ~ ., data = rtrain_train)
str(rtrain)
summary(mlr_model)
table(fitted(mlr_model))
# Decision Tree
library(party)
# NOTE(review): tree_control is built but never passed to ctree() below;
# it was presumably meant to be supplied as ctree(..., controls = tree_control).
tree_control <- ctree_control(mincriterion = 0.9, minsplit = 10, maxdepth = 6)
tree_model <- ctree(TripType ~ ., data = rtrain_train)
plot(tree_model)
tree_pred <- predict(tree_model, newdata = rtrain_val)
# NOTE(review): no column named TripType_3 is created anywhere in this
# script -- this was presumably meant to be rtrain_val$TripType.
confusionMatrix(tree_pred, rtrain_val$TripType_3)
# RandomForest
library(randomForest)
rf_model <- randomForest(TripType ~ ScanCount, data = rtrain_train)
rf_pred <- predict(rf_model, newdata = rtrain_val)
confusionMatrix(rf_pred, rtrain_val$TripType)
rf_model <- randomForest(TripType ~ VisitNumber, data = rtrain_train,
ntree = 200, importance = TRUE, do.trace = TRUE)
rf_pred <- predict(rf_model, newdata = rtrain_val)
rf_cfm <- confusionMatrix(rf_pred, rtrain_val$TripType)
rf_imp <- importance(rf_model)
plot(rf_model)
rf_model1 <- randomForest(TripType ~ VisitNumber + FinelineNumber, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred1 <- predict(rf_model1, newdata = rtrain_val)
rf_cfm1 <- confusionMatrix(rf_pred1, rtrain_val$TripType)
rf_imp <- importance(rf_model1)
varImpPlot(rf_model1)
rf_model2 <- randomForest(TripType ~ VisitNumber + Weekday, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred2 <- predict(rf_model2, newdata = rtrain_val)
rf_cfm <- confusionMatrix(rf_pred2, rtrain_val$TripType)
levels(rtrain_train$DepartmentDescription) <- c(1 : 68)
table(rtrain_train$DepartmentDescription)
rtrain_train$DepartmentDescription <- as.numeric(rtrain_train$DepartmentDescription)
levels(rtrain_val$DepartmentDescription) <- c(1 : 68)
table(rtrain_val$DepartmentDescription)
rtrain_val$DepartmentDescription <- as.numeric(rtrain_val$DepartmentDescription)
rf_model3 <- randomForest(TripType ~ VisitNumber + DepartmentDescription, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred3 <- predict(rf_model3, newdata = rtrain_val)
rf_cfm3 <- confusionMatrix(rf_pred3, rtrain_val$TripType)
rf_model4 <- randomForest(TripType ~ DepartmentDescription, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred4 <- predict(rf_model4, rtrain_val)
rf_cfm <- confusionMatrix(rf_pred4, rtrain_val$TripType)
rtrain_train <- rtrain_train %>% arrange(DepartmentDescription, FinelineNumber)
t <- rtrain_train %>%
select(DepartmentDescription, FinelineNumber) %>%
group_by(DepartmentDescription, FinelineNumber) %>%
arrange(DepartmentDescription, FinelineNumber) %>%
unique()
rtrain_train <- merge(rtrain_train, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
rtrain_val <- merge(rtrain_val, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
rf_model5 <- randomForest(TripType ~ VisitNumber + defi, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred5 <- predict(rf_model5, newdata = rtrain_val)
rf_cfm5 <- confusionMatrix(rf_pred5, rtrain_val$TripType)
rf_imp5 <- importance(rf_model5)
varImpPlot(rf_model5)
t1 <- rtrain_train %>%
select(defi, CompanyCode) %>%
group_by(defi, CompanyCode) %>%
arrange(defi, CompanyCode) %>%
unique()
rtrain_train %>% filter(TripType %in% c(7, 8)) %>% head(20)
t1$CompanyCode <- as.character(t1$CompanyCode)
t1 <- t1 %>% arrange(CompanyCode, defi)
t1$defico <- 1 : nrow(t1)
rtrain_train <- merge(rtrain_train, t1, by = c("defi", "CompanyCode"), all.x = T)
rtrain_val <- merge(rtrain_val, t1, by = c("defi", "CompanyCode"), all.x = T)
rf_model6 <- randomForest(TripType ~ VisitNumber + defico, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred6 <- predict(rf_model6, newdata = rtrain_val)
rf_cfm6 <- confusionMatrix(rf_pred6, rtrain_val$TripType)
# Make New DataSet
head(train)
sumscancount <- train %>%
filter(ScanCount > 0) %>%
group_by(VisitNumber) %>%
summarise(sumscancount = sum(ScanCount))
minuscancount <- train %>%
filter(ScanCount < 0) %>%
group_by(VisitNumber) %>%
summarise(n = n())
colnames(minuscancount) <- c("id", "RefundCount")
maxperdesc <- train %>%
group_by(VisitNumber, DepartmentDescription) %>%
summarise(n = n())
maxperdesc1 <- train %>%
group_by(VisitNumber) %>%
summarise(n = n())
maxperdescs <- merge(maxperdesc, maxperdesc1, by = "VisitNumber", all.x = T)
maxperdesc <- maxperdescs %>%
group_by(VisitNumber) %>%
summarise(maxdesc = max(n.x),
totaldesc = mean(n.y),
per = round(maxdesc / totaldesc, 3))
newdata <- read.csv("newdata.csv")
newdata <- merge(newdata, minuscancount, by = "id", all.x = T)
newdata$RefundCount <- ifelse(is.na(newdata$RefundCount), 0, newdata$RefundCount)
colnames(maxperdesc)[1] <- "id"
newdata <- merge(newdata, maxperdesc, by = "id", all.x = T)
head(newdata)
colnames(newdata)[5 : 7] <- c("MaxDescCount", "TotalDescCount", "max_prodrate")
write.csv(newdata, "newdata.csv", row.names = F)
library(dummies)
tt <- dummy(train$DepartmentDescription)
train <- data.frame(train, tt)
train$DepartmentDescription <- NULL
colnames(train)
head(tt)
library(dplyr)
# ๋ฐ์ดํฐ ์ฝ์ด์ค๊ธฐ & NA ์ง์ฐ๊ธฐ
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
# VisitNumber์ DepartmentDescription ๊ทธ๋ฃน์ผ๋ก ๋ฌถ์ด์
# ๊ฐ ๊ทธ๋ฃน์ ๋ํ ์ด ๊ฐฏ์ ๋ฐ์ดํฐ ์
๋ง๋ค๊ธฐ -- ใ
descdummy <- train %>%
group_by(VisitNumber, DepartmentDescription) %>%
summarise(sumcategory = n())
# DepartmentDescription ๋๋ฏธ๋ณ์๋ฅผ ๋ด์ ๋ฐ์ดํฐํ๋ ์ ๊ณต๊ฐ ๋ง๋ค๊ธฐ
# column ์ด๋ฆ ๋ณ๊ฒฝํด์ฃผ๊ธฐ
departdummy <- data.frame(unique(train$VisitNumber),
matrix(0, nrow = length(unique(train$VisitNumber)),
ncol = length(levels(train$DepartmentDescription))))
colnames(departdummy)[1] <- "id"
colnames(departdummy)[2 : ncol(departdummy)] <- levels(train$DepartmentDescription)
# ์ด ์ ์ฒ๋ฆฌ ๋ฐฉ์์ ํต์ฌ์ DepartmentDescription์ ๋ ๋ฒจ์ 1๋ถํฐ 69๊น์ง๋ก ๋ณ๊ฒฝ ํ์
# ๊ทธ ์ซ์ + 1์ ํด๋นํ๋ ์ธ๋ฑ์ค์ ์์ ใ ๋ถ๋ถ์์ ๊ตฌํ ์ด ๊ฐฏ์๋ฅผ ๋ฃ์ด์ฃผ๋ ๊ฒ
# ๋ํ, factorํ์์ ์ฌ์น์ฐ์ฐ์ ๋ํด์ ์๋ฏธ๊ฐ ์๊ธฐ ๋๋ฌธ์
# ์ด๋ฅผ numeric์ผ๋ก ๋ณ๊ฒฝ ํ์ ์ฐ์ฐ์ ์งํ
levels(descdummy$DepartmentDescription) <- 1 : length(levels(descdummy$DepartmentDescription))
descdummy$DepartmentDescription <- as.numeric(descdummy$DepartmentDescription)
departindex <- 1 # departdummy(๋ฐ์ดํฐ๋ฅผ ์ ์ฅํด์ผํ ๋ณ์)์ ํ์ ์ธ๋ฑ์ค
descindex <- 1 # descdummy(ใ ์ ๊ทธ๋ฃน์ ๋ํ ์ด ๊ฐฏ์๊ฐ ๋ด๊ธด ๋ณ์)์ ํ์ ์ธ๋ฑ์ค
repeat{
# ์๋ก์ VisitNumber๋ฅผ ๋น๊ต,
# departdummy์ id๊ฐ VisitNumber(์ด์ด๋ฆ๋ง ์์์ ๋ฐ๊พผ๊ฒ)
# why? ๋ฐ์์ ๋ฐ์ดํฐ ์
๊ณผ ์กฐ์ธํ๊ธฐ์ํด ์ด๋ฆ์ id๋ก ๋ณ๊ฒฝ ํด์ค๊ฒ.
if(descdummy$VisitNumber[descindex] == departdummy$id[departindex]){
# ์์์ ๊ฐ๋จํ ์ค๋ช
ํ๋๋ก VisitNumber๊ฐ ๊ฐ๋ค๋ฉด ์ด ๊ฐฏ์ ๋ฐ์ดํฐ๋ฅผ
# departmentdescription์ ๊ฐ + 1์ ์ธ๋ฑ์ค์ ์ฝ์
# ๊ทธ๋ฆฌ๊ณ ๋์ ์ด ๊ฐฏ์ ๋ฐ์ดํฐ๊ฐ ๋ด๊ธด descindex์ ๊ฐ์ 1์ฆ๊ฐํ์ฌ ์๋ ํ์ผ๋ก ์ด๋
departdummy[departindex,
descdummy$DepartmentDescription[descindex] + 1] <- descdummy$sumcategory[descindex]
descindex <- descindex + 1
}else{
# ๋ง์ฝ VisitNumber๊ฐ ๋ค๋ฅด๋ค๋ฉด ์ฐ๋ฆฌ๊ฐ ๋ด๊ณ ์ ํ๋ departdummy์ ์ธ๋ฑ์ค๋ฅผ 1 ์ฆ๊ฐ
departindex <- departindex + 1
}
print(paste(departindex, descindex))
if(descindex == (nrow(descdummy) + 1)){
break
}
}
# ๋ฐ์ดํฐ ์
๋ถ๋ฌ์จ ํ ๋๋ฏธ๋ณ์์ ์กฐ์ธ์ํจํ์ ์ ์ฅ
newdata <- read.csv("newdata.csv")
newdata <- merge(newdata, departdummy, by = "id", all.x = T)
write.csv(newdata, "newdata.csv", row.names = F)
colnames(newdata)
# TripType - a categorical id representing the type of shopping trip the customer made.
# This is the ground truth that you are predicting.
# TripType_999 is an "other" category.
# VisitNumber - an id corresponding to a single trip by a single customer
# Weekday - the weekday of the trip
# Upc - the UPC number of the product purchased
# ScanCount - the number of the given item that was purchased.
# A negative value indicates a product return.
# DepartmentDescription - a high-level description of the item's department
# FinelineNumber - a more refined category for each of the products, created by Walmart
# upc ์ค๋ช
: https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/18158
# https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/18163
# https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/30345
# https://en.wikipedia.org/wiki/Check_digit | /Walmart.R | no_license | SubAkBa/Kaggle_Walmart | R | false | false | 14,666 | r | library(Hmisc)
library(caret)
library(dplyr)
library(ggplot2)
library(data.table)
# ํ์ผ ๋ถ๋ฌ์ค๊ธฐ
test <- read.csv("test.csv")
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
# ๋ฐ์ดํฐ ํ์ธ
head(train)
describe(train)
str(train)
table(train$TripType)
table(train$Upc)
table(train$DepartmentDescription)
summary(train)
boxplot(train$ScanCount)
table(train$ScanCount)
hist(train$VisitNumber)
table(train$VisitNumber)
train %>% filter(TripType == 39) %>% select(VisitNumber)
train %>% filter(ScanCount < -4)
train %>% filter(is.na(Upc))
train %>% filter(VisitNumber == 8)
train %>% filter(DepartmentDescription == 'PHARMACY RX')
train %>% filter(ScanCount > 19) %>% arrange(TripType, VisitNumber, ScanCount)
train %>% filter(FinelineNumber == 9998)
train %>% filter(TripType == 999)
train %>% filter(FinelineNumber == 1000)
train %>% filter(floor(log10(train$Upc))+1 == 12)
# DepartmentDescription๊ณผ Weekday์์ ๊ด๊ณ ํ์ธ
week_depart <- list()
wd_data <- tapply(train$DepartmentDescription, train$Weekday, table)
for(i in 1 : NROW(unique(train$Weekday))){
week_depart <- c(week_depart, list(sort(wd_data[[i]], decreasing = T)))
}
names(week_depart) <- names(wd_data)
sort_week_depart <- c(week_depart['Monday'], week_depart['Tuesday'],
week_depart['Wednesday'], week_depart['Thursday'],
week_depart['Friday'], week_depart['Saturday'],
week_depart['Sunday'])
# DepartmentDescription๊ณผ TripType๊ณผ์ ๊ด๊ณ
triptype_depart <- list()
trde_data <- tapply(train$DepartmentDescription, train$TripType, table)
for(i in 1 : NROW(unique(train$TripType))){
triptype_depart <- c(triptype_depart, list(sort(trde_data[[i]], decreasing = T)))
}
names(triptype_depart) <- names(trde_data)
trde_top10 <- list()
for(i in 1 : NROW(unique(train$TripType))){
trde_top10 <- c(trde_top10, list(triptype_depart[[i]][1 : 10]))
}
names(trde_top10) <- names(trde_data)
trde_top10_plot_data <- train %>%
group_by(TripType, DepartmentDescription) %>%
summarise(n = n()) %>%
arrange(TripType, desc(n))
trde_top5_plot_result <- c()
for(i in unique(trde_top10_plot_data$TripType)){
trde_top5_plot_result <- rbind(trde_top5_plot_result,
trde_top10_plot_data %>% filter(TripType == i) %>% head(5))
}
mosaicplot(~ DepartmentDescription + TripType, data = trde_top5_plot_result, las = 1)
# Read Data File
test <- read.csv("test.csv")
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
options(scipen = 100)
# UPC 12์๋ฆฌ ์ฑ์ฐ๊ธฐ
# First : 11์๋ฆฌ๊น์ง ์ฑ์ฐ๊ธฐ(0์ผ๋ก)
Upc12 <- matrix(0, nrow = nrow(train), ncol = 1)
# Left-pad a numeric UPC to 11 digits with leading zeros.
#
# x: a positive numeric UPC value (callers guard that it has <= 10 digits)
# y: unused; kept with a default for backward compatibility with callers
#    that pass a second argument
# Returns the UPC as an 11-character string.
ZeroFillFunc <- function(x, y = NULL) {
  n_digits <- floor(log10(x)) + 1
  # max(0, ...) guards inputs that already have 11+ digits; the original
  # 'for(j in 1:zeroc)' loop wrongly appended two zeros when zeroc == 0
  pad <- strrep("0", max(0, 11 - n_digits))
  paste0(pad, x)
}
for(i in 1 : nrow(train)){
Upc12[i] <- ifelse(floor(log10(train$Upc[i])) > 9, train$Upc[i],
ZeroFillFunc(train$Upc[i]))
}
Upc12 <- as.vector(Upc12)
# Second : CheckSumDigit ๋ง๋ค๊ธฐ
# Append the UPC-A check digit to an 11-digit UPC string.
#
# The check digit is (10 - (3 * sum(odd positions) + sum(even positions))
# %% 10) %% 10, with positions counted 1-based from the left.
#
# x: a string whose first 11 characters are digits
# Returns a 12-character UPC string (the 11 digits plus the check digit).
MakeCheckSumDigitFunc <- function(x) {
  digit11 <- substr(x, 1, 11)
  digits <- as.numeric(strsplit(digit11, "")[[1]])
  # vectorized sums replace the original character-by-character loop
  odds_sum <- sum(digits[seq(1, 11, by = 2)])
  evens_sum <- sum(digits[seq(2, 10, by = 2)])
  total_sum <- odds_sum * 3 + evens_sum
  # (10 - r) %% 10 maps the r == 0 case to 0 without a special branch
  sum_result <- (10 - total_sum %% 10) %% 10
  paste0(digit11, sum_result)
}
Upc12 <- sapply(Upc12, MakeCheckSumDigitFunc)
names(Upc12) <- NULL
# Final : Train๋ฐ์ดํฐ์ ํฉ์น๊ธฐ
train <- data.frame(train, Upc12)
head(train, 20)
colnames(train)[8] <- "UPC"
train$Upc <- NULL
# train Copy / Divide Company Code / Divide Product Code
cptrain <- train
# Extract the 6-character company prefix from a UPC code.
#
# x: a UPC code (coerced to character); expected to have at least 6 characters
# Returns the first 6 characters as a string.
MakeCompanyCodeFunc <- function(x) {
  # substr() replaces the original character-by-character paste loop
  substr(as.character(x), 1, 6)
}
Upc6 <- sapply(cptrain$UPC, MakeCompanyCodeFunc)
cptrain <- data.frame(cptrain, Upc6)
colnames(cptrain)[8] <- "CompanyCode"
cptrain$UPC <- NULL
cptrain$CompanyCode <- as.character(cptrain$CompanyCode)
write.csv(cptrain, "htrain.csv", row.names = F)
# Build 5 progressively shorter product-code columns from UPC positions
# (7 + i - 1)..11; column i holds that suffix of each UPC.
# NOTE(review): cptrain$UPC was set to NULL a few lines above (right after
# the company code was extracted), so this sapply iterates over NULL and
# pUpc is never filled -- this block was presumably meant to run before
# UPC was dropped; confirm against the intended pipeline order.
pUpc <- matrix(0, nrow = nrow(cptrain), ncol = 5)
for(i in 1 : 5){
pUpc[, i] <- sapply(cptrain$UPC, function(x){
pcode <- ""
upc <- strsplit(as.character(x), "")[[1]]
for(j in (7 + (i - 1)) : 11){
pcode <- paste0(pcode, upc[j])
}
return (pcode)
})
}
cptrain <- data.frame(cptrain, pUpc)
colnames(cptrain)[8 : 12] <- c("ProductCode5", "ProductCode4", "ProductCode3",
"ProductCode2", "ProductCode1")
for(i in 8 : 12){
cptrain[, i] <- as.character(cptrain[, i])
}
write.csv(cptrain, "ptrain.csv", row.names = F)
write.table(cptrain, "ptrain.txt", sep = ",", col.names = T)
tt <- fread("ptrain.csv")
str(cptrain)
head(tt)
# Read Refined Data & Set Company Code len 6
library(data.table)
library(dplyr)
library(caret)
rtrain <- fread("htrain.csv", stringsAsFactors = T)
rtrain <- as.data.frame(rtrain)
rtrain <- rtrain %>% filter(TripType %in% c(3 : 20))
rtrain$TripType <- as.factor(rtrain$TripType)
rtrain <- rtrain %>% arrange(DepartmentDescription, FinelineNumber)
t <- rtrain %>% select(DepartmentDescription, FinelineNumber) %>%
arrange(DepartmentDescription, FinelineNumber) %>% distinct()
t$defi <- 1 : nrow(t)
rtrain <- merge(rtrain, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
# Divide Train / Validation
sample_idx <- rtrain %>% sample_frac(0.7) %>% rownames() %>% as.numeric()
rtrain_train <- rtrain[sample_idx, ]
rtrain_val <- rtrain[-sample_idx, ]
# Multinomial Logistic Regression
library(nnet)
mlr_model <- multinom(TripType ~ ., data = rtrain_train)
str(rtrain)
summary(mlr_model)
table(fitted(mlr_model))
# Decision Tree
library(party)
# NOTE(review): tree_control is built but never passed to ctree() below;
# it was presumably meant to be supplied as ctree(..., controls = tree_control).
tree_control <- ctree_control(mincriterion = 0.9, minsplit = 10, maxdepth = 6)
tree_model <- ctree(TripType ~ ., data = rtrain_train)
plot(tree_model)
tree_pred <- predict(tree_model, newdata = rtrain_val)
# NOTE(review): no column named TripType_3 is created anywhere in this
# script -- this was presumably meant to be rtrain_val$TripType.
confusionMatrix(tree_pred, rtrain_val$TripType_3)
# RandomForest
library(randomForest)
rf_model <- randomForest(TripType ~ ScanCount, data = rtrain_train)
rf_pred <- predict(rf_model, newdata = rtrain_val)
confusionMatrix(rf_pred, rtrain_val$TripType)
rf_model <- randomForest(TripType ~ VisitNumber, data = rtrain_train,
ntree = 200, importance = TRUE, do.trace = TRUE)
rf_pred <- predict(rf_model, newdata = rtrain_val)
rf_cfm <- confusionMatrix(rf_pred, rtrain_val$TripType)
rf_imp <- importance(rf_model)
plot(rf_model)
rf_model1 <- randomForest(TripType ~ VisitNumber + FinelineNumber, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred1 <- predict(rf_model1, newdata = rtrain_val)
rf_cfm1 <- confusionMatrix(rf_pred1, rtrain_val$TripType)
rf_imp <- importance(rf_model1)
varImpPlot(rf_model1)
rf_model2 <- randomForest(TripType ~ VisitNumber + Weekday, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred2 <- predict(rf_model2, newdata = rtrain_val)
rf_cfm <- confusionMatrix(rf_pred2, rtrain_val$TripType)
levels(rtrain_train$DepartmentDescription) <- c(1 : 68)
table(rtrain_train$DepartmentDescription)
rtrain_train$DepartmentDescription <- as.numeric(rtrain_train$DepartmentDescription)
levels(rtrain_val$DepartmentDescription) <- c(1 : 68)
table(rtrain_val$DepartmentDescription)
rtrain_val$DepartmentDescription <- as.numeric(rtrain_val$DepartmentDescription)
rf_model3 <- randomForest(TripType ~ VisitNumber + DepartmentDescription, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred3 <- predict(rf_model3, newdata = rtrain_val)
rf_cfm3 <- confusionMatrix(rf_pred3, rtrain_val$TripType)
rf_model4 <- randomForest(TripType ~ DepartmentDescription, data = rtrain_train,
ntree = 100, importance = T, do.trace = T)
rf_pred4 <- predict(rf_model4, rtrain_val)
rf_cfm <- confusionMatrix(rf_pred4, rtrain_val$TripType)
rtrain_train <- rtrain_train %>% arrange(DepartmentDescription, FinelineNumber)
t <- rtrain_train %>%
select(DepartmentDescription, FinelineNumber) %>%
group_by(DepartmentDescription, FinelineNumber) %>%
arrange(DepartmentDescription, FinelineNumber) %>%
unique()
rtrain_train <- merge(rtrain_train, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
rtrain_val <- merge(rtrain_val, t, by = c("DepartmentDescription", "FinelineNumber"), all.x = T)
rf_model5 <- randomForest(TripType ~ VisitNumber + defi, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred5 <- predict(rf_model5, newdata = rtrain_val)
rf_cfm5 <- confusionMatrix(rf_pred5, rtrain_val$TripType)
rf_imp5 <- importance(rf_model5)
varImpPlot(rf_model5)
t1 <- rtrain_train %>%
select(defi, CompanyCode) %>%
group_by(defi, CompanyCode) %>%
arrange(defi, CompanyCode) %>%
unique()
rtrain_train %>% filter(TripType %in% c(7, 8)) %>% head(20)
t1$CompanyCode <- as.character(t1$CompanyCode)
t1 <- t1 %>% arrange(CompanyCode, defi)
t1$defico <- 1 : nrow(t1)
rtrain_train <- merge(rtrain_train, t1, by = c("defi", "CompanyCode"), all.x = T)
rtrain_val <- merge(rtrain_val, t1, by = c("defi", "CompanyCode"), all.x = T)
rf_model6 <- randomForest(TripType ~ VisitNumber + defico, data = rtrain_train,
ntree = 200, importance = T, do.trace = T)
rf_pred6 <- predict(rf_model6, newdata = rtrain_val)
rf_cfm6 <- confusionMatrix(rf_pred6, rtrain_val$TripType)
# Make New DataSet
head(train)
sumscancount <- train %>%
filter(ScanCount > 0) %>%
group_by(VisitNumber) %>%
summarise(sumscancount = sum(ScanCount))
minuscancount <- train %>%
filter(ScanCount < 0) %>%
group_by(VisitNumber) %>%
summarise(n = n())
colnames(minuscancount) <- c("id", "RefundCount")
maxperdesc <- train %>%
group_by(VisitNumber, DepartmentDescription) %>%
summarise(n = n())
maxperdesc1 <- train %>%
group_by(VisitNumber) %>%
summarise(n = n())
maxperdescs <- merge(maxperdesc, maxperdesc1, by = "VisitNumber", all.x = T)
maxperdesc <- maxperdescs %>%
group_by(VisitNumber) %>%
summarise(maxdesc = max(n.x),
totaldesc = mean(n.y),
per = round(maxdesc / totaldesc, 3))
newdata <- read.csv("newdata.csv")
newdata <- merge(newdata, minuscancount, by = "id", all.x = T)
newdata$RefundCount <- ifelse(is.na(newdata$RefundCount), 0, newdata$RefundCount)
colnames(maxperdesc)[1] <- "id"
newdata <- merge(newdata, maxperdesc, by = "id", all.x = T)
head(newdata)
colnames(newdata)[5 : 7] <- c("MaxDescCount", "TotalDescCount", "max_prodrate")
write.csv(newdata, "newdata.csv", row.names = F)
library(dummies)
tt <- dummy(train$DepartmentDescription)
train <- data.frame(train, tt)
train$DepartmentDescription <- NULL
colnames(train)
head(tt)
library(dplyr)
# ๋ฐ์ดํฐ ์ฝ์ด์ค๊ธฐ & NA ์ง์ฐ๊ธฐ
train <- read.csv("train.csv")
train <- train %>% filter(!is.na(Upc))
# VisitNumber์ DepartmentDescription ๊ทธ๋ฃน์ผ๋ก ๋ฌถ์ด์
# ๊ฐ ๊ทธ๋ฃน์ ๋ํ ์ด ๊ฐฏ์ ๋ฐ์ดํฐ ์
๋ง๋ค๊ธฐ -- ใ
descdummy <- train %>%
group_by(VisitNumber, DepartmentDescription) %>%
summarise(sumcategory = n())
# DepartmentDescription ๋๋ฏธ๋ณ์๋ฅผ ๋ด์ ๋ฐ์ดํฐํ๋ ์ ๊ณต๊ฐ ๋ง๋ค๊ธฐ
# column ์ด๋ฆ ๋ณ๊ฒฝํด์ฃผ๊ธฐ
departdummy <- data.frame(unique(train$VisitNumber),
matrix(0, nrow = length(unique(train$VisitNumber)),
ncol = length(levels(train$DepartmentDescription))))
colnames(departdummy)[1] <- "id"
colnames(departdummy)[2 : ncol(departdummy)] <- levels(train$DepartmentDescription)
# ์ด ์ ์ฒ๋ฆฌ ๋ฐฉ์์ ํต์ฌ์ DepartmentDescription์ ๋ ๋ฒจ์ 1๋ถํฐ 69๊น์ง๋ก ๋ณ๊ฒฝ ํ์
# ๊ทธ ์ซ์ + 1์ ํด๋นํ๋ ์ธ๋ฑ์ค์ ์์ ใ ๋ถ๋ถ์์ ๊ตฌํ ์ด ๊ฐฏ์๋ฅผ ๋ฃ์ด์ฃผ๋ ๊ฒ
# ๋ํ, factorํ์์ ์ฌ์น์ฐ์ฐ์ ๋ํด์ ์๋ฏธ๊ฐ ์๊ธฐ ๋๋ฌธ์
# ์ด๋ฅผ numeric์ผ๋ก ๋ณ๊ฒฝ ํ์ ์ฐ์ฐ์ ์งํ
levels(descdummy$DepartmentDescription) <- 1 : length(levels(descdummy$DepartmentDescription))
descdummy$DepartmentDescription <- as.numeric(descdummy$DepartmentDescription)
departindex <- 1 # departdummy(๋ฐ์ดํฐ๋ฅผ ์ ์ฅํด์ผํ ๋ณ์)์ ํ์ ์ธ๋ฑ์ค
descindex <- 1 # descdummy(ใ ์ ๊ทธ๋ฃน์ ๋ํ ์ด ๊ฐฏ์๊ฐ ๋ด๊ธด ๋ณ์)์ ํ์ ์ธ๋ฑ์ค
repeat{
# ์๋ก์ VisitNumber๋ฅผ ๋น๊ต,
# departdummy์ id๊ฐ VisitNumber(์ด์ด๋ฆ๋ง ์์์ ๋ฐ๊พผ๊ฒ)
# why? ๋ฐ์์ ๋ฐ์ดํฐ ์
๊ณผ ์กฐ์ธํ๊ธฐ์ํด ์ด๋ฆ์ id๋ก ๋ณ๊ฒฝ ํด์ค๊ฒ.
if(descdummy$VisitNumber[descindex] == departdummy$id[departindex]){
# ์์์ ๊ฐ๋จํ ์ค๋ช
ํ๋๋ก VisitNumber๊ฐ ๊ฐ๋ค๋ฉด ์ด ๊ฐฏ์ ๋ฐ์ดํฐ๋ฅผ
# departmentdescription์ ๊ฐ + 1์ ์ธ๋ฑ์ค์ ์ฝ์
# ๊ทธ๋ฆฌ๊ณ ๋์ ์ด ๊ฐฏ์ ๋ฐ์ดํฐ๊ฐ ๋ด๊ธด descindex์ ๊ฐ์ 1์ฆ๊ฐํ์ฌ ์๋ ํ์ผ๋ก ์ด๋
departdummy[departindex,
descdummy$DepartmentDescription[descindex] + 1] <- descdummy$sumcategory[descindex]
descindex <- descindex + 1
}else{
# ๋ง์ฝ VisitNumber๊ฐ ๋ค๋ฅด๋ค๋ฉด ์ฐ๋ฆฌ๊ฐ ๋ด๊ณ ์ ํ๋ departdummy์ ์ธ๋ฑ์ค๋ฅผ 1 ์ฆ๊ฐ
departindex <- departindex + 1
}
print(paste(departindex, descindex))
if(descindex == (nrow(descdummy) + 1)){
break
}
}
# ๋ฐ์ดํฐ ์
๋ถ๋ฌ์จ ํ ๋๋ฏธ๋ณ์์ ์กฐ์ธ์ํจํ์ ์ ์ฅ
newdata <- read.csv("newdata.csv")
newdata <- merge(newdata, departdummy, by = "id", all.x = T)
write.csv(newdata, "newdata.csv", row.names = F)
colnames(newdata)
# TripType - a categorical id representing the type of shopping trip the customer made.
# This is the ground truth that you are predicting.
# TripType_999 is an "other" category.
# VisitNumber - an id corresponding to a single trip by a single customer
# Weekday - the weekday of the trip
# Upc - the UPC number of the product purchased
# ScanCount - the number of the given item that was purchased.
# A negative value indicates a product return.
# DepartmentDescription - a high-level description of the item's department
# FinelineNumber - a more refined category for each of the products, created by Walmart
# upc ์ค๋ช
: https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/18158
# https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/18163
# https://www.kaggle.com/c/walmart-recruiting-trip-type-classification/discussion/30345
# https://en.wikipedia.org/wiki/Check_digit |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_score.R
\name{log_loss}
\alias{log_loss}
\title{Logarithmic Loss}
\usage{
log_loss(act, pred, allow_inf = FALSE)
}
\arguments{
\item{act}{actual results}
\item{pred}{predicted probabilties}
\item{allow_inf}{pass through infinite loss? \code{FALSE} replaces Inf with large but finite penalties at the extremes. Default \code{FALSE}.}
}
\value{
numeric.
}
\description{
\code{log_loss()} returns the logarithmic loss obtained from a given prediction and known result
}
\details{
The allow_inf parameter controls whether infinite loss is allowed (default is FALSE)
Setting allow_inf to FALSE will cause large but finite penalties at the extremes
}
| /man/log_loss.Rd | no_license | deepfriar/corsicaUtils | R | false | true | 727 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_score.R
\name{log_loss}
\alias{log_loss}
\title{Logarithmic Loss}
\usage{
log_loss(act, pred, allow_inf = FALSE)
}
\arguments{
\item{act}{actual results}
\item{pred}{predicted probabilties}
\item{allow_inf}{pass through infinite loss? \code{FALSE} replaces Inf with large but finite penalties at the extremes. Default \code{FALSE}.}
}
\value{
numeric.
}
\description{
\code{log_loss()} returns the logarithmic loss obtained from a given prediction and known result
}
\details{
The allow_inf parameter controls whether infinite loss is allowed (default is FALSE)
Setting allow_inf to FALSE will cause large but finite penalties at the extremes
}
|
โ R ์ ํ์ฉํ ๋จธ์ ๋ฌ๋ 11์ฅ - ๋ชจ๋ธ์ฑ๋ฅ๊ฐ์ 5. random forest - ์๋ํ๋
library(caret)
library(C50)
library(irr)
data(iris)
head(iris)
#0.shuffle ์ ๋จผ์ ํฉ๋๋ค.
set.seed(123)
iris_shuffle <- sample(1:150, 150)
iris_shuffle
iris2 <- iris[iris_shuffle,]
iris2
set.seed(123)
in_train <- createDataPartition(iris2$Species, p = 0.75, list = FALSE)
iris_train <- iris2[in_train, ] # ํ๋ จ ๋ฐ์ดํฐ ๊ตฌ์ฑ
iris_test <- iris2[-in_train, ] # ํ
์คํธ ๋ฐ์ดํฐ ๊ตฌ์ฑ
m <- train( Species~ . , data=iris_train, method="rf" )
# ๋๋คํฌ๋ ์คํธ: ์์ฌ๊ฒฐ์ ํธ๋ฆฌ + ์์๋ธ ๊ธฐ๋ฒ
m # ํ๋ํ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํ ์ ์๋ค.
p <- predict( m , iris_test )
table(p, iris_test$Species)
library(gmodels)
y <- CrossTable(iris_test$Species ,p)
sum(y$prop.tbl * diag(3))
| /[R] R ์ ํ์ฉํ ๋จธ์ ๋ฌ๋ 11์ฅ - ๋ชจ๋ธ์ฑ๋ฅ๊ฐ์ 5. random forest - ์๋ํ๋.R | no_license | data-githu/R-MACHINE-LEARNING | R | false | false | 830 | r | โ R ์ ํ์ฉํ ๋จธ์ ๋ฌ๋ 11์ฅ - ๋ชจ๋ธ์ฑ๋ฅ๊ฐ์ 5. random forest - ์๋ํ๋
library(caret)
library(C50)
library(irr)
data(iris)
head(iris)
#0.shuffle ์ ๋จผ์ ํฉ๋๋ค.
set.seed(123)
iris_shuffle <- sample(1:150, 150)
iris_shuffle
iris2 <- iris[iris_shuffle,]
iris2
set.seed(123)
in_train <- createDataPartition(iris2$Species, p = 0.75, list = FALSE)
iris_train <- iris2[in_train, ] # ํ๋ จ ๋ฐ์ดํฐ ๊ตฌ์ฑ
iris_test <- iris2[-in_train, ] # ํ
์คํธ ๋ฐ์ดํฐ ๊ตฌ์ฑ
m <- train( Species~ . , data=iris_train, method="rf" )
# ๋๋คํฌ๋ ์คํธ: ์์ฌ๊ฒฐ์ ํธ๋ฆฌ + ์์๋ธ ๊ธฐ๋ฒ
m # ํ๋ํ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํ ์ ์๋ค.
p <- predict( m , iris_test )
table(p, iris_test$Species)
library(gmodels)
y <- CrossTable(iris_test$Species ,p)
sum(y$prop.tbl * diag(3))
|
\name{get.ISFrag.results}
\alias{get.ISFrag.results}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summarizes ISFrag Results
}
\description{
Creates a list containing ISFrag parent-fragment trees and feature table containing parent-fragment relationship information.
}
\usage{
get.ISFrag.results(ISF_List, featureTable)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ISF_List}{
List of tables generated by find.level1() function.
}
\item{featureTable}{
featureTable generated by feature.annotaion() function
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
Returns a list containing [1]: a list of tree objects where each tree contains ISF relationship of each parent feature, [2]: a dataframe of the original feature table containing additional columns denoting each feature's ISF annotation/relationship.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Sam Shen, Jian Guo, Tao Huan
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
library(ISFrag)
MS1directory <- "X:/Users/Sam_Shen/ISFtest20210127/HILIC(+)/HILIC(+)3/fullscan"
MS2directory <- "X:/Users/Sam_Shen/ISFtest20210127/HILIC(+)/HILIC(+)3/DDA"
type <- "single"
lib_directory <- "E:/SAM"
lib_name <- "convertedLibraryPos.msp"
featureTable <- generate.featuretable(MS1directory = MS1directory, type = type, ppm=10, peakwidth=c(10,120),
mzdiff = 0.01, snthresh = 6, integrate = 1, prefilter = c(3,100), noise = 100, bw = 5, mzwid = 0.015,
max = 100, CAMERA = F)
featureTable <- ms2.tofeaturetable(MS2directory = MS2directory, featureTable = featureTable)
featureTable <- feature.annotation(featureTable = featureTable, lib_directory = lib_directory, lib_name = lib_name, dp = 0.7)
level3 <- find.level3(MS1directory = MS1directory, MS1.files = MS1.files, featureTable = featureTable, type = type)
level2 <- find.level2(ISFtable = level3)
level1 <- find.level1(ISF_putative = level2)
results <- get.ISFrag.results(ISF_List = level1, featureTable = featureTable)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
| /man/get.ISFrag.results.Rd | no_license | shen420/ISFrag | R | false | false | 2,593 | rd | \name{get.ISFrag.results}
\alias{get.ISFrag.results}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summarizes ISFrag Results
}
\description{
Creates a list containing ISFrag parent-fragment trees and feature table containing parent-fragment relationship information.
}
\usage{
get.ISFrag.results(ISF_List, featureTable)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ISF_List}{
List of tables generated by find.level1() function.
}
\item{featureTable}{
featureTable generated by feature.annotaion() function
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
Returns a list containing [1]: a list of tree objects where each tree contains ISF relationship of each parent feature, [2]: a dataframe of the original feature table containing additional columns denoting each feature's ISF annotation/relationship.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Sam Shen, Jian Guo, Tao Huan
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
library(ISFrag)
MS1directory <- "X:/Users/Sam_Shen/ISFtest20210127/HILIC(+)/HILIC(+)3/fullscan"
MS2directory <- "X:/Users/Sam_Shen/ISFtest20210127/HILIC(+)/HILIC(+)3/DDA"
type <- "single"
lib_directory <- "E:/SAM"
lib_name <- "convertedLibraryPos.msp"
featureTable <- generate.featuretable(MS1directory = MS1directory, type = type, ppm=10, peakwidth=c(10,120),
mzdiff = 0.01, snthresh = 6, integrate = 1, prefilter = c(3,100), noise = 100, bw = 5, mzwid = 0.015,
max = 100, CAMERA = F)
featureTable <- ms2.tofeaturetable(MS2directory = MS2directory, featureTable = featureTable)
featureTable <- feature.annotation(featureTable = featureTable, lib_directory = lib_directory, lib_name = lib_name, dp = 0.7)
level3 <- find.level3(MS1directory = MS1directory, MS1.files = MS1.files, featureTable = featureTable, type = type)
level2 <- find.level2(ISFtable = level3)
level1 <- find.level1(ISF_putative = level2)
results <- get.ISFrag.results(ISF_List = level1, featureTable = featureTable)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
# samples that did not sequence well
source("scripts/lab_helpers.R")
# library(ggplot2)
lab <- read_db("Laboratory")
# # get the list of missing samples from amphiprion that have been edited to just Ligation number in text editor using the find: (APCL_\d+)(L\d+) and the replace: \2 and the find: (APCL_)(L\d+) and the replace: \2
# fails <- read.csv("data/lowDP.indv.csv", header = F)
# names(fails) <- "ligation_id"
#
# # find sample_ids and extract ids
# lab <- read_db("Laboratory")
# ligs <- lab %>%
# tbl("ligation") %>%
# collect() %>%
# filter(ligation_id %in% fails$ligation_id) %>%
# select(ligation_id, digest_id, total_reads, retained)
#
# digs <- lab %>%
# tbl("digest") %>%
# collect() %>%
# filter(digest_id %in% ligs$digest_id) %>%
# select(digest_id, extraction_id, quant) %>%
# rename(dig_quant = quant)
#
# digs <- left_join(ligs, digs, by = "digest_id")
# rm(ligs)
#
# sample <- lab %>%
# tbl("extraction") %>%
# collect() %>%
# filter(extraction_id %in% digs$extraction_id) %>%
# select(extraction_id, sample_id, quant, gel) %>%
# rename(extr_quant = quant)
#
# sample <- left_join(digs, sample, by = "extraction_id")
#
# sample <- sample %>%
# select(sample_id, extraction_id, extr_quant, gel, digest_id, dig_quant, ligation_id, total_reads, retained)
#
# # write.csv(sample, "data/problem_samples.csv", row.names = F)
# pull in successful ligations from Rdata
ligs <- readRDS("data/passed_ligs.Rdata")
temp <- lab %>%
tbl("ligation") %>%
filter(ligation_id %in% ligs$ligation_id) %>%
select(ligation_id, digest_id, DNA, retained) %>%
collect()
ligs <- left_join(ligs, temp, by = "ligation_id")
failed_ligs <- lab %>%
tbl("ligation") %>%
filter(!ligation_id %in% ligs$ligation_id) %>%
select(ligation_id, digest_id, DNA, retained) %>%
collect()
temp <- lab %>%
tbl("digest") %>%
filter(digest_id %in% ligs$digest_id) %>%
select(digest_id, extraction_id, quant) %>%
collect()
ligs <- left_join(ligs, temp, by = "digest_id") %>%
rename(dig_quant = quant)
temp <- lab %>%
tbl("digest") %>%
filter(digest_id %in% failed_ligs$digest_id) %>%
select(digest_id, extraction_id, quant) %>%
collect()
failed_ligs <- left_join(failed_ligs, temp, by = "digest_id") %>%
rename(dig_quant = quant)
temp <- lab %>%
tbl("extraction") %>%
filter(extraction_id %in% ligs$extraction_id) %>%
select(extraction_id, quant, sample_id) %>%
collect()
ligs <- left_join(ligs, temp, by = "extraction_id") %>%
rename(extr_quant = quant)
temp <- lab %>%
tbl("extraction") %>%
filter(extraction_id %in% failed_ligs$extraction_id) %>%
select(extraction_id, quant, sample_id) %>%
collect()
failed_ligs <- left_join(failed_ligs, temp, by = "extraction_id") %>%
rename(extr_quant = quant) #1343
# remove any sample_ids from the failed_ligs that are also on the ligs
failed_ligs <- anti_join(failed_ligs, ligs, by = "sample_id") %>% #1052
select(sample_id, extraction_id, extr_quant, digest_id, dig_quant, ligation_id,DNA, retained) %>%
arrange(sample_id)
# remove samples that are not clarkii
removed <- failed_ligs %>%
filter(!grepl("APCL", sample_id))
failed_ligs <- failed_ligs %>%
filter(grepl("APCL", sample_id))
# separate out into low conc digest and regular digest
#
brks<-seq(0,800,50)
# plot concentration of failed seqs
hist(x = ligs$quant,col="blue",breaks=brks,ylim=c(0,150))
hist(x = sample$extr_quant,col="red",breaks=brks,ylim=c(0,150), add = T)
# how many digests with a quant < 10 led to successful sequences?
low_dig_succ <- ligs %>%
filter(ligs$dig_quant < 10)
low_dig_fail <- sample %>%
filter(sample$dig_quant < 10)
# rm(digs)
#
# # make a list of samples that need to be re-processed
# sample <- sample %>%
# select(sample_id, extraction_id, extr_quant, digest_id, dig_quant, ligation_id) %>%
# arrange(sample_id)
#
# # inform the leyte database that these samples failed sequencing
# nam <- sample %>%
# distinct(sample_id)
#
# leyte <- write_db("Leyte")
# clownfish <- dbReadTable(leyte, "clownfish")
# change <- clownfish %>%
# filter(sample_id %in% nam$sample_id)
#
# test <- anti_join(nam, change, by = "sample_id")
#
# # find out if these extractions were ligated successfully
# digs <- lab %>%
# tbl("digest") %>%
# filter(extraction_id %in% sample$extraction_id) %>%
# select(digest_id, extraction_id) %>%
# collect()
#
# ligs <- lab %>%
# tbl("ligation") %>%
# filter(digest_id %in% digs$digest_id,
# !ligation_id %in% sample$ligation_id,
# notes != "PCR failed",
# ligation_id != "L0548"
# ) %>%
# select(ligation_id, digest_id, notes) %>%
# collect()
#
# ligs <- left_join(ligs, digs, by = "digest_id")
# rm(digs)
#
# # remove successful ligations from the samples
# sample <- anti_join(sample, ligs, by = "extraction_id") %>%
# arrange(extr_quant, sample_id)
#
# # write.csv(sample, paste("data/samples_to_retry", Sys.Date(), ".csv", sep = ""), row.names = F)
#
# # plot a histogram of extr_quants
# ggplot(data = sample) +
# geom_histogram(mapping = aes(x=extr_quant), binwidth = 10)
#
# # based on this histogram, maybe do a low concentration enzyme digest with any samples with a quant lower than 10?
#
# low_conc <- sample %>%
# filter(extr_quant < 10)
#
#
# ggplot(data = low_conc) +
# geom_histogram(mapping = aes(x = extr_quant), binwidth = 0.5)
#
# # in order to make a plate of low concentration samples to digest, need to import locations from db
# low_conc <- low_conc %>%
# distinct(extraction_id)
#
# low_conc_plate <- lab %>%
# tbl("extraction") %>%
# filter(extraction_id %in% low_conc$extraction_id) %>%
# select(extraction_id, sample_id, quant, well, plate, notes) %>%
# collect() %>%
# filter(!grepl("empty", notes)) # remove empty wells
#
#
# # get a list of plate names and the count of samples from each plate
# counts <- low_conc_plate %>%
# group_by(plate) %>%
# summarise(samples = n()) %>% # count the number of samples to be digested on each plate
# arrange(plate)
#
# # add a digest plate name
# low_conc_plate <- low_conc_plate %>%
# mutate(dig_plate = "low_conc_plate") %>%
# arrange(extraction_id) # sort by extraction_id
#
# plate_master <- data.frame( row = rep(LETTERS[1:8], 12), col = unlist(lapply(1:12, rep, 8)))
# plate_master <- plate_master %>%
# mutate(dig_well = paste(row, col, sep = ""))
# plate_master <- plate_master[1:nrow(low_conc_plate), ]
# low_conc_plate <- cbind(low_conc_plate, plate_master)
# low_conc_plate <- low_conc_plate %>%
# mutate(col = formatC(as.numeric(col), width = 2, format = "d", flag = "0"))%>%
# arrange(col, row)
# low_conc_plate <- low_conc_plate %>%
# mutate(digest_id = 1:nrow(low_conc_plate))
#
#
#
#
# #
| /scripts/problem_samples.R | no_license | mstuart1/laboratory_old | R | false | false | 6,824 | r | # samples that did not sequence well
source("scripts/lab_helpers.R")
# library(ggplot2)
lab <- read_db("Laboratory")
# # get the list of missing samples from amphiprion that have been edited to just Ligation number in text editor using the find: (APCL_\d+)(L\d+) and the replace: \2 and the find: (APCL_)(L\d+) and the replace: \2
# fails <- read.csv("data/lowDP.indv.csv", header = F)
# names(fails) <- "ligation_id"
#
# # find sample_ids and extract ids
# lab <- read_db("Laboratory")
# ligs <- lab %>%
# tbl("ligation") %>%
# collect() %>%
# filter(ligation_id %in% fails$ligation_id) %>%
# select(ligation_id, digest_id, total_reads, retained)
#
# digs <- lab %>%
# tbl("digest") %>%
# collect() %>%
# filter(digest_id %in% ligs$digest_id) %>%
# select(digest_id, extraction_id, quant) %>%
# rename(dig_quant = quant)
#
# digs <- left_join(ligs, digs, by = "digest_id")
# rm(ligs)
#
# sample <- lab %>%
# tbl("extraction") %>%
# collect() %>%
# filter(extraction_id %in% digs$extraction_id) %>%
# select(extraction_id, sample_id, quant, gel) %>%
# rename(extr_quant = quant)
#
# sample <- left_join(digs, sample, by = "extraction_id")
#
# sample <- sample %>%
# select(sample_id, extraction_id, extr_quant, gel, digest_id, dig_quant, ligation_id, total_reads, retained)
#
# # write.csv(sample, "data/problem_samples.csv", row.names = F)
# pull in successful ligations from Rdata
ligs <- readRDS("data/passed_ligs.Rdata")
temp <- lab %>%
tbl("ligation") %>%
filter(ligation_id %in% ligs$ligation_id) %>%
select(ligation_id, digest_id, DNA, retained) %>%
collect()
ligs <- left_join(ligs, temp, by = "ligation_id")
failed_ligs <- lab %>%
tbl("ligation") %>%
filter(!ligation_id %in% ligs$ligation_id) %>%
select(ligation_id, digest_id, DNA, retained) %>%
collect()
temp <- lab %>%
tbl("digest") %>%
filter(digest_id %in% ligs$digest_id) %>%
select(digest_id, extraction_id, quant) %>%
collect()
ligs <- left_join(ligs, temp, by = "digest_id") %>%
rename(dig_quant = quant)
temp <- lab %>%
tbl("digest") %>%
filter(digest_id %in% failed_ligs$digest_id) %>%
select(digest_id, extraction_id, quant) %>%
collect()
failed_ligs <- left_join(failed_ligs, temp, by = "digest_id") %>%
rename(dig_quant = quant)
temp <- lab %>%
tbl("extraction") %>%
filter(extraction_id %in% ligs$extraction_id) %>%
select(extraction_id, quant, sample_id) %>%
collect()
ligs <- left_join(ligs, temp, by = "extraction_id") %>%
rename(extr_quant = quant)
temp <- lab %>%
tbl("extraction") %>%
filter(extraction_id %in% failed_ligs$extraction_id) %>%
select(extraction_id, quant, sample_id) %>%
collect()
failed_ligs <- left_join(failed_ligs, temp, by = "extraction_id") %>%
rename(extr_quant = quant) #1343
# remove any sample_ids from the failed_ligs that are also on the ligs
failed_ligs <- anti_join(failed_ligs, ligs, by = "sample_id") %>% #1052
select(sample_id, extraction_id, extr_quant, digest_id, dig_quant, ligation_id,DNA, retained) %>%
arrange(sample_id)
# remove samples that are not clarkii
removed <- failed_ligs %>%
filter(!grepl("APCL", sample_id))
failed_ligs <- failed_ligs %>%
filter(grepl("APCL", sample_id))
# separate out into low conc digest and regular digest
#
brks<-seq(0,800,50)
# plot concentration of failed seqs
hist(x = ligs$quant,col="blue",breaks=brks,ylim=c(0,150))
hist(x = sample$extr_quant,col="red",breaks=brks,ylim=c(0,150), add = T)
# how many digests with a quant < 10 led to successful sequences?
low_dig_succ <- ligs %>%
filter(ligs$dig_quant < 10)
low_dig_fail <- sample %>%
filter(sample$dig_quant < 10)
# rm(digs)
#
# # make a list of samples that need to be re-processed
# sample <- sample %>%
# select(sample_id, extraction_id, extr_quant, digest_id, dig_quant, ligation_id) %>%
# arrange(sample_id)
#
# # inform the leyte database that these samples failed sequencing
# nam <- sample %>%
# distinct(sample_id)
#
# leyte <- write_db("Leyte")
# clownfish <- dbReadTable(leyte, "clownfish")
# change <- clownfish %>%
# filter(sample_id %in% nam$sample_id)
#
# test <- anti_join(nam, change, by = "sample_id")
#
# # find out if these extractions were ligated successfully
# digs <- lab %>%
# tbl("digest") %>%
# filter(extraction_id %in% sample$extraction_id) %>%
# select(digest_id, extraction_id) %>%
# collect()
#
# ligs <- lab %>%
# tbl("ligation") %>%
# filter(digest_id %in% digs$digest_id,
# !ligation_id %in% sample$ligation_id,
# notes != "PCR failed",
# ligation_id != "L0548"
# ) %>%
# select(ligation_id, digest_id, notes) %>%
# collect()
#
# ligs <- left_join(ligs, digs, by = "digest_id")
# rm(digs)
#
# # remove successful ligations from the samples
# sample <- anti_join(sample, ligs, by = "extraction_id") %>%
# arrange(extr_quant, sample_id)
#
# # write.csv(sample, paste("data/samples_to_retry", Sys.Date(), ".csv", sep = ""), row.names = F)
#
# # plot a histogram of extr_quants
# ggplot(data = sample) +
# geom_histogram(mapping = aes(x=extr_quant), binwidth = 10)
#
# # based on this histogram, maybe do a low concentration enzyme digest with any samples with a quant lower than 10?
#
# low_conc <- sample %>%
# filter(extr_quant < 10)
#
#
# ggplot(data = low_conc) +
# geom_histogram(mapping = aes(x = extr_quant), binwidth = 0.5)
#
# # in order to make a plate of low concentration samples to digest, need to import locations from db
# low_conc <- low_conc %>%
# distinct(extraction_id)
#
# low_conc_plate <- lab %>%
# tbl("extraction") %>%
# filter(extraction_id %in% low_conc$extraction_id) %>%
# select(extraction_id, sample_id, quant, well, plate, notes) %>%
# collect() %>%
# filter(!grepl("empty", notes)) # remove empty wells
#
#
# # get a list of plate names and the count of samples from each plate
# counts <- low_conc_plate %>%
# group_by(plate) %>%
# summarise(samples = n()) %>% # count the number of samples to be digested on each plate
# arrange(plate)
#
# # add a digest plate name
# low_conc_plate <- low_conc_plate %>%
# mutate(dig_plate = "low_conc_plate") %>%
# arrange(extraction_id) # sort by extraction_id
#
# plate_master <- data.frame( row = rep(LETTERS[1:8], 12), col = unlist(lapply(1:12, rep, 8)))
# plate_master <- plate_master %>%
# mutate(dig_well = paste(row, col, sep = ""))
# plate_master <- plate_master[1:nrow(low_conc_plate), ]
# low_conc_plate <- cbind(low_conc_plate, plate_master)
# low_conc_plate <- low_conc_plate %>%
# mutate(col = formatC(as.numeric(col), width = 2, format = "d", flag = "0"))%>%
# arrange(col, row)
# low_conc_plate <- low_conc_plate %>%
# mutate(digest_id = 1:nrow(low_conc_plate))
#
#
#
#
# #
|
# put your ASV table and taxonomy table in Database/Tax4Fun2 folder...
library(Tax4Fun2)
library(seqinr)
setwd("~/R/Database/Tax4Fun2")
ASV.table <- read.table(file="ASV_table.txt",header=T,row.names=1)
ASV <- ASV.table [,1:(ncol(ASV.table)-6)] # I've changed 7--> 6
ASV <- cbind (rownames(ASV),ASV)
ASV <- rbind (colnames(ASV),ASV)
ASV[1,1] <- "ID"
rownames(ASV) <- NULL
colnames(ASV) <- NULL
write.table(ASV, "ASV.txt",sep="\t",col.names = F, row.names = F,quote=F)
taxonomy <- read.table(file="taxonomy.txt",header=T, row.names=1)
tax <- subset(taxonomy, Family != "Mitochondria" &
Class != "Chloroplast" &
Kingdom != "NA")
write.fasta (sequences = as.list(rownames(tax)),names = rownames(ASV.table),file.out="seqs.fasta")
dir.create("Tax4Fun2")
#Step 2: Generate your own reference datasets
#1. Extracting SSU seqeunces (16S rRNA and 18S rRNA)
extractSSU(genome_file = "OneProkaryoticGenome.fasta", file_extension = "fasta", path_to_reference_data ="Tax4Fun2_ReferenceData_v2")
#2. Assigning functions to prokayotic genomes
assignFunction(genome_file = "OneProkaryoticGenome.fasta", file_extension = "fasta", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", num_of_threads = 1, fast = TRUE)
#3. Generate the reference data
generateUserData(path_to_reference_data ="Tax4Fun2_ReferenceData_v2", path_to_user_data = ".", name_of_user_data = "User_Ref0", SSU_file_extension = "_16SrRNA.ffn", KEGG_file_extension = "_funPro.txt")
#Step 3: Making functional predictions
#1. Making functional predictions using the default reference data only
#1. Run the reference blast
runRefBlast(path_to_otus = "seqs.fasta" , path_to_reference_data ="Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", use_force = T, num_threads = 6)
# 2) Predicting functional profiles
# Remove the first row's # and the second row's # of "17_otu_97_table_taxonomy.txt" --> "17_otu_97_table_taxonomy_tax4fun.txt"
makeFunctionalPrediction(path_to_otu_table = "ASV.txt", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", normalize_by_copy_number = TRUE, min_identity_to_reference = 0.97, normalize_pathways = FALSE)
# note. normalize_pathways = FALSE will affiliate the rel. abundance of each KO to each pathway it belongs to. By setting it to true, the rel. abundance is equally distributed to all pathways it was assigned to.)
#Step 4: Calculating (multi-)functional redundancy indices (experimental)
calculateFunctionalRedundancy(path_to_otu_table = "ASV.txt", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", min_identity_to_reference = 0.97)
# Don't forget to move the output file (Tax4Fun2) for your project file
| /tax4fun2_from_dada2.R | no_license | HaihuaWang-hub/NGS-Data-Processing | R | false | false | 2,862 | r | # put your ASV table and taxonomy table in Database/Tax4Fun2 folder...
library(Tax4Fun2)
library(seqinr)
setwd("~/R/Database/Tax4Fun2")
ASV.table <- read.table(file="ASV_table.txt",header=T,row.names=1)
ASV <- ASV.table [,1:(ncol(ASV.table)-6)] # I've changed 7--> 6
ASV <- cbind (rownames(ASV),ASV)
ASV <- rbind (colnames(ASV),ASV)
ASV[1,1] <- "ID"
rownames(ASV) <- NULL
colnames(ASV) <- NULL
# Export the ASV table (tab-separated, no header/rownames/quotes) for Tax4Fun2.
write.table(ASV, "ASV.txt",sep="\t",col.names = F, row.names = F,quote=F)
# Taxonomy assignments, one row per sequence (rownames = sequence IDs).
taxonomy <- read.table(file="taxonomy.txt",header=T, row.names=1)
# Drop organelle-derived reads and unassigned kingdoms.
# NOTE(review): Kingdom != "NA" compares against the literal string "NA",
# not a missing value -- confirm the taxonomy file encodes it that way.
tax <- subset(taxonomy, Family != "Mitochondria" &
Class != "Chloroplast" &
Kingdom != "NA")
# Write the retained sequences to FASTA for the reference blast.
# NOTE(review): sequences come from rownames(tax) but names from
# rownames(ASV.table); `ASV.table` is not the `ASV` object used above --
# verify the two are aligned (same length and order) or the FASTA
# headers will mislabel the sequences.
write.fasta (sequences = as.list(rownames(tax)),names = rownames(ASV.table),file.out="seqs.fasta")
# Working folder for Tax4Fun2 intermediate output.
dir.create("Tax4Fun2")
#Step 2: Generate your own reference datasets
#1. Extracting SSU sequences (16S rRNA and 18S rRNA)
extractSSU(genome_file = "OneProkaryoticGenome.fasta", file_extension = "fasta", path_to_reference_data ="Tax4Fun2_ReferenceData_v2")
#2. Assigning functions to prokaryotic genomes
assignFunction(genome_file = "OneProkaryoticGenome.fasta", file_extension = "fasta", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", num_of_threads = 1, fast = TRUE)
#3. Generate the reference data
generateUserData(path_to_reference_data ="Tax4Fun2_ReferenceData_v2", path_to_user_data = ".", name_of_user_data = "User_Ref0", SSU_file_extension = "_16SrRNA.ffn", KEGG_file_extension = "_funPro.txt")
#Step 3: Making functional predictions
#1. Making functional predictions using the default reference data only
#1. Run the reference blast
runRefBlast(path_to_otus = "seqs.fasta" , path_to_reference_data ="Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", use_force = T, num_threads = 6)
# 2) Predicting functional profiles
# Remove the first row's # and the second row's # of "17_otu_97_table_taxonomy.txt" --> "17_otu_97_table_taxonomy_tax4fun.txt"
makeFunctionalPrediction(path_to_otu_table = "ASV.txt", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", normalize_by_copy_number = TRUE, min_identity_to_reference = 0.97, normalize_pathways = FALSE)
# note. normalize_pathways = FALSE will affiliate the rel. abundance of each KO to each pathway it belongs to. By setting it to true, the rel. abundance is equally distributed to all pathways it was assigned to.)
#Step 4: Calculating (multi-)functional redundancy indices (experimental)
calculateFunctionalRedundancy(path_to_otu_table = "ASV.txt", path_to_reference_data = "Tax4Fun2_ReferenceData_v2", path_to_temp_folder = "Tax4Fun2", database_mode = "Ref99NR", min_identity_to_reference = 0.97)
# Don't forget to move the output file (Tax4Fun2) for your project file
|
# NOTE(review): setwd() with an absolute personal path makes this script
# non-portable; prefer running R from the project directory instead.
setwd("~/Dropbox/umich_2017_winter/stats503/project")
library(stringr)  # string manipulation (str_c, str_replace_all, ...)
library(rvest)    # HTML scraping (read_html, html_nodes, html_table)
########################################################
# players drafted 2010-2015
start_year = 2010
end_year = 2015
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
# Scrape the nbadraft.net final-draft board for each requested year.
#
# yrs: integer vector of draft years. Defaults to the script-level
#   `seasons` vector, so existing zero-argument calls behave exactly as
#   before; passing years explicitly removes the hidden dependency on a
#   global that this script reassigns several times further down.
# Returns a list of data frames, one per year: the first HTML table on
#   each page (the draft board).
draftplayer_read <- function(yrs = seasons){
  # str_c() is vectorized, so no sapply() loop is needed to build URLs.
  urls <- str_c("http://www.nbadraft.net/nba_final_draft/", yrs)
  pages <- lapply(urls, read_html)
  lapply(pages, function(p) html_table(html_nodes(p, "table"))[[1]])
}
# Stack the six per-year draft tables into one data frame.
player_list <- do.call(rbind, draftplayer_read())
names(player_list)
# The site lays out two players per row (columns 1-8 and 9-16); convert
# wide format to long format by stacking the two halves.
player_list <- rbind(player_list[,1:8], player_list[,9:16])
names(player_list)
# select only useful columns: rank, name, height, position
player_list <- player_list[,c("#", "Player", "H", "P")]
# NOTE(review): this re-downloads all six pages just to count rows.
sapply(draftplayer_read(), nrow)
# include draft_class label (60 players per draft year)
draft_class = as.numeric(rep(str_c("201", 0:5), each = 60))
# complete 360 rows
player_list_df <- cbind(player_list, draft_class)
# Build sports-reference.com URL slugs: "First Last" -> "first-last".
player_name <- player_list_df$Player
player_name <- str_replace_all(player_name, " ", "-")
# to lower
player_name <- tolower(player_name)
# create list container
player_html <- vector("list", length(player_name))
# Fetch each player's college-stats page; on failure the list element
# holds the printed error/warning string instead of a parsed page.
for (i in 1:length(player_name)){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
player_html_orginal <- player_html
# save(player_html_orginal, file = "player_html_orginal.rda")
# Keep only drafted players whose college stats are available.
# NOTE(review): assumes a successfully parsed xml document has length 2
# while a failed fetch left a length-1 character string -- confirm.
ind_original <- unlist(lapply(player_html_orginal, function(e){length(e) == 2}))
player_list_df <- player_list_df[ind_original, ]
head(player_list_df)
# save(player_list_df, file = "player_list_df")
load("player_list_df.rda")
# find college statistics
# convert html tables to data frames (first table on each player's page)
player_ind <- player_html[ind_original]
playercollege_stats <- lapply(1:length(player_ind),
function(e) html_table(html_nodes(player_ind[[e]], "table"))[[1]])
# save(playercollege_stats, file = "playercollege_stats.rda")
load("playercollege_stats.rda")
# player names
playername <- player_list_df$Player
# Column counts differ between players; find the common minimum width.
a <- sapply(1:length(playername), function(e) ncol(playercollege_stats[[e]]))
min(a) # 24
max(a) # 26
# Pre-allocate a (players x common-columns) frame of NAs.
# BUG FIX: rep() has no `n` argument (its size argument is `times`), so
# rep(NA, n = ...) silently ignored it and returned a single NA that
# matrix() recycled; the 149 was also a hard-coded row count. Build the
# all-NA matrix directly and size it from the data instead -- the result
# is identical, without relying on the accidental recycling.
player_stats <- matrix(NA, nrow = length(playername), ncol = min(a))
player_stats <- as.data.frame(player_stats)
# Keep each player's final (career-summary) row, restricted to the 24
# columns that every table shares.
for (i in seq_along(playercollege_stats)) {
  player_stats[i, 1:24] <- tail(playercollege_stats[[i]][, 1:24], n = 1)
}
# Take column names from the first player's table (first 24 columns).
names(player_stats) <- names(playercollege_stats[[1]][,1:24])
# remove conference variable
player_stats <- player_stats[,-3]
rownames(player_stats) <- playername
head(player_stats)
# combine draft list and college stats (dropping the first two stat cols)
player_stats_original <- cbind(player_stats[, -c(1,2)],
player_list_df[, c("#", "H", "P", "draft_class")])
# Convert height "feet-inches" strings (e.g. "6-10") to centimetres;
# grDevices::cm() converts inches to cm.
x = player_stats_original$H
a <- str_replace_all(x, "-[0-9]+", "")   # feet part
a <- cm(as.numeric(a)*12)                # feet -> inches -> cm
b <- str_replace_all(x, "[0-9]+-", "")   # inches part
b <- cm(as.numeric(b))
player_stats_original$H <- a + b
# save(player_stats_original, file = "player_stats_original.rda")
load("player_stats_original.rda")
head(player_stats_original)
nrow(player_stats_original)
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
########################################################
# players drafted 2009-2015 (expanded data set)
start_year = 2009
end_year = 2015
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
# Scrape the nbadraft.net final-draft board for each requested year
# (2009-2015 variant; identical page layout to draftplayer_read()).
#
# yrs: integer vector of draft years, defaulting to the script-level
#   `seasons` vector so the existing zero-argument call is unchanged.
# Returns a list of data frames, one per year (first table per page).
draftplayer_read_1 <- function(yrs = seasons){
  urls <- str_c("http://www.nbadraft.net/nba_final_draft/", yrs)
  pages <- lapply(urls, read_html)
  lapply(pages, function(p) html_table(html_nodes(p, "table"))[[1]])
}
# Stack the per-year tables, then wide-to-long: the site lays out two
# players per row (columns 1-8 and 9-16).
player_list_1 <- do.call(rbind, draftplayer_read_1())
player_list_1 <- rbind(player_list_1[,1:8], player_list_1[,9:16])
names(player_list_1)
# Draft-year label; 60 players per year for 2009-2015.
draft_class = as.numeric(rep(c("2009",str_c("201", 0:5)), each = 60))
player_list_1 <- cbind(player_list_1, draft_class)
player_list_1 <- player_list_1[,c("#", "Player", "H", "P", "draft_class")]
names(player_list_1)
# players drafted 2000-2008 (older seasons live under a different URL scheme)
start_year = 2000
end_year = 2008
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
# Scrape the nbadraft.net draft-history pages (2000-2008 URL scheme:
# .../nba_draft_history/<year>.html).
#
# yrs: integer vector of draft years, defaulting to the script-level
#   `seasons` vector so the existing zero-argument call is unchanged.
# Returns a list of data frames, one per year (first table per page).
# FIX: the original had a dead bare `draftees` statement immediately
# before `return(draftees)`; the value is now simply the last expression.
draftplayer_read_2 <- function(yrs = seasons){
  urls <- str_c("http://www.nbadraft.net/nba_draft_history/", yrs, ".html")
  pages <- lapply(urls, read_html)
  lapply(pages, function(p) html_table(html_nodes(p, "table"))[[1]])
}
ll <- draftplayer_read_2()
# Each 2000-2008 table also lays out two players per row: tag each table
# with its draft year, drop two unused columns, then stack the halves.
for(i in 1:9){
ll[[i]] <- cbind(ll[[i]],
draft_class = rep(str_c("200", i-1), each = nrow(ll[[i]])))
x <- ll[[i]]
x <- x[,-c(2, 6)]
names(x) <- c("#", "Team", "info", "#", "Team", "info", "draft_class")
player_list_21 <- cbind(x[,1:3], draft_class = x[,7])
player_list_22 <- cbind(x[,4:6], draft_class = x[,7])
ll[[i]] <- rbind(player_list_21, player_list_22)
}
player_list_2 <- do.call(rbind, ll)
# The `info` field mixes name, height and position. Extract the name by
# stripping everything from the height token ("d-d...") to end of line,
# then trailing blanks and asterisks.
x4 <- player_list_2$info
# x4 <- x4[-c(47, 82, 145)]
x4 <- str_replace_all(x4, "\\n", " ")
x4 <- str_replace_all(x4, "[0-9]-[0-9]*[[:print:]]*$", "")
x4 <- str_replace_all(x4, "[[:blank:]]*$", "")
x4 <- str_replace_all(x4, "\\*", "")
# extract player height (the "feet-inches" token)
# NOTE(review): str_extract_all()'s third argument is `simplify`; passing
# "" here is unusual, and as.vector() on a list is a no-op -- confirm
# `hei` really ends up usable as an atomic vector downstream.
hei <- player_list_2$info
hei <- str_replace_all(hei, "\\n", " ")
hei <- as.vector(str_extract_all(hei, "[0-9]-[0-9]", ""))
# extract player position: remove everything up to and including the
# height, then digits, punctuation and leading blanks; keep first word.
pos <- player_list_2$info
pos <- str_replace_all(pos, "\\n", " ")
pos <- str_replace_all(pos, "^[[:print:]]*[0-9]-[0-9]*", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- str_replace_all(pos, "[[:digit:]]*", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- str_replace_all(pos, "[[:punct:]]+", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- strsplit(pos, " ")
pos <- unlist(lapply(pos, function(e) e[1]))
# Drop rows whose extracted name still contains punctuation (irregular
# entries), keeping all parallel vectors in sync.
remov <- str_which(x4, "[[:punct:]]")
x4 <- x4[-remov] # remove everything irregular
hei <- hei[-remov]
pos <- pos[-remov]
draft_class <- player_list_2$draft_class
draft_class <- draft_class[-remov]
player_list_2 <- player_list_2[-remov,]
player_list_2 <- data.frame(player_list_2$`#`, x4, hei, pos, draft_class)
names(player_list_2) <- c("#", "Player", "H", "P", "draft_class")
# combine 2000-2015
player_list_tot <- rbind(player_list_1, player_list_2)
# save(player_list_tot, file = "player_list_tot.rda")
########################################################
load("player_list_tot.rda")
# webscraping format:
# add hyphen between first and last name
dt <- player_list_tot
player_name <- dt$Player
player_name <- str_replace_all(player_name, " ", "-")
# to lower
player_name <- tolower(player_name)
# create list container
player_html <- vector("list", length(player_name))
# Fetch each player's college-stats page. The crawl is split in two
# (1:684 and 685:end) -- presumably so a crash midway could be resumed
# without refetching; TODO confirm, the two loop bodies are identical.
for (i in 1:684){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
for (i in 685:length(player_name)){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
# Keep only players whose college stats are available (a parsed page is
# assumed to have length 2, a failed fetch left a character string).
ind_tot <- unlist(lapply(player_html, function(e){length(e) == 2}))
# save(ind_tot, file = "ind_tot.rda")
load("ind_tot.rda")
player_list_tot <- player_list_tot[ind_tot, ]
head(player_list_tot)
# save(player_list_tot, file = "player_list_tot.rda")
player_ind <- player_html[ind_tot]
################################################################################
# convert html tables to data frames (first table on each player's page)
playercollege_stats_tot <- lapply(1:length(player_ind),
function(e) html_table(html_nodes(player_ind[[e]], "table"))[[1]])
# save(playercollege_stats_tot, file = "playercollege_stats_tot.rda")
load("playercollege_stats_tot.rda")
load("player_list_tot.rda")
playername <- player_list_tot$Player
length(playername)
# Column counts differ between players; find the common minimum width.
a <- sapply(1:length(playername), function(e) ncol(playercollege_stats_tot[[e]]))
min(a) # 24
max(a) # 26
# Pre-allocate a (players x common-columns) frame of NAs.
# BUG FIX: rep() has no `n` argument (its size argument is `times`), so
# rep(NA, n = ...) silently ignored it and returned a single NA that
# matrix() recycled. Build the all-NA matrix directly -- the result is
# identical, without relying on the accidental recycling.
player_stats <- matrix(NA, nrow = length(playername), ncol = min(a))
dim(player_stats)
player_stats <- as.data.frame(player_stats)
# Keep each player's final (career-summary) row, restricted to the 24
# columns that every table shares.
for (i in seq_along(playercollege_stats_tot)) {
  player_stats[i, 1:24] <- tail(playercollege_stats_tot[[i]][, 1:24], n = 1)
}
# Take column names from the first player's table (first 24 columns).
names(player_stats) <- names(playercollege_stats_tot[[1]][,1:24])
# remove conference variable
player_stats <- player_stats[,-3]
# Rownames must be unique; "Marcus Williams" appears twice in this data,
# so the two occurrences are disambiguated by hand.
# NOTE(review): this assumes exactly one duplicated name (length(dupl)
# == 1); which(playername == playername[dupl]) would misbehave otherwise.
dupl <- which(duplicated(playername) == TRUE)
dup <- which(playername == playername[dupl])
rownames(player_stats)[1:dup[1]] <- playername[1:dup[1]]
rownames(player_stats)[dup[1]] <- "Marcus Williams.1"
rownames(player_stats)[(dup[1]+1):dup[2]] <- playername[(dup[1]+1):dup[2]]
rownames(player_stats)[dup[2]] <- "Marcus Williams.2"
rownames(player_stats)[(dup[2]+1):length(playername)] <-
playername[(dup[2]+1):length(playername)]
head(player_stats)
# combine draft list and college stats
player_stats <- cbind(player_stats[, -c(1,2)],
player_list_tot[, c("#", "H", "P", "draft_class")])
player_stats_tot <- player_stats
# Convert height "feet-inches" strings (e.g. "6-10") to centimetres;
# grDevices::cm() converts inches to cm.
x = player_stats_tot$H
a <- str_replace_all(x, "-[0-9]+", "")
a <- cm(as.numeric(a)*12)
b <- str_replace_all(x, "[0-9]+-", "")
b <- cm(as.numeric(b))
player_stats_tot$H <- a + b
head(player_stats_tot)
# save(player_stats_tot, file = "player_stats_tot.rda")
load("player_stats_tot.rda")
##############################################################################
# original 2010 - 2015
load("player_stats_original.rda")
head(player_stats_original)
nrow(player_stats_original)
##############################################################################
# expanded 2000 - 2015
load("player_stats_tot.rda")
head(player_stats_tot)
nrow(player_stats_tot)
| /data_scraping.R | no_license | lizeyuyuz/NBA_Clustering | R | false | false | 11,406 | r | setwd("~/Dropbox/umich_2017_winter/stats503/project")
library(stringr)
library(rvest)
########################################################
# players drafted
start_year = 2010
end_year = 2015
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
draftplayer_read <- function(){
urls <- sapply(seasons, function(e)
str_c("http://www.nbadraft.net/nba_final_draft/",e))
page <- lapply(urls, read_html)
draftees <- lapply(1:length(seasons),
function(e) html_table(html_nodes(page[[e]], "table"))[[1]])
return(draftees)
}
player_list <- do.call(rbind, draftplayer_read())
names(player_list)
# convert wide format to long format
player_list <- rbind(player_list[,1:8], player_list[,9:16])
names(player_list)
# select only useful columns
player_list <- player_list[,c("#", "Player", "H", "P")]
sapply(draftplayer_read(), nrow)
# include draft_class lable
draft_class = as.numeric(rep(str_c("201", 0:5), each = 60))
# complete 360 rows
player_list_df <- cbind(player_list, draft_class)
# incomplete
player_name <- player_list_df$Player
player_name <- str_replace_all(player_name, " ", "-")
# to lower
player_name <- tolower(player_name)
# create list container
player_html <- vector("list", length(player_name))
# load each player's college stat into each element in the list
for (i in 1:length(player_name)){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
player_html_orginal <- player_html
# save(player_html_orginal, file = "player_html_orginal.rda")
# leave only draf players whose college stats available
ind_original <- unlist(lapply(player_html_orginal, function(e){length(e) == 2}))
player_list_df <- player_list_df[ind_original, ]
head(player_list_df)
# save(player_list_df, file = "player_list_df")
load("player_list_df.rda")
# find college statistics
# convert html table to dataframes
player_ind <- player_html[ind_original]
playercollege_stats <- lapply(1:length(player_ind),
function(e) html_table(html_nodes(player_ind[[e]], "table"))[[1]])
# save(playercollege_stats, file = "playercollege_stats.rda")
load("playercollege_stats.rda")
# player names
playername <- player_list_df$Player
# check each player's stat's dimension
a <- sapply(1:length(playername), function(e) ncol(playercollege_stats[[e]]))
min(a) # 24
max(a) # 26
player_stats <- matrix(rep(NA, n = min(a)*149), ncol = min(a), nrow = length(playername))
player_stats <- as.data.frame(player_stats)
for(i in 1:length(playercollege_stats)){
player_stats[i,1:24]<- tail(playercollege_stats[[i]][,1:24], n = 1)
}
names(player_stats) <- names(playercollege_stats[[1]][,1:24])
# remove conference variable
player_stats <- player_stats[,-3]
rownames(player_stats) <- playername
head(player_stats)
# combine draft list and college stats
player_stats_original <- cbind(player_stats[, -c(1,2)],
player_list_df[, c("#", "H", "P", "draft_class")])
x = player_stats_original$H
a <- str_replace_all(x, "-[0-9]+", "")
a <- cm(as.numeric(a)*12)
b <- str_replace_all(x, "[0-9]+-", "")
b <- cm(as.numeric(b))
player_stats_original$H <- a + b
# save(player_stats_original, file = "player_stats_original.rda")
load("player_stats_original.rda")
head(player_stats_original)
nrow(player_stats_original)
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
########################################################
# players drafted
start_year = 2009
end_year = 2015
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
draftplayer_read_1 <- function(){
urls <- sapply(seasons, function(e)
str_c("http://www.nbadraft.net/nba_final_draft/",e))
page <- lapply(urls, read_html)
draftees <- lapply(1:length(seasons),
function(e) html_table(html_nodes(page[[e]], "table"))[[1]])
return(draftees)
}
player_list_1 <- do.call(rbind, draftplayer_read_1())
player_list_1 <- rbind(player_list_1[,1:8], player_list_1[,9:16])
names(player_list_1)
draft_class = as.numeric(rep(c("2009",str_c("201", 0:5)), each = 60))
player_list_1 <- cbind(player_list_1, draft_class)
player_list_1 <- player_list_1[,c("#", "Player", "H", "P", "draft_class")]
names(player_list_1)
# players drafted
start_year = 2000
end_year = 2008
seasons <- seq(start_year, end_year, 1)
# scrape player names and ranking from nbadraft.net
draftplayer_read_2 <- function(){
urls <- sapply(seasons, function(e)
str_c("http://www.nbadraft.net/nba_draft_history/",e, ".html"))
page <- lapply(urls, read_html)
draftees <- lapply(1:length(seasons),
function(e) html_table(html_nodes(page[[e]], "table"))[[1]])
draftees
return(draftees)
}
ll <- draftplayer_read_2()
for(i in 1:9){
ll[[i]] <- cbind(ll[[i]],
draft_class = rep(str_c("200", i-1), each = nrow(ll[[i]])))
x <- ll[[i]]
x <- x[,-c(2, 6)]
names(x) <- c("#", "Team", "info", "#", "Team", "info", "draft_class")
player_list_21 <- cbind(x[,1:3], draft_class = x[,7])
player_list_22 <- cbind(x[,4:6], draft_class = x[,7])
ll[[i]] <- rbind(player_list_21, player_list_22)
}
player_list_2 <- do.call(rbind, ll)
# extract player names
x4 <- player_list_2$info
# x4 <- x4[-c(47, 82, 145)]
x4 <- str_replace_all(x4, "\\n", " ")
x4 <- str_replace_all(x4, "[0-9]-[0-9]*[[:print:]]*$", "")
x4 <- str_replace_all(x4, "[[:blank:]]*$", "")
x4 <- str_replace_all(x4, "\\*", "")
# extract player height
hei <- player_list_2$info
hei <- str_replace_all(hei, "\\n", " ")
hei <- as.vector(str_extract_all(hei, "[0-9]-[0-9]", ""))
# extract player position
pos <- player_list_2$info
pos <- str_replace_all(pos, "\\n", " ")
pos <- str_replace_all(pos, "^[[:print:]]*[0-9]-[0-9]*", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- str_replace_all(pos, "[[:digit:]]*", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- str_replace_all(pos, "[[:punct:]]+", "")
pos <- str_replace_all(pos, "^[[:blank:]]+", "")
pos <- strsplit(pos, " ")
pos <- unlist(lapply(pos, function(e) e[1]))
remov <- str_which(x4, "[[:punct:]]")
x4 <- x4[-remov] # remove everything irregular
hei <- hei[-remov]
pos <- pos[-remov]
draft_class <- player_list_2$draft_class
draft_class <- draft_class[-remov]
player_list_2 <- player_list_2[-remov,]
player_list_2 <- data.frame(player_list_2$`#`, x4, hei, pos, draft_class)
names(player_list_2) <- c("#", "Player", "H", "P", "draft_class")
# combine 2000-2015
player_list_tot <- rbind(player_list_1, player_list_2)
# save(player_list_tot, file = "player_list_tot.rda")
########################################################
load("player_list_tot.rda")
# webscraping format:
# add hyphen between first and last name
dt <- player_list_tot
player_name <- dt$Player
player_name <- str_replace_all(player_name, " ", "-")
# to lower
player_name <- tolower(player_name)
# create list container
player_html <- vector("list", length(player_name))
# load each player's college stat into each element in the list
for (i in 1:684){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
for (i in 685:length(player_name)){
player_html[[i]] <- tryCatch(read_html(str_c("http://www.sports-reference.com/cbb/players/",
player_name[i], "-1.html")),
error = function(w){print(str_c("error!", i, player_name[i]))},
warning = function(e){print(str_c("warning!",i, player_name[i]))}
)
}
# leave only players whose college stats available
ind_tot <- unlist(lapply(player_html, function(e){length(e) == 2}))
# save(ind_tot, file = "ind_tot.rda")
load("ind_tot.rda")
player_list_tot <- player_list_tot[ind_tot, ]
head(player_list_tot)
# save(player_list_tot, file = "player_list_tot.rda")
player_ind <- player_html[ind_tot]
################################################################################
# convert html table to dataframes
playercollege_stats_tot <- lapply(1:length(player_ind),
function(e) html_table(html_nodes(player_ind[[e]], "table"))[[1]])
# save(playercollege_stats_tot, file = "playercollege_stats_tot.rda")
load("playercollege_stats_tot.rda")
load("player_list_tot.rda")
playername <- player_list_tot$Player
length(playername)
a <- sapply(1:length(playername), function(e) ncol(playercollege_stats_tot[[e]]))
min(a) # 24
max(a) # 26
player_stats <- matrix(rep(NA, n = min(a)*length(playername)),
ncol = min(a), nrow = length(playername))
dim(player_stats)
player_stats <- as.data.frame(player_stats)
for(i in 1:length(playercollege_stats_tot)){
player_stats[i,1:24]<- tail(playercollege_stats_tot[[i]][,1:24], n = 1)
}
names(player_stats) <- names(playercollege_stats_tot[[1]][,1:24])
# remove conference variable
player_stats <- player_stats[,-3]
# rownames
dupl <- which(duplicated(playername) == TRUE)
dup <- which(playername == playername[dupl])
rownames(player_stats)[1:dup[1]] <- playername[1:dup[1]]
rownames(player_stats)[dup[1]] <- "Marcus Williams.1"
rownames(player_stats)[(dup[1]+1):dup[2]] <- playername[(dup[1]+1):dup[2]]
rownames(player_stats)[dup[2]] <- "Marcus Williams.2"
rownames(player_stats)[(dup[2]+1):length(playername)] <-
playername[(dup[2]+1):length(playername)]
head(player_stats)
# combine draft list and college stats
player_stats <- cbind(player_stats[, -c(1,2)],
player_list_tot[, c("#", "H", "P", "draft_class")])
player_stats_tot <- player_stats
x = player_stats_tot$H
a <- str_replace_all(x, "-[0-9]+", "")
a <- cm(as.numeric(a)*12)
b <- str_replace_all(x, "[0-9]+-", "")
b <- cm(as.numeric(b))
player_stats_tot$H <- a + b
head(player_stats_tot)
# save(player_stats_tot, file = "player_stats_tot.rda")
load("player_stats_tot.rda")
##############################################################################
# original 2010 - 2015
load("player_stats_original.rda")
head(player_stats_original)
nrow(player_stats_original)
##############################################################################
# expanded 2000 - 2015
load("player_stats_tot.rda")
head(player_stats_tot)
nrow(player_stats_tot)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis.R
\name{print.vis}
\alias{print.vis}
\title{Print method for a vis object}
\usage{
\method{print}{vis}(x, min.prob = 0.3, print.full.model = FALSE, ...)
}
\arguments{
\item{x}{a \code{vis} object, the result of \code{\link{vis}}}
\item{min.prob}{a lower bound on the probability of
selection before the result is printed}
\item{print.full.model}{logical, determines if the full
model gets printed or not. Default=\code{FALSE}.}
\item{...}{further arguments (currently unused)}
}
\description{
Prints basic output of the bootstrap results of a
vis object.
}
| /man/print.vis.Rd | no_license | garthtarr/mplot | R | false | true | 644 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis.R
\name{print.vis}
\alias{print.vis}
\title{Print method for a vis object}
\usage{
\method{print}{vis}(x, min.prob = 0.3, print.full.model = FALSE, ...)
}
\arguments{
\item{x}{a \code{vis} object, the result of \code{\link{vis}}}
\item{min.prob}{a lower bound on the probability of
selection before the result is printed}
\item{print.full.model}{logical, determines if the full
model gets printed or not. Default=\code{FALSE}.}
\item{...}{further arguments (currently unused)}
}
\description{
Prints basic output of the bootstrap results of an
vis object.
}
|
# Toy data: sleep hours for students from two colleges.
df<-data.frame(
sleep=c(7,8,9,6),
college=c("Saga","Cendana","Cendana","Saga")
)
# Mean sleep per college on the observed data.
aggregate(sleep~college, data=df, mean)
# We want to permute the data; sample() without a `size` argument returns
# a random permutation of its input.
set.seed(123456789)
sample(c("a","b","c"))
# create a new data frame with only college as the permuted variable
df_p<-df
df_p$college<-sample(df$college)
aggregate(sleep~college, data=df_p, mean)
# Simulating permutations in R: null distribution of the correlation
# between eruption length and waiting time in the faithful data.
data(faithful)
cor(faithful$eruptions,faithful$waiting)
# observed correlation: ~0.90
trials<-10000
responses<-numeric(trials)
cor(sample(faithful$eruptions), faithful$waiting)
# one permuted correlation: ~0.04
for(i in 1:trials)
responses[i]<-cor(sample(faithful$eruptions), faithful$waiting)
hist(responses, xlab="Correlation")
#IN-CLASS PREP
titanic<-read.csv("titanic.csv", stringsAsFactors = F)
# 20 draws of 5 numbers from 1:10 without replacement, one draw per row.
trials<-20
d<-5
res<-matrix(NA, trials, d)
for(i in 1:trials)
res[i,]<-sample(1:10, 5, replace=F)
res[2,]
res[,5]
#IN-CLASS WORK
#Coin flipping simulation: Is Tim right?
# For each head-probability p in 0.1..0.9, simulate 10,000 series of 10
# tosses and record the observed proportion of heads in each series.
p_val<-seq(0.1, 0.9, by=0.1)
p_dim<-length(p_val)
#number of coin tosses
n<-10
#number of trials
trials<-10000
outcomes<-c("H","T")
results<-matrix(NA, trials, p_dim)
for(i in 1:p_dim){
for(j in 1:trials){
sim_coin<-sample(outcomes, size=n, replace=T, prob=c(p_val[i], 1-p_val[i]))
results[j,i]<-sum(sim_coin=="H")/n
}
}
# One histogram of observed head proportions per value of p (3x3 grid).
par(mfrow=c(3,3))
for(i in 1:p_dim){
p<-hist(results[,i], probability = T, breaks=seq(0,1,by=0.1),
xlim=c(0,1),
main=paste("p=",p_val[i]),
xaxt="n")
}
#TITANIC DATA
titanic<-read.csv("titanic.csv", stringsAsFactors = F)
# Print the absolute counts and the relative frequencies of a categorical
# vector. The value of the final print() call -- the relative-frequency
# table -- is returned invisibly, so callers may scale it, e.g.
# freq_tables(x) * 100. Note the denominator is length(cat_data), which
# includes NAs even though table() drops them.
freq_tables <- function(cat_data) {
  counts <- table(cat_data)
  print(counts)
  print(counts / length(cat_data))
}
# Marginal distributions of the main categorical variables.
freq_tables(titanic$gender)
freq_tables(titanic$class)
freq_tables(titanic$survived)
freq_tables(titanic$age>=18)
# Joint distribution of survival and gender: counts, then percentages.
table(titanic$survived, titanic$gender)
table(titanic$survived, titanic$gender) / nrow(titanic)*100
# Survival percentages within each gender separately.
freq_tables(titanic$survived[titanic$gender=="Male"])*100
freq_tables(titanic$survived[titanic$gender=="Female"])*100
#simulation solution
# Test statistic: ratio of the male death rate to the female death rate.
# `survived` is treated as 0/1, so mean(survived) within a gender subset
# is that group's survival rate and 1 - mean(survived) its death rate.
response <- function(dying_people) {
  death_rate <- function(g) {
    1 - mean(dying_people$survived[dying_people$gender == g])
  }
  death_rate("Male") / death_rate("Female")
}
# Observed statistic on the real data (male/female death-rate ratio).
response(titanic)
# One permutation replicate: shuffle the gender labels (breaking any real
# association between gender and survival) and recompute the statistic.
# Relies on the script-level `titanic` data frame and response().
permute <- function() {
  shuffled <- titanic
  shuffled$gender <- sample(shuffled$gender)
  response(shuffled)
}
permute() #is our observed difference of 3 actually statistically significant??
# Build the permutation null distribution of the death-rate ratio.
trials<-10000
results<-numeric(trials)
for(i in 1:trials){
results[i]<-permute()
}
par(mfrow=c(1,1))
hist(results)
# Total ticket price in pounds (20 shillings and 240 pence per pound).
titanic$total_price<-titanic$ticket_pound + titanic$ticket_shilling/20 +titanic$ticket_penny/240
# Male second-class passengers with a recorded (non-zero) price.
tit<-titanic[titanic$total_price!=0 & titanic$gender=="Male" & titanic$class=="2nd",]
# Regress log ticket price on age and overlay the fitted line.
model<-lm(log(tit$total_price)~tit$age, data=tit)
plot(log(tit$total_price)~tit$age, col="blue")
abline(model, lwd=3)
coef(model)
slope<-model$coefficients[2]
# Permutation test: is the observed age slope significant?
# Permute age relative to price and refit to build the null distribution
# of the slope.
trials <- 10000
results <- numeric(trials)
for (i in 1:trials) {
  results[i] <- lm(log(total_price) ~ sample(age), data = tit)$coefficients[2]
}
hist(results)
# Two-sided p-value: fraction of permuted slopes at least as extreme as
# the observed one. BUG FIX: the original
#   mean(results <= slope | results >= -slope)
# is a valid two-tailed test only when the observed slope is negative;
# for a positive slope the union covers essentially everything and the
# p-value is ~1. Using absolute values handles both signs correctly.
mean(abs(results) >= abs(slope))
# p = 0.06
sleep=c(7,8,9,6),
college=c("Saga","Cendana","Cendana","Saga")
)
aggregate(sleep~college, data=df, mean)
#we want to permute the data
#we will use SAMPLE
set.seed(123456789)
sample(c("a","b","c"))
#create a new data frame with only college as the permuted variable
df_p<-df
df_p$college<-sample(df$college)
aggregate(sleep~college, data=df_p, mean)
#simulating permutations in R
data(faithful)
cor(faithful$eruptions,faithful$waiting)
#0.90
trials<-10000
responses<-numeric(trials)
cor(sample(faithful$eruptions), faithful$waiting)
#0.04
for(i in 1:trials)
responses[i]<-cor(sample(faithful$eruptions), faithful$waiting)
hist(responses, xlab="Correlation")
#IN-CLASS PREP
titanic<-read.csv("titanic.csv", stringsAsFactors = F)
trials<-20
d<-5
res<-matrix(NA, trials, d)
for(i in 1:trials)
res[i,]<-sample(1:10, 5, replace=F)
res[2,]
res[,5]
#IN-CLASS WORK
#Coin flipping simulation: Is Tim right?
p_val<-seq(0.1, 0.9, by=0.1)
p_dim<-length(p_val)
#number of coin tosses
n<-10
#number of trials
trials<-10000
outcomes<-c("H","T")
results<-matrix(NA, trials, p_dim)
for(i in 1:p_dim){
for(j in 1:trials){
sim_coin<-sample(outcomes, size=n, replace=T, prob=c(p_val[i], 1-p_val[i]))
results[j,i]<-sum(sim_coin=="H")/n
}
}
par(mfrow=c(3,3))
for(i in 1:p_dim){
p<-hist(results[,i], probability = T, breaks=seq(0,1,by=0.1),
xlim=c(0,1),
main=paste("p=",p_val[i]),
xaxt="n")
}
#TITANIC DATA
titanic<-read.csv("titanic.csv", stringsAsFactors = F)
freq_tables<-function(cat_data){
tab<-table(cat_data) #create a table from the data that's been read in
print(tab)
print(tab/length(cat_data))
}
freq_tables(titanic$gender)
freq_tables(titanic$class)
freq_tables(titanic$survived)
freq_tables(titanic$age>=18)
table(titanic$survived, titanic$gender)
table(titanic$survived, titanic$gender) / nrow(titanic)*100
freq_tables(titanic$survived[titanic$gender=="Male"])*100
freq_tables(titanic$survived[titanic$gender=="Female"])*100
#simulation solution
response<-function(dying_people){
death_rate_males<- 1- mean(dying_people$survived[dying_people$gender=="Male"])
death_rate_females<- 1- mean(dying_people$survived[dying_people$gender=="Female"])
death_rate_males/death_rate_females #finding the ratio of death rate between males and females
}
response(titanic)
permute<-function(){
tmp<-titanic
#permute gender
tmp$gender<-sample(tmp$gender)
response(tmp)
}
permute() #is our observed difference of 3 actually statistacally significant??
trials<-10000
#
results<-numeric(trials)
for(i in 1:trials){
results[i]<-permute()
}
par(mfrow=c(1,1))
hist(results)
titanic$total_price<-titanic$ticket_pound + titanic$ticket_shilling/20 +titanic$ticket_penny/240
tit<-titanic[titanic$total_price!=0 & titanic$gender=="Male" & titanic$class=="2nd",]
model<-lm(log(tit$total_price)~tit$age, data=tit)
plot(log(tit$total_price)~tit$age, col="blue")
abline(model, lwd=3)
coef(model)
slope<-model$coefficients[2]
#is the slope significant
trials<-10000
results<-numeric(trials)
for(i in 1:trials){
results[i]<-lm(log(total_price)~sample(age), data=tit)$coefficients[2]
}
hist(results)
mean(results<=slope | results>=-slope)
#p=0.06 |
I am done
| /third_party/virtualbox/src/VBox/ExtPacks/VBoxDTrace/onnv/cmd/dtrace/test/tst/common/dtraceUtil/tst.ZeroProviderProbes.d.ksh.out | permissive | thalium/icebox | R | false | false | 10 | out | I am done
|
library(OpenMx)
# Simulate some data: x ~ N(0,1) and y = 0.5*x + N(0,1) noise.
sampleSize <- 250
x=rnorm(sampleSize, mean=0, sd=1)
y= 0.5*x + rnorm(sampleSize, mean=0, sd=1)
tmpFrame <- data.frame(x, y)
tmpNames <- names(tmpFrame)
# Create a model that includes an expected covariance matrix,
# an expectation function, a fit function, and an observed covariance matrix
data <- mxData(cov(tmpFrame), type="cov", numObs = sampleSize)
# Free 2x2 symmetric matrix of expected covariances (start values .2/.1/.2).
expCov <- mxMatrix(type="Symm", nrow=2, ncol=2, values=c(.2,.1,.2), free=TRUE, name="expCov")
expFunction <- mxExpectationNormal(covariance="expCov", dimnames=tmpNames)
fitFunction <- mxFitFunctionML()
testModel <- mxModel(model="testModel", expCov, data, expFunction, fitFunction)
# Use mxRun to optimize the free parameters in the expected covariance
# matrix; checkpoint = TRUE writes optimizer progress to a checkpoint file.
modelOut <- mxRun(testModel, checkpoint = TRUE)
modelOut$expCov
# Save the ending state of modelOut in a checkpoint file
mxSave(modelOut)
# Restore from the checkpoint into the unfitted model and verify the
# estimated covariances round-trip (within 1e-5).
modelRestored <- mxRestore(testModel)
omxCheckCloseEnough(modelRestored$expCov$values, modelOut$expCov$values, 1e-5)
| /inst/models/passing/mxSave.R | permissive | falkcarl/OpenMx | R | false | false | 1,012 | r | library(OpenMx)
# Simulate some data
sampleSize <- 250
x=rnorm(sampleSize, mean=0, sd=1)
y= 0.5*x + rnorm(sampleSize, mean=0, sd=1)
tmpFrame <- data.frame(x, y)
tmpNames <- names(tmpFrame)
# Create a model that includes an expected covariance matrix,
# an expectation function, a fit function, and an observed covariance matrix
data <- mxData(cov(tmpFrame), type="cov", numObs = sampleSize)
expCov <- mxMatrix(type="Symm", nrow=2, ncol=2, values=c(.2,.1,.2), free=TRUE, name="expCov")
expFunction <- mxExpectationNormal(covariance="expCov", dimnames=tmpNames)
fitFunction <- mxFitFunctionML()
testModel <- mxModel(model="testModel", expCov, data, expFunction, fitFunction)
#Use mxRun to optimize the free parameters in the expected covariance matrix
modelOut <- mxRun(testModel, checkpoint = TRUE)
modelOut$expCov
# Save the ending state of modelOut in a checkpoint file
mxSave(modelOut)
modelRestored <- mxRestore(testModel)
omxCheckCloseEnough(modelRestored$expCov$values, modelOut$expCov$values, 1e-5)
|
# Checks that the reference and the fast k-gram counters agree on a
# realistic corpus (first 10k tweets of the training set, 3-grams).
context("comparing get_kgram_freqs() and get_kgram_freqs_fast()")
test_that("coincidence on long char vector", {
  freqs <- get_kgram_freqs(twitter_train[1:10000], 3, twitter_dict)
  freqs_fast <- get_kgram_freqs_fast(twitter_train[1:10000], 3, twitter_dict)
  # Row order may differ between implementations, so sort every component
  # table by its word columns ("w...") before comparing.
  transform <- . %>% arrange(across(starts_with("w")))
  # lapply() strips the objects' attributes, so snapshot them beforehand
  # and re-attach them after the per-table sort.
  freqs_attr_bckp <- attributes(freqs)
  freqs %<>% lapply(transform)
  attributes(freqs) <- freqs_attr_bckp
  freqs_fast_attr_bckp <- attributes(freqs_fast)
  freqs_fast %<>% lapply(transform)
  attributes(freqs_fast) <- freqs_fast_attr_bckp
  # NOTE(review): the ".preprocess" attributes are copied from one object to
  # the other before comparison, so any difference between them is
  # deliberately ignored; expect_identical() then covers only the counts.
  attr(freqs_fast, ".preprocess") <- attr(freqs, ".preprocess")
  expect_identical(freqs, freqs_fast)
})
| /tests/testthat/test-get_kgram_freqs_comparisons.R | no_license | minghao2016/sbo | R | false | false | 758 | r | context("comparing get_kgram_freqs() and get_kgram_freqs_fast()")
test_that("coincidence on long char vector", {
freqs <- get_kgram_freqs(twitter_train[1:10000], 3, twitter_dict)
freqs_fast <- get_kgram_freqs_fast(twitter_train[1:10000], 3, twitter_dict)
transform <- . %>% arrange(across(starts_with("w")))
freqs_attr_bckp <- attributes(freqs)
freqs %<>% lapply(transform)
attributes(freqs) <- freqs_attr_bckp
freqs_fast_attr_bckp <- attributes(freqs_fast)
freqs_fast %<>% lapply(transform)
attributes(freqs_fast) <- freqs_fast_attr_bckp
attr(freqs_fast, ".preprocess") <- attr(freqs, ".preprocess")
expect_identical(freqs, freqs_fast)
})
|
#' @title
#' Create a New Game.
#'
#' @description
#' `create_game()` sets up a new Monty Hall game by shuffling two goats and
#' one car behind three doors.
#'
#' @details
#' This function creates a new game for the Monty Hall problem. There are
#' three doors hiding two goats and one car; the arrangement is random, so
#' the simulation can be used to test the problem's probabilities.
#'
#' @return
#' A length-3 character vector giving the positions of the goats and the car.
#'
#' @examples
#' create_game()
#'
#' @export
create_game <- function()
{
  prizes <- c("goat", "goat", "car")
  # a full-length sample without replacement is a random permutation
  return(sample(x = prizes, size = 3, replace = FALSE))
}
#' @title
#' Select Door.
#'
#' @description
#' `select_door()` randomly picks one of the three doors as the contestant's
#' initial choice.
#'
#' @details
#' The first pick is made before anything is revealed; it can only be door
#' 1, 2 or 3.
#'
#' @return
#' A single number (1, 2 or 3) representing the contestant's chosen door.
#'
#' @examples
#' select_door()
#'
#' @export
select_door <- function( )
{
  first.choice <- sample(c(1, 2, 3), size = 1)
  return(first.choice)
}
#' @title
#' Open the Goat Door.
#'
#' @description
#' `open_goat_door()` has the host open a door that hides a goat and is not
#' the door the contestant picked.
#'
#' @details
#' If the contestant's pick hides the car, the host chooses at random
#' between the two goat doors; if it hides a goat, exactly one valid goat
#' door remains and that one is opened.
#'
#' @param game a length-3 character vector of "goat"/"car" positions
#' @param a.pick the number of the door the contestant picked first
#'
#' @return
#' A number (1, 2 or 3) representing the goat door the host opened.
#'
#' @examples
#' open_goat_door(game = c("goat", "car", "goat"), a.pick = 2)
#'
#' @export
open_goat_door <- function( game, a.pick )
{
  doors <- c(1, 2, 3)
  if (game[a.pick] == "car")
  {
    # contestant sits on the car: either goat door may be opened
    goat.options <- doors[game != "car"]
    opened.door <- sample(goat.options, size = 1)
  }
  if (game[a.pick] == "goat")
  {
    # contestant sits on a goat: only one other goat door is available
    opened.door <- doors[game != "car" & doors != a.pick]
  }
  return(opened.door)
}
#' @title
#' Change Doors (Stay or Switch).
#'
#' @description
#' `change_door()` resolves the contestant's final pick: either keep the
#' original door or switch to the one remaining closed door.
#'
#' @details
#' The final pick can never be the opened (goat) door; when switching, it
#' is the single door that is neither opened nor originally picked.
#'
#' @param stay `TRUE` keeps the first pick, `FALSE` switches doors
#' @param opened.door the number of the goat door the host opened
#' @param a.pick the number of the contestant's first pick
#'
#' @return
#' A number (1, 2 or 3) representing the contestant's final door choice.
#'
#' @examples
#' change_door(stay = FALSE, opened.door = 2, a.pick = 1)
#'
#' @export
change_door <- function( stay=T, opened.door, a.pick )
{
  doors <- c(1, 2, 3)
  if (stay) {
    final.pick <- a.pick
  } else {
    # the one door that is neither opened nor the original pick
    final.pick <- doors[doors != opened.door & doors != a.pick]
  }
  return(final.pick)
}
#' @title
#' Determine the Winner.
#'
#' @description
#' `determine_winner()` reveals what is behind the contestant's final pick:
#' the car wins, a goat loses.
#'
#' @param final.pick the number of the contestant's final door
#' @param game a length-3 character vector of "goat"/"car" positions
#'
#' @return
#' "WIN" if the final pick hides the car, "LOSE" if it hides a goat.
#'
#' @examples
#' determine_winner(final.pick = 1, game = c("car", "goat", "goat"))
#'
#' @export
determine_winner <- function( final.pick, game )
{
  prize <- game[final.pick]
  if (prize == "car") return("WIN")
  if (prize == "goat") return("LOSE")
}
#' @title
#' Play One Full Game.
#'
#' @description
#' `play_game()` runs one complete Monty Hall game and records the outcome
#' of both the "stay" and the "switch" strategy.
#'
#' @details
#' Both strategies are evaluated on the *same* random game, so their
#' outcomes are directly comparable.
#'
#' @return
#' A data frame with columns `strategy` ("stay"/"switch") and `outcome`
#' ("WIN"/"LOSE"), one row per strategy.
#'
#' @examples
#' play_game()
#'
#' @export
play_game <- function( )
{
  new.game <- create_game()
  first.pick <- select_door()
  opened.door <- open_goat_door(new.game, first.pick)
  # resolve both strategies on the same game
  outcome.stay <- determine_winner(change_door(stay = TRUE, opened.door, first.pick), new.game)
  outcome.switch <- determine_winner(change_door(stay = FALSE, opened.door, first.pick), new.game)
  game.results <- data.frame(
    strategy = c("stay", "switch"),
    outcome = c(outcome.stay, outcome.switch),
    stringsAsFactors = FALSE
  )
  return(game.results)
}
#' @title
#' Play the Game n Times (by default, n = 1000).
#'
#' @description
#' Runs `play_game()` in a loop to estimate the win probabilities of the
#' "stay" and "switch" strategies.
#'
#' @details
#' The per-strategy win/lose proportions are printed (rounded to 2 digits)
#' and the raw per-game results are returned for further inspection.
#'
#' @param n the number of games to simulate (default 1000)
#'
#' @return
#' A data frame with one `strategy`/`outcome` row pair per simulated game.
#'
#' @examples
#' play_n_games(n = 100)
#'
#' @export
play_n_games <- function( n=1000 )
{
  # preallocate the collector; growing a list inside the loop is O(n^2)
  results.list <- vector("list", n)
  for (i in seq_len(n)) {
    results.list[[i]] <- play_game()
  }
  # namespace-qualified call: avoids the library(dplyr) side effect the
  # previous version had (attaching a package inside a function modifies
  # the caller's search path)
  results.df <- dplyr::bind_rows(results.list)
  # row proportions of WIN/LOSE per strategy, rounded for display
  print(round(prop.table(table(results.df), margin = 1), 2))
  return(results.df)
}
#' Create a New Game.
#'
#' @description
#' `create_game()` randomly assigns a specified number of both goats and cars to the
#' number
#'
#' @details
#' This funtion creates a new game for the monty hall problem. There are three doors:
#' two goats and one car. this simulation gives you the opportunity to test the
#' probabilities!
#'
#'
#' @param x Numeric vector
#'
#' @param values must be whole integers greater than zero
#'
#' @return
#' Returns a length 3 charater vector showing the positions of the goats and the car
#'
#' @examples
#' create_game()
#'
#'
#' @export
create_game <- function()
{
a.game <- sample( x=c("goat","goat","car"), size=3, replace=F )
return( a.game )
}
#' @title
#' Select Door.
#'
#' @description
#' `select_door()` A door is randomly selected as the contestant choice.
#'
#' @details
#' This function assignes a random door as the contestant's first choice
#' of door before anything is revealed. The contestant can only choose between door 1, 2, or 3.
#'
#' @param x Numeric vector
#'
#' @return
#' an integer that represents the number of the door that the participant selects.
#'
#' @examples
#' select_door()
#'
#' @export
select_door <- function( )
{
doors <- c(1,2,3)
a.pick <- sample( doors, size=1 )
return( a.pick ) # number between 1 and 3
}
#' @title
#' Open the Goat Door.
#'
#' @description
#' `open_goat_door()` opens a door with a goat behind
#' it and that is also not the same door the contestant
#' chose.
#'
#' @details
#' The opened door must have a goat behind it,
#' meaning that the door must not have a car behind it.
#'
#' @param ... the arguments are `game` and `a.pick`
#'
#' @return
#' This returns a number representing the number of
#' the goat door that the host opened.
#'
#' @example
#' open_goat_door(game = newGame, a.pick = firstDoor
#'
#' @export
open_goat_door <- function( game, a.pick )
{
doors <- c(1,2,3)
# if contestant selected car,
# randomly select one of two goats
if( game[ a.pick ] == "car" )
{
goat.doors <- doors[ game != "car" ]
opened.door <- sample( goat.doors, size=1 )
}
if( game[ a.pick ] == "goat" )
{
opened.door <- doors[ game != "car" & doors != a.pick ]
}
return( opened.door ) # number between 1 and 3
}
#' @title
#' Changing Door Function.
#'
#' @description
#' `change_door()` the contestant is asked if they would like to change their
#' selection to a different door or if they would like to keep their first choice.
#'
#' @details
#' a door is generated that represents the door that the
#' contestant chose, whether they switched or stayed.
#'
#' @param ... If the argument is `TRUE` it means that the contestant stayed
#' and `FALSE` means that the contestant switched doors.
#'
#' @return
#' This function returns a number representing the number of the final door choice for the contestant.
#'
#' @examples
#' change_door(stay = F, opened.door = 2, a.pick = 1)
#'
#' @export
change_door <- function( stay=T, opened.door, a.pick )
{
doors <- c(1,2,3)
if( stay )
{
final.pick <- a.pick
}
if( ! stay )
{
final.pick <- doors[ doors != opened.door & doors != a.pick ]
}
return( final.pick ) # number between 1 and 3
}
#' @title
#' Determine Winner.
#'
#' @description
#' `determine_winner()` returns the item behind the final door chosen, and assigns
#' choosing the car as winning, and choosing the goat as losing.
#'
#' @details
#' Provides final door and determines whether they won or lost.
#'
#' @param ... `final.pick` and `game` are the arguments
#'
#' @return
#' the function returns a "WIN" or "LOSE" based on whether or not the game was won.
#'
#' @examples
#' determine_winner(final.pick = finalDoor, game = newGame)
#'
#' @export
determine_winner <- function( final.pick, game )
{
if( game[ final.pick ] == "car" )
{
return( "WIN" )
}
if( game[ final.pick ] == "goat" )
{
return( "LOSE" )
}
}
#' @title
#' Play the Game
#'
#' @description
#' `play_game()` combines the five functions above into this one function.
#'
#' @details
#' This function lists the corresponding outcomes for each strategy, which will vary each time.
#'
#' @param ... No parameters
#'
#' @return
#' This returns a dataframe with columns `strategy` and `outcome`
#' strategy can be either stay or switch
#' outcome can be either win or lose
#'
#' @examples
#' play_game()
#'
#' @export
play_game <- function( )
{
new.game <- create_game()
first.pick <- select_door()
opened.door <- open_goat_door( new.game, first.pick )
final.pick.stay <- change_door( stay=T, opened.door, first.pick )
final.pick.switch <- change_door( stay=F, opened.door, first.pick )
outcome.stay <- determine_winner( final.pick.stay, new.game )
outcome.switch <- determine_winner( final.pick.switch, new.game )
strategy <- c("stay","switch")
outcome <- c(outcome.stay,outcome.switch)
game.results <- data.frame( strategy, outcome,
stringsAsFactors=F )
return( game.results )}
#' @title
#' Play the Game n times (by default, n=1000).
#'
#' @description
#' creates the game structure on a loop to examine the outcomes
#' of running the game a multitude of times. Gives probability by
#' playing the game as many times as needed using `play_n_games()`.
#'
#' @details
#' The default for the game is 1000 times, but it can be played n time
#' Corresponding outcomes are listed for each game.
#'
#' @param ... "n", or the number of times to run the loop
#'
#' @return
#' This function creates a dataframe that combines the outcomes of total games and lists then out of 1
#'
#' @examples
#' play_n_games(n=1000)
#'
#' @export
play_n_games <- function( n=1000 )
{
library( dplyr )
results.list <- list() # collector
loop.count <- 1
for( i in 1:n ) # iterator
{
game.outcome <- play_game()
results.list[[ loop.count ]] <- game.outcome
loop.count <- loop.count + 1
}
results.df <- dplyr::bind_rows( results.list )
table( results.df ) %>%
prop.table( margin=1 ) %>% # row proportions
round( 2 ) %>%
print()
return( results.df )
} |
\name{recreate.olddata}
\alias{recreate.olddata}
\title{Convert data stored in msm object to old format}
\description{
Converts the \code{data} element of msm objects to the old format.
}
\usage{
recreate.olddata(x)
}
\arguments{
\item{x}{Object returned by the \code{\link{msm}} function,
representing a fitted multi-state model.}
}
\value{
A list of vectors and matrices in the undocumented ad-hoc format used
for the \code{data} component of \code{msm} objects in \pkg{msm}
versions 1.3.1 and earlier.
}
\details{
This is just provided for convenience and to illustrate the changes.
It is not guaranteed to be complete, and is liable to be withdrawn.
Users who were relying on the previous undocumented format are advised
to upgrade their code to use the new format, which uses model frames
and model design matrices in the standard format used in version 1.4,
based on \code{\link{model.frame}} and \code{\link{model.matrix}}.
}
| /man/recreate.olddata.Rd | no_license | cran/msm | R | false | false | 963 | rd | \name{recreate.olddata}
\alias{recreate.olddata}
\title{Convert data stored in msm object to old format}
\description{
Converts the \code{data} element of msm objects to the old format.
}
\usage{
recreate.olddata(x)
}
\arguments{
\item{x}{Object returned by the \code{\link{msm}} function,
representing a fitted multi-state model.}
}
\value{
A list of vectors and matrices in the undocumented ad-hoc format used
for the \code{data} component of \code{msm} objects in \pkg{msm}
versions 1.3.1 and earlier.
}
\details{
This is just provided for convenience and to illustrate the changes.
It is not guaranteed to be complete, and is liable to be withdrawn.
Users who were relying on the previous undocumented format are advised
to upgrade their code to use the new format, which uses model frames
and model design matrices in the standard format used in version 1.4,
based on \code{\link{model.frame}} and \code{\link{model.matrix}}.
}
|
#' Correction Coefficient of Travel Speed in 2+1 Lane Road
#'
#' It corrects travel speed in a 2+1 lane road.
#' This function follows <Table 7-15> in KHCM(2013), p.188.
#' @param v Traffic volume in 2+1 lane road(pcphpl).
#' @keywords 2+1 lane road correction coefficient TDR total delay ratio
#' @export fs_pl_2lp1 Correction coefficient of travel speed in 2+1 lane road Section(f_pl)
#' @examples
#' fs_pl_2lp1(v = 1391)
#' fs_pl_2lp1(999)
fs_pl_2lp1 <- function(v = NULL){
  # Upper bounds of the 100-pcphpl volume classes from <Table 7-15>.
  class_upper <- seq(100, 1700, by = 100)
  # Correction coefficient per class (0,100], (100,200], ..., (1600,1700].
  class_coef <- c(1.025, 1.034, 1.042, 1.050, 1.057, 1.063, 1.069, 1.074,
                  1.078, 1.081, 1.084, 1.086, 1.088, 1.089, 1.089, 1.088,
                  1.087)
  if (v > 0 & v <= 1700) {
    # left.open = TRUE makes each class half-open on the left, matching the
    # original (lower, upper] comparisons of the if/else chain.
    f <- class_coef[findInterval(v, class_upper, left.open = TRUE) + 1]
  } else {
    f <- 'Error : [v] must be >= 0 and <= 1700(pcphpl). Please check that.'
  }
  f
}
| /R/fs_pl_2lp1.R | no_license | regenesis90/KHCMinR | R | false | false | 1,304 | r | #' Correction Coefficient of Travel Speed in 2+1 Lane Road
#'
#' It correct travel speed in 2+1 lane road.
#' This function follows <Table 7-15> in KHCM(2013), p.188.
#' @param v Traffic volume in 2+1 lane road(pcphpl).
#' @keywords 2+1 lane road correction coefficient TDR total delay ratio
#' @export fs_pl_2lp1 Correction coefficient of travel speed in 2+1 lane road Section(f_pl)
#' @examples
#' fs_pl_2lp1(v = 1391)
#' fs_pl_2lp1(999)
fs_pl_2lp1 <- function(v = NULL){
if (v > 0 & v <= 100){f <- 1.025}
else if (v > 100 & v <= 200){f <- 1.034}
else if (v > 200 & v <= 300){f <- 1.042}
else if (v > 300 & v <= 400){f <- 1.050}
else if (v > 400 & v <= 500){f <- 1.057}
else if (v > 500 & v <= 600){f <- 1.063}
else if (v > 600 & v <= 700){f <- 1.069}
else if (v > 700 & v <= 800){f <- 1.074}
else if (v > 800 & v <= 900){f <- 1.078}
else if (v > 900 & v <= 1000){f <- 1.081}
else if (v > 1000 & v <= 1100){f <- 1.084}
else if (v > 1100 & v <= 1200){f <- 1.086}
else if (v > 1200 & v <= 1300){f <- 1.088}
else if (v > 1300 & v <= 1400){f <- 1.089}
else if (v > 1400 & v <= 1500){f <- 1.089}
else if (v > 1500 & v <= 1600){f <- 1.088}
else if (v > 1600 & v <= 1700){f <- 1.087}
else {f <- 'Error : [v] must be >= 0 and <= 1700(pcphpl). Please check that.'}
f
}
|
# discretises a list of vectors
# for each column we get a list of (discretised and breaks)
# Discretises every vector in a named list.
# For each element the result holds a list of (discretised, breaks,
# frequencies), as produced by discretise_vector().
discretise <- function(
  inp=NULL,    # an input list with cols 'A', 'B' etc.
  levels=5,    # number of discrete levels
  method='mltools'
){
  cnames <- names(inp)
  ret <- lapply(cnames, function(acolname) {
    discretise_vector(
      x = inp[[acolname]],
      levels = levels,
      method = method
    )
  })
  names(ret) <- cnames
  return(ret)
}
# discretises a single vector
# returned is a list of (discretised and breaks)
# Discretises a single numeric vector into 'levels' quantile-based classes.
# Returns a list of (discretised, breaks, frequencies), or NULL if the
# chosen backend fails or the method name is not recognised.
discretise_vector <- function(
  x=NULL,      # an input numeric vector
  levels=5,    # number of discrete levels
  method='mltools'  # backend: 'arules' or 'mltools'
){
  whoami=paste0(match.call()[[1]],'()',collapse='')
  if( method == 'arules' ){
    library(arules)
    # onlycuts = TRUE returns only the cut points, not the binned data
    thecuts=arules::discretize(
      x=x,
      method="frequency",
      categories=levels,
      onlycuts = T
    )
    if( is.null(thecuts) ){
      cat(whoami, " : call to arules::discretize() has failed.\n")
      return(NULL)
    }
  } else if( method == 'mltools' ){
    library(mltools)
    library(stringr)
    dummy <- mltools::bin_data(
      x=x,
      bins=levels,
      binType='quantile',
      returnDT=TRUE
    )
    if( is.null(dummy) ){
      cat(whoami, " : call to mltools::bin_data() has failed.\n")
      return(NULL)
    }
    # mltools encodes bins as text levels such as "[a, b)"; parse the
    # numeric boundaries back out of each level's label
    badR = levels(dummy$Bin) # these are text levels!!!!
    thecuts=c()
    for(alevel in badR){
      regres <- str_match(alevel,"[\\[\\(](.*?),\\s*(.*?)[\\]\\)]$")
      thecuts=append(thecuts, as.numeric(regres[2]))
    }
    # append the upper bound of the last bin (regres still holds it)
    thecuts=append(thecuts, as.numeric(regres[3]))
  } else {
    cat(whoami, " : method '", method, "' is not recognised.\n")
    return(NULL)
  }
  # Keep unique breaks only. unique() preserves first-occurrence order,
  # exactly like the previous element-by-element accumulation loop, but
  # without the O(n^2) grow-by-append pattern.
  unique_cuts = unique(thecuts)
  # NA's are introduced at the boundaries so extend the breaks a bit
  unique_cuts[1] = unique_cuts[1]-0.1
  unique_cuts[length(unique_cuts)] = unique_cuts[length(unique_cuts)]+0.1
  discre_data <- as.numeric(cut(x, breaks=unique_cuts))
  ret=list()
  ret[['discretised']] = discre_data
  ret[['breaks']] = unique_cuts
  ret[['frequencies']] = table(cut(x,unique_cuts, include.lowest=TRUE))
  return(ret)
}
# Builds a (value, count) histogram matrix for every column of a list,
# optionally re-using the break points stored in unique_vals (as built by
# unique_values()).
histo <- function(
  inp=NULL,          # input list
  unique_vals=NULL   # optional unique_vals list
){
  cnames <- names(inp)
  ret <- lapply(cnames, function(acolname) {
    histo_vector(
      x = inp[[acolname]],
      breaks = unique_vals[['histograms']][[acolname]]$breaks
    )
  })
  names(ret) <- cnames
  return(ret)
}
# Converts a vector into a two-column (value, count) matrix, where value
# is the midpoint of each histogram bin and count the number of items in it.
histo_vector <- function(
  x=NULL,      # input vector
  breaks=NULL  # optional breaks forwarded to hist()
){
  if (is.null(breaks)) {
    ahist <- hist(x = x, plot = FALSE)
  } else {
    ahist <- hist(x = x, breaks = breaks, plot = FALSE)
  }
  # vectorised midpoints: breaks[i] + (breaks[i+1] - breaks[i]) / 2
  lower <- ahist$breaks[-length(ahist$breaks)]
  upper <- ahist$breaks[-1]
  ret <- cbind(value = lower + (upper - lower) / 2, count = ahist$counts)
  return(ret)
}
# Summarises every column of a list: its unique values, how many unique
# values there are, and a histogram of the column built with those unique
# values as break points.
unique_values <- function(
  inp=NULL   # input list of numeric vectors, one element per "column"
){
  # function name, kept for symmetry with the other helpers (unused here)
  whoami=paste0(match.call()[[1]],'()',collapse='')
  cnames = names(inp)
  ncnames = length(cnames)
  # per-column unique values, stored under each column's name
  unix=vector("list", ncnames)
  names(unix) <- cnames
  # 1 x ncol matrix of unique-value counts, one column per input column
  num_unix=matrix(nrow=1,ncol=ncnames)
  histos=vector("list", ncnames)
  colnames(num_unix) <- cnames
  for(acolname in cnames){
    unix[[acolname]] = unique(inp[[acolname]])
    num_unix[,acolname] = length(unix[[acolname]])
    # NOTE(review): the unique values are passed directly as hist() breaks;
    # hist() requires at least two break points, so a column with a single
    # unique value will error here -- confirm inputs guarantee >= 2 uniques.
    histos[[acolname]] = hist(inp[[acolname]], breaks=unix[[acolname]], plot=F)
  }
  # assemble the three parallel summaries into one named list
  ret=list()
  ret[['unique_values']] = unix
  ret[['num_unique_values']] = num_unix
  ret[['histograms']] = histos
  return(ret)
}
# Summarises a single vector: its unique values, their count, and a
# histogram of x built with those unique values as break points.
unique_values_vector <- function(
  x=NULL   # input numeric vector
){
  uniques <- unique(x)
  return(list(
    unique_values = uniques,
    num_unique_values = length(uniques),
    histograms = hist(x = x, breaks = uniques, plot = FALSE)
  ))
}
| /lib/DATA.R | no_license | hadjiprocopis/DUTC | R | false | false | 4,032 | r | # discretises a list of vectors
# for each column we get a list of (discretised and breaks)
discretise <- function(
inp=NULL, # an input list with cols 'A', 'B' etc.
levels=5, # number of discrete levels
method='mltools'
){
whoami=paste0(match.call()[[1]],'()',collapse='')
cnames = names(inp)
ncnames = length(cnames)
ret=vector("list", ncnames)
names(ret) <- cnames
for(acolname in cnames){
ret[[acolname]] = discretise_vector(
x=inp[[acolname]],
levels=levels,
method=method
)
}
return(ret)
}
# discretises a single vector
# returned is a list of (discretised and breaks)
discretise_vector <- function(
x=NULL, # an input vector
levels=5, # number of discrete levels
method='mltools'
){
whoami=paste0(match.call()[[1]],'()',collapse='')
if( method == 'arules' ){
library(arules)
thecuts=arules::discretize(
x=x,
method="frequency",
categories=levels,
onlycuts = T
)
if( is.null(thecuts) ){
cat(whoami, " : call to arules::discretize() has failed.\n")
return(NULL)
}
} else if( method == 'mltools' ){
library(mltools)
library(stringr)
dummy <- mltools::bin_data(
x=x,
bins=levels,
binType='quantile',
returnDT=TRUE
)
if( is.null(dummy) ){
cat(whoami, " : call to mltools::bin_data() has failed.\n")
return(NULL)
}
badR = levels(dummy$Bin) # these are text levels!!!!
thecuts=c()
for(alevel in badR){
regres <- str_match(alevel,"[\\[\\(](.*?),\\s*(.*?)[\\]\\)]$")
thecuts=append(thecuts, as.numeric(regres[2]))
}
thecuts=append(thecuts, as.numeric(regres[3]))
} else {
cat(whoami, " : method '", method, "' is not recognised.\n")
return(NULL)
}
# extract unique breaks!
unique_cuts = c()
for(aval in thecuts){
if( ! (aval %in% unique_cuts) ){
unique_cuts=append(unique_cuts, aval)
}
}
# NA's are introduced at the boundaries so extends a bit the breaks
unique_cuts[1] = unique_cuts[1]-0.1
unique_cuts[length(unique_cuts)] = unique_cuts[length(unique_cuts)]+0.1
discre_data <- as.numeric(cut(x, breaks=unique_cuts))
ret=list()
ret[['discretised']] = discre_data
ret[['breaks']] = unique_cuts
ret[['frequencies']] = table(cut(x,unique_cuts, include.lowest=TRUE))
return(ret)
}
histo <- function(
inp=NULL, # input list
unique_vals=NULL # optional unique_vals list
){
whoami=paste0(match.call()[[1]],'()',collapse='')
cnames = names(inp)
ncnames = length(cnames)
ret=vector("list", ncnames)
names(ret) <- cnames
for(acolname in cnames){
ret[[acolname]] = histo_vector(
x=inp[[acolname]],
breaks=unique_vals[['histograms']][[acolname]]$breaks
)
}
return(ret)
}
histo_vector <- function(
x=NULL, # input vector
breaks=NULL # optional breaks
){
whoami=paste0(match.call()[[1]],'()',collapse='')
ahist = NULL
if( is.null(breaks) ){
ahist <- hist(x=x, plot=F)
} else {
ahist <- hist(x=x,breaks=breaks, plot=F)
}
ncounts = length(ahist$counts)
ret=matrix(ncol=2,nrow=ncounts)
colnames(ret) <- c('value', 'count')
for(i in 1:ncounts){
ret[i, 'value'] = ahist$breaks[i]+(ahist$breaks[i+1]-ahist$breaks[i])/2
ret[i, 'count'] = ahist$counts[i]
}
return(ret)
}
unique_values <- function(
inp=NULL
){
whoami=paste0(match.call()[[1]],'()',collapse='')
cnames = names(inp)
ncnames = length(cnames)
unix=vector("list", ncnames)
names(unix) <- cnames
num_unix=matrix(nrow=1,ncol=ncnames)
histos=vector("list", ncnames)
colnames(num_unix) <- cnames
for(acolname in cnames){
unix[[acolname]] = unique(inp[[acolname]])
num_unix[,acolname] = length(unix[[acolname]])
histos[[acolname]] = hist(inp[[acolname]], breaks=unix[[acolname]], plot=F)
}
ret=list()
ret[['unique_values']] = unix
ret[['num_unique_values']] = num_unix
ret[['histograms']] = histos
return(ret)
}
unique_values_vector <- function(
x=NULL
){
whoami=paste0(match.call()[[1]],'()',collapse='')
ret=list()
ret[['unique_values']] = unique(x)
ret[['num_unique_values']] = length(ret[['unique_values']])
ret[['histograms']] = hist(x=x, breaks=ret[['unique_values']], plot=F)
return(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Collection of Functions.R
\name{B_T_with_Qua_u_fhome}
\alias{B_T_with_Qua_u_fhome}
\title{Bradley-Terry Model Weighted Likelihood function with fixed home parameter, Lagrangian and Quadratic Penalty}
\usage{
B_T_with_Qua_u_fhome(df, ability, i0, theta, n, v, uij, u, home)
}
\description{
Bradely-Terry Model Weighted Likelihood function with fixed home parameter, Lagrangian and Quadratic Penalty
}
| /man/B_T_with_Qua_u_fhome.Rd | no_license | heilokchow/MWLE-Lasso | R | false | true | 478 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Collection of Functions.R
\name{B_T_with_Qua_u_fhome}
\alias{B_T_with_Qua_u_fhome}
\title{Bradely-Terry Model Weighted Likelihood function with fixed home parameter, Lagrangian and Quadratic Penalty}
\usage{
B_T_with_Qua_u_fhome(df, ability, i0, theta, n, v, uij, u, home)
}
\description{
Bradely-Terry Model Weighted Likelihood function with fixed home parameter, Lagrangian and Quadratic Penalty
}
|
##############################################################
### Simulating Moris'chance of beating the record of Babe Ruth
############################################################## | /codeSnippets/record-breaker-Ruth.R | no_license | Amitabh-G/rUtility | R | false | false | 188 | r | ##############################################################
### Simulating Moris'chance of beating the record of Babe Ruth
############################################################## |
# Computes the standard normal quantile function (inverse CDF) of the
# vector x, 0 < x < 1.
# Implemented with stats::qnorm(), which is base R and numerically robust;
# it is mathematically identical to the previous sqrt(2)*erfinv(2*x - 1)
# formulation, which depended on a non-base erfinv() (e.g. from pracma).
Phiinv <- function(x) {
  val <- qnorm(x)
  return(val)
}
| /Tรฉmata/09 Tobit model/R/Support/Phiinv.R | no_license | JanMelicharik/baan_python | R | false | false | 158 | r | # Computes the standard normal quantile function of the vector x,
# 0<x<1.
Phiinv <- function(x) {
val <- sqrt(2)*erfinv(2*x-1)
return(val)
}
|
\name{apprPower}
\alias{apprPower}
\title{ Approximate power (any rejection!) for many-to-one comparison of binomial proportions }
\description{
Approximative power to reject the hypothesis that all of the k differences
of proportions of treatment groups vs. control group are zero, i.e.:
probability to reject any H0[i]: p[i]-p[0] = 0, For a given setting of n[i], and p[i]
assumed under the alternative.
}
\usage{
apprPower(n, pH1, alpha = 0.05, alternative = "greater", method = "Add4")
}
\arguments{
\item{n}{ vector of integers specifying the number of observations in each group, where the first value is taken as sample size of control group}
\item{pH1}{ numeric vector with values between 0 and 1, specifying the proportions of success under the alternative hypothesis, should have the same length as n }
\item{alpha}{ pre-specified type-I-error }
\item{alternative}{ character string defining the alternative hypothesis, take care, that it fits to the parameters settings specified in pH1 }
  \item{method}{ character string defining the confidence interval method to be used, one of "Add4", "Add2", "Wald" }
}
\details{
This function uses approximative calculation of any-pair-power of a maximum test as described in Bretz and Hothorn (2002) for a
Wald test of multiple contrasts of binary data. Differing from Bretz and Hothorn (2002), unpooled variance estimators are used in the
present function. In case of "Add4" and "Add2"-method, the Wald expectation and variance are replaced by that of add-4 and add-2.
Since the approximate calculation assumes normality, this function can give misleading results, if sample size is small and/or proportions
of success are extreme.
The present function only calculates power for the test adjusting via the multivariate-normal-distribution.
For Bonferroni-adjusted or unadjusted tests, one can make use of well-known formulas for power and sample size for binary data.
The use of the function simPower in this package will result in power estimation closer to the true performance of the methods but is less convenient.
}
\value{
a single numeric value: the approximate any-pair power
}
\references{Bretz,F and Hothorn, LA (2002): Detecting dose-response using contrasts: asymptotic power and sample size determination for binomial data.
Statistics in Medicine 21, 3325-3335. }
\author{ Frank Schaarschmidt }
\note{
The results of this function are roughly checked by comparison with results of power simulation, which indicate that the approximations are reasonable for
at least moderate n and not too extreme proportions.
The performance of a corresponding test using the add-4 or add-2 adjustment is not described.
}
\seealso{ simPower }
\examples{
# Recalculate the power of the Dunnett-contrast
# for the first setting in Bretz and Hothorn (2002, Table III),
# using a balanced design and the allocation rule n0/ni=sqrt(k)
# of Dunnett(1955), desiring a power of 80 percent.
# Note that differing from Bretz and Hothorn (2002)
# in the present function unpooled variance estimators
# are used, what might lead to different results.
apprPower(n=c(196, 196, 196, 196, 196),
pH1=c(0.45, 0.45, 0.5, 0.5, 0.6),
alpha=0.05, alternative="greater", method="Wald")
apprPower(n=c(294, 147, 147, 147, 147 ),
pH1=c(0.45, 0.45, 0.5, 0.5, 0.6),
alpha=0.05, alternative="greater", method="Wald")
}
\keyword{ htest } | /man/apprPower.rd | no_license | cran/binMto | R | false | false | 3,462 | rd | \name{apprPower}
\alias{apprPower}
\title{ Approximate power (any rejection!) for many-to-one comparison of binomial proportions }
\description{
Approximative power to reject the hypothesis that all of the k differences
of proportions of treatment groups vs. control group are zero, i.e.:
probability to reject any H0[i]: p[i]-p[0] = 0, For a given setting of n[i], and p[i]
assumed under the alternative.
}
\usage{
apprPower(n, pH1, alpha = 0.05, alternative = "greater", method = "Add4")
}
\arguments{
\item{n}{ vector of integers specifying the number of observations in each group, where the first value is taken as sample size of control group}
\item{pH1}{ numeric vector with values between 0 and 1, specifying the proportions of success under the alternative hypothesis, should have the same length as n }
\item{alpha}{ pre-specified type-I-error }
\item{alternative}{ character string defining the alternative hypothesis, take care, that it fits to the parameters settings specified in pH1 }
\item{method}{ character string defining the confidence interval method to be used, one of "Add4", "Add2", "Wald" }
}
\details{
This function uses approximative calculation of any-pair-power of a maximum test as described in Bretz and Hothorn (2002) for a
Wald test of multiple contrasts of binary data. Differing from Bretz and Hothorn (2002), unpooled variance estimators are used in the
present function. In case of "Add4" and "Add2"-method, the Wald expectation and variance are replaced by that of add-4 and add-2.
Since the approximate calculation assumes normality, this function can give misleading results, if sample size is small and/or proportions
of success are extreme.
The present function only calculates power for the test adjusting via the multivariate-normal-distribution.
For Bonferroni-adjusted or unadjusted tests, one can make use of well-known formulas for power and sample size for binary data.
The use of the function simPower in this package will result in power estimation closer to the true performance of the methods but is less convenient.
}
\value{
a single numeric value: the approximate any-pair power
}
\references{Bretz,F and Hothorn, LA (2002): Detecting dose-response using contrasts: asymptotic power and sample size determination for binomial data.
Statistics in Medicine 21, 3325-3335. }
\author{ Frank Schaarschmidt }
\note{
The results of this function are roughly checked by comparison with results of power simulation, which indicate that the approximations are reasonable for
at least moderate n and not too extreme proportions.
The performance of a corresponding test using the add-4 or add-2 adjustment is not described.
}
\seealso{ simPower }
\examples{
# Recalculate the power of the Dunnett-contrast
# for the first setting in Bretz and Hothorn (2002, Table III),
# using a balanced design and the allocation rule n0/ni=sqrt(k)
# of Dunnett(1955), desiring a power of 80 percent.
# Note that differing from Bretz and Hothorn (2002)
# in the present function unpooled variance estimators
# are used, what might lead to different results.
apprPower(n=c(196, 196, 196, 196, 196),
pH1=c(0.45, 0.45, 0.5, 0.5, 0.6),
alpha=0.05, alternative="greater", method="Wald")
apprPower(n=c(294, 147, 147, 147, 147 ),
pH1=c(0.45, 0.45, 0.5, 0.5, 0.6),
alpha=0.05, alternative="greater", method="Wald")
}
\keyword{ htest } |
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 4.94661240579261e+173, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613102680-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 4.94661240579261e+173, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
## Rcmdr plug-in dialog for HH::ResizeEtc().
## Builds a tcltk form with one free-text entry field per ResizeEtc()
## argument, assembles the corresponding ResizeEtc(...) call from the
## non-empty fields, and executes it through the Rcmdr command log.
## All helpers used here (initializeDialog, top, buttonsFrame,
## gettextRcmdr, closeDialog, errorCondition, doItAndPrint,
## activateMenus, CommanderWindow, justDoIt, OKCancelHelp,
## dialogSuffix) are supplied by the Rcmdr environment.
ResizeEtcDialog <- function() {
initializeDialog(title=gettextRcmdr("Resize Panels"))
resizeFrame <- tkframe(top)
## One tcl string variable per ResizeEtc() argument; an empty string
## means "omit this argument from the generated call".
c.listVar <- tclVar("")
condlevelsNameVar <- tclVar("")
x.sameVar <- tclVar("")
y.sameVar <- tclVar("")
layoutVar <- tclVar("")
strip.valuesVar <- tclVar("")
strip.left.valuesVar <- tclVar("")
strip.parVar <- tclVar("")
strip.left.parVar <- tclVar("")
resize.heightVar <- tclVar("")
resize.widthVar <- tclVar("")
mainVar <- tclVar("")
mainMiddleVar <- tclVar("")
## Entry widgets bound one-to-one to the tcl variables above.
c.listEntry <- tkentry(resizeFrame, width="48", textvariable=c.listVar)
condlevelsNameEntry <- tkentry(resizeFrame, width="48", textvariable=condlevelsNameVar)
x.sameEntry <- tkentry(resizeFrame, width="48", textvariable=x.sameVar)
y.sameEntry <- tkentry(resizeFrame, width="48", textvariable=y.sameVar)
layoutEntry <- tkentry(resizeFrame, width="48", textvariable=layoutVar)
strip.valuesEntry <- tkentry(resizeFrame, width="48", textvariable=strip.valuesVar)
strip.left.valuesEntry <- tkentry(resizeFrame, width="48", textvariable=strip.left.valuesVar)
strip.parEntry <- tkentry(resizeFrame, width="48", textvariable=strip.parVar)
strip.left.parEntry <- tkentry(resizeFrame, width="48", textvariable=strip.left.parVar)
resize.heightEntry <- tkentry(resizeFrame, width="48", textvariable=resize.heightVar)
resize.widthEntry <- tkentry(resizeFrame, width="48", textvariable=resize.widthVar)
mainEntry <- tkentry(resizeFrame, width="48", textvariable=mainVar)
mainMiddleEntry <- tkentry(resizeFrame, width="48", textvariable=mainMiddleVar)
## OK-button callback: read every entry, validate, build the
## ResizeEtc() command string, and run it via doItAndPrint().
onOK <- function() {
#on.exit(recover())
c.listValue <- tclvalue(c.listVar)
condlevelsNameValue <- tclvalue(condlevelsNameVar)
x.sameValue <- tclvalue(x.sameVar)
y.sameValue <- tclvalue(y.sameVar)
layoutValue <- tclvalue(layoutVar)
strip.valuesValue <- tclvalue(strip.valuesVar)
strip.left.valuesValue <- tclvalue(strip.left.valuesVar)
strip.parValue <- tclvalue(strip.parVar)
strip.left.parValue <- tclvalue(strip.left.parVar)
resize.heightValue <- tclvalue(resize.heightVar)
resize.widthValue <- tclvalue(resize.widthVar)
mainValue <- tclvalue(mainVar)
mainMiddleValue <- tclvalue(mainMiddleVar)
closeDialog()
## c.list is the only mandatory argument; reopen the dialog with an
## error message if it was left blank.
if (nchar(c.listValue) == 0) {
errorCondition(recall=ResizeEtcDialog,
message=gettextRcmdr("c.list must be specified."))
return()
}
##command.xmiddle <- "x.middle <- diff(current.panel.limits()$xlim)/2"
##doItAndPrint(command.xmiddle)
## main.middle defaults to .5 when the field was left empty.
if (nchar(mainMiddleValue)==0) mainMiddleValue <- ".5"
## Assemble the call text: each optional argument is appended only
## when its entry field is non-empty; character-valued arguments
## (condlevelsName, main) are wrapped in single quotes.
command <- paste("ResizeEtc(",
paste( "c.list=", c.listValue, sep=""),
if (nchar(condlevelsNameValue ) != 0) paste(", condlevelsName='", condlevelsNameValue , "'", sep=""),
if (nchar(x.sameValue ) != 0) paste(", x.same=", x.sameValue , sep=""),
if (nchar(y.sameValue ) != 0) paste(", y.same=", y.sameValue , sep=""),
if (nchar(layoutValue ) != 0) paste(", layout=", layoutValue , sep=""),
if (nchar(strip.valuesValue ) != 0) paste(", strip.values=", strip.valuesValue , sep=""),
if (nchar(strip.left.valuesValue) != 0) paste(", strip.left.values=", strip.left.valuesValue, sep=""),
if (nchar(strip.parValue ) != 0) paste(", strip.par=", strip.parValue , sep=""),
if (nchar(strip.left.parValue ) != 0) paste(", strip.left.par=", strip.left.parValue , sep=""),
if (nchar(resize.heightValue ) != 0) paste(", resize.height=", resize.heightValue , sep=""),
if (nchar(resize.widthValue ) != 0) paste(", resize.width=", resize.widthValue , sep=""),
if (nchar(mainValue ) != 0) paste(", main='", mainValue , "'", sep=""),
if (nchar(mainMiddleValue ) != 0) paste(", main.middle=", mainMiddleValue , sep=""),
")", sep="")
doItAndPrint(command)
activateMenus()
tkfocus(CommanderWindow())
## On Windows, raise the graphics device window to the front.
if (version$os == "mingw32") justDoIt("bringToTop()")
}
OKCancelHelp(helpSubject="ResizeEtcDialog")
## Lay out the label/entry pairs as a two-column grid.
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("c.list:")), c.listEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("condlevelsName:")), condlevelsNameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("x.same:")), x.sameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("y.same:")), y.sameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("layout:")), layoutEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.values:")), strip.valuesEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.left.values:")), strip.left.valuesEntry, sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.par:")), strip.parEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.left.par:")), strip.left.parEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("resize.height:")), resize.heightEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("resize.width:")), resize.widthEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("main:")), mainEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("mainMiddle:")), mainMiddleEntry , sticky="w")
tkgrid(resizeFrame, sticky="w")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
dialogSuffix(rows=13, columns=2)
}
## List the names of all "trellis" (lattice plot) objects found in an
## environment.
##
## @param envir environment to search (default: the global environment).
## @param ...   further arguments forwarded to ls(), e.g. all.names.
## @return character vector of object names whose value inherits from
##   class "trellis"; NULL when the environment holds no objects at all
##   (character(0) when it holds objects but none are trellis).
listAllTrellisObjects <- function (envir = .GlobalEnv, ...) {
objects <- ls(envir = envir, ...)
if (length(objects) == 0)
return(NULL)
## vapply() guarantees a logical result of the right length, unlike
## sapply(); inherits() is the idiomatic class test (robust to objects
## carrying multiple classes).
is.trellis <- vapply(objects, function(.x) {
inherits(get(.x, envir = envir), "trellis")
}, logical(1))
objects[is.trellis]
}
## source("c:/HOME/rmh/HH-R.package/RcmdrPlugin.HH2/R/ResizeEtcDialog.R")
| /R/ResizeEtcDialog.R | no_license | cran/RcmdrPlugin.HH | R | false | false | 6,893 | r | ResizeEtcDialog <- function() {
initializeDialog(title=gettextRcmdr("Resize Panels"))
resizeFrame <- tkframe(top)
c.listVar <- tclVar("")
condlevelsNameVar <- tclVar("")
x.sameVar <- tclVar("")
y.sameVar <- tclVar("")
layoutVar <- tclVar("")
strip.valuesVar <- tclVar("")
strip.left.valuesVar <- tclVar("")
strip.parVar <- tclVar("")
strip.left.parVar <- tclVar("")
resize.heightVar <- tclVar("")
resize.widthVar <- tclVar("")
mainVar <- tclVar("")
mainMiddleVar <- tclVar("")
c.listEntry <- tkentry(resizeFrame, width="48", textvariable=c.listVar)
condlevelsNameEntry <- tkentry(resizeFrame, width="48", textvariable=condlevelsNameVar)
x.sameEntry <- tkentry(resizeFrame, width="48", textvariable=x.sameVar)
y.sameEntry <- tkentry(resizeFrame, width="48", textvariable=y.sameVar)
layoutEntry <- tkentry(resizeFrame, width="48", textvariable=layoutVar)
strip.valuesEntry <- tkentry(resizeFrame, width="48", textvariable=strip.valuesVar)
strip.left.valuesEntry <- tkentry(resizeFrame, width="48", textvariable=strip.left.valuesVar)
strip.parEntry <- tkentry(resizeFrame, width="48", textvariable=strip.parVar)
strip.left.parEntry <- tkentry(resizeFrame, width="48", textvariable=strip.left.parVar)
resize.heightEntry <- tkentry(resizeFrame, width="48", textvariable=resize.heightVar)
resize.widthEntry <- tkentry(resizeFrame, width="48", textvariable=resize.widthVar)
mainEntry <- tkentry(resizeFrame, width="48", textvariable=mainVar)
mainMiddleEntry <- tkentry(resizeFrame, width="48", textvariable=mainMiddleVar)
onOK <- function() {
#on.exit(recover())
c.listValue <- tclvalue(c.listVar)
condlevelsNameValue <- tclvalue(condlevelsNameVar)
x.sameValue <- tclvalue(x.sameVar)
y.sameValue <- tclvalue(y.sameVar)
layoutValue <- tclvalue(layoutVar)
strip.valuesValue <- tclvalue(strip.valuesVar)
strip.left.valuesValue <- tclvalue(strip.left.valuesVar)
strip.parValue <- tclvalue(strip.parVar)
strip.left.parValue <- tclvalue(strip.left.parVar)
resize.heightValue <- tclvalue(resize.heightVar)
resize.widthValue <- tclvalue(resize.widthVar)
mainValue <- tclvalue(mainVar)
mainMiddleValue <- tclvalue(mainMiddleVar)
closeDialog()
if (nchar(c.listValue) == 0) {
errorCondition(recall=ResizeEtcDialog,
message=gettextRcmdr("c.list must be specified."))
return()
}
##command.xmiddle <- "x.middle <- diff(current.panel.limits()$xlim)/2"
##doItAndPrint(command.xmiddle)
if (nchar(mainMiddleValue)==0) mainMiddleValue <- ".5"
command <- paste("ResizeEtc(",
paste( "c.list=", c.listValue, sep=""),
if (nchar(condlevelsNameValue ) != 0) paste(", condlevelsName='", condlevelsNameValue , "'", sep=""),
if (nchar(x.sameValue ) != 0) paste(", x.same=", x.sameValue , sep=""),
if (nchar(y.sameValue ) != 0) paste(", y.same=", y.sameValue , sep=""),
if (nchar(layoutValue ) != 0) paste(", layout=", layoutValue , sep=""),
if (nchar(strip.valuesValue ) != 0) paste(", strip.values=", strip.valuesValue , sep=""),
if (nchar(strip.left.valuesValue) != 0) paste(", strip.left.values=", strip.left.valuesValue, sep=""),
if (nchar(strip.parValue ) != 0) paste(", strip.par=", strip.parValue , sep=""),
if (nchar(strip.left.parValue ) != 0) paste(", strip.left.par=", strip.left.parValue , sep=""),
if (nchar(resize.heightValue ) != 0) paste(", resize.height=", resize.heightValue , sep=""),
if (nchar(resize.widthValue ) != 0) paste(", resize.width=", resize.widthValue , sep=""),
if (nchar(mainValue ) != 0) paste(", main='", mainValue , "'", sep=""),
if (nchar(mainMiddleValue ) != 0) paste(", main.middle=", mainMiddleValue , sep=""),
")", sep="")
doItAndPrint(command)
activateMenus()
tkfocus(CommanderWindow())
if (version$os == "mingw32") justDoIt("bringToTop()")
}
OKCancelHelp(helpSubject="ResizeEtcDialog")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("c.list:")), c.listEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("condlevelsName:")), condlevelsNameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("x.same:")), x.sameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("y.same:")), y.sameEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("layout:")), layoutEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.values:")), strip.valuesEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.left.values:")), strip.left.valuesEntry, sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.par:")), strip.parEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("strip.left.par:")), strip.left.parEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("resize.height:")), resize.heightEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("resize.width:")), resize.widthEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("main:")), mainEntry , sticky="w")
tkgrid(tklabel(resizeFrame, text=gettextRcmdr("mainMiddle:")), mainMiddleEntry , sticky="w")
tkgrid(resizeFrame, sticky="w")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
dialogSuffix(rows=13, columns=2)
}
## List the names of all "trellis" (lattice plot) objects found in an
## environment.  'envir' is searched with ls(); extra arguments in '...'
## are forwarded to ls() (e.g. all.names).  Returns NULL when the
## environment is empty, otherwise the subset of object names whose
## value carries class "trellis".
listAllTrellisObjects <- function (envir = .GlobalEnv, ...) {
objects <- ls(envir = envir, ...)
if (length(objects) == 0)
return(NULL)
## keep only the names whose object has "trellis" among its classes
objects[sapply(objects, function(.x) {
"trellis" %in% class(get(.x, envir = envir))
})]
}
## source("c:/HOME/rmh/HH-R.package/RcmdrPlugin.HH2/R/ResizeEtcDialog.R")
|
## These two functions cache the inverse of a matrix.
## Function 1 - makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor closures that share one enclosing
## environment: set()/get() replace and read the stored matrix
## (replacing it discards any cached inverse), while
## setInverse()/getInverse() store and retrieve the cached inverse
## (NULL until one has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      ## Replacing the matrix invalidates the cache.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(solveMatrix) cached_inverse <<- solveMatrix,
    getInverse = function() cached_inverse
  )
}
## Function 2 - cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Compute (or retrieve) the inverse of the special "matrix" object
## produced by makeCacheMatrix().  On a cache hit the stored inverse is
## returned directly (with a message); otherwise solve() is called on
## the stored matrix, the result is cached via x$setInverse(), and then
## returned.  Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | terryg40/ProgrammingAssignment2 | R | false | false | 985 | r | ## These two functions cache the inverse of a matrix.
## Function 1 - makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## makeCacheMatrix: wrap matrix 'x' together with a cache slot for its
## inverse.  Returns a list of four closures (set, get, setInverse,
## getInverse) that all share the enclosing environment, so the cached
## inverse persists between calls.
makeCacheMatrix <- function(x = matrix()) {
## 'inv' holds the cached inverse; NULL means "not computed yet"
inv <- NULL
set <- function(y) {
x <<- y
## replacing the matrix invalidates any previously cached inverse
inv <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Function 2 - cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## cacheSolve: return the inverse of the special "matrix" 'x' created by
## makeCacheMatrix().  A previously computed inverse is returned from
## the cache; otherwise solve() is called (extra arguments in '...' are
## forwarded to solve()) and the result is stored for future calls.
## NOTE(review): assumes the stored matrix is square and invertible --
## solve() will error otherwise.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setInverse(inv)
inv
}
|
# Generic
## makeAinv(): S3 generic for constructing the inverse additive
## (numerator) relationship matrix A^-1 from a pedigree.  Dispatch is
## done on 'fuzz': a matrix/Matrix of fuzzy genetic-group
## classifications is re-classed to "fuzzy" so that makeAinv.fuzzy()
## handles it; any other value (typically NULL) falls through to
## makeAinv.default().
makeAinv <- function(pedigree, f = NULL, ggroups = NULL, fuzz = NULL, gOnTop = FALSE, det = FALSE, ...){
## tag matrix-like 'fuzz' so UseMethod() selects the "fuzzy" method
if(is(fuzz, "matrix") | is(fuzz, "Matrix")) class(fuzz) <- "fuzzy"
UseMethod("makeAinv", fuzz)
}
###############################################################################
# Methods:
## makeAinv.default: build A^-1 (the inverse numerator relationship
## matrix) as a sparse Matrix from a pedigree, optionally with genetic
## groups.
##
## 'ggroups' selects the pedigree type:
##   NULL           - no genetic groups (type "O")
##   length-1 value - that many rows of 'pedigree' with missing parents
##                    are genetic-group rows already in the pedigree
##                    (type "A")
##   character vec  - unique group names to prepend to the pedigree;
##                    every unknown parent must then be a group (type "D")
## Returns list(Ainv, listAinv, f, logDet): sparse A^-1, its triplet
## form, the coefficients of inbreeding, and (when det=TRUE) log|A|.
## NOTE(review): relies on package helpers genAssign(), numPed(),
## ronPed(), sm2list() and the compiled routine "ainvml"; their
## behavior is not visible in this file.
makeAinv.default <- function(pedigree, f = NULL, ggroups = NULL, fuzz = NULL, gOnTop = FALSE, det = FALSE, ...){
if(is.null(ggroups)){
ptype <- "O"
## reorder so parents precede offspring (generation order)
renPed <- order(genAssign(pedigree), pedigree[, 2], pedigree[, 3], na.last = FALSE)
nPed <- numPed(pedigree[renPed, ])
groupRows <- NULL
nggroups <- 0
} else {
if(length(ggroups) == dim(pedigree)[1]){
stop("length(ggroups) should either be:\n == 1 and a numeric indicating the number of genetic groups (type 'A')\n == length(unique(ggroups)) and a character vector indicating the names of each unique genetic goup (type 'D')")
}
if(!is.null(fuzz)) stop("fuzzy genetic groups not yet implemented: fuzz must be NULL")
## rows with an unknown parent ('0', '*', or NA in dam/sire columns)
zerNA <- union(which(pedigree[, 2] == "0"), which(pedigree[, 3] == "0"))
astNA <- union(which(pedigree[, 2] == "*"), which(pedigree[, 3] == "*"))
naNA <- union(which(is.na(pedigree[, 2])), which(is.na(pedigree[, 3])))
naPed <- union(union(zerNA, astNA), naNA)
#### pedigree type "D" ####
if(!is.numeric(ggroups) && length(ggroups) == length(unique(ggroups))){
ptype <- "D"
if(is.null(fuzz) && length(naPed) > 0){
stop("When supplying a vector of unique genetic group names in the 'ggroups' argument, all individuals in the pedigree must have a genetic group when a parent is unknown (<NA>, '0' and '*' are considered unknown parents)")# or be identified as a phantom parent in 'fuzz'")
}
if(any(is.na(ggroups))) ggroups <- na.omit(ggroups)
## prepend one pedigree row per genetic group (groups have no parents)
pedalt <- data.frame(id = c(ggroups, as.character(pedigree[, 1])), dam = c(rep(NA, length(ggroups)), as.character(pedigree[, 2])), sire = c(rep(NA, length(ggroups)), as.character(pedigree[, 3])))
#TODO: write method for numPed to handle genetic group pedigree
## same genetic group for dam and sire won't throw warning about selfing
nPed <- suppressWarnings(numPed(pedalt)) # FIXME: remove suppressWarnings() throughout file
## return command below as attribute
groupRows <- nPed[which(nPed[, 2] == -998), 1]
}
#### pedigree type "A" ####
if(length(ggroups) == 1){
ptype <- "A"
if(length(naPed) == ggroups){
nPed <- suppressWarnings(numPed(pedigree))
groupRows <- naPed
} else {
stop("Only rows identifying genetic groups should have missing parents.\n All individuals in the pedigree must have a genetic group when a parent is unknown")# or be identified as a phantom parent in 'fuzz'")
}
}
nggroups <- length(groupRows)
renPed <- order(suppressWarnings(genAssign(nPed)), nPed[, 2], nPed[, 3])
nPed <- numPed(ronPed(nPed, renPed))
}
####
## --- build the non-zero pattern of T^-1 as a sparse matrix ---
## Only the pattern matters here: the numeric values of A^-1 are
## filled in by the compiled routine below (Ainv@x <- Cout[[7]]).
N <- nrow(nPed)
eN <- N - nggroups
dnmiss <- which(nPed[, 2] != -998)
snmiss <- which(nPed[, 3] != -998)
Tinv.row <- c(nPed[, 1][dnmiss], nPed[, 1][snmiss], 1:N)
Tinv.col <- c(nPed[, 2][dnmiss], nPed[, 3][snmiss], 1:N)
el.order <- order(Tinv.col + Tinv.row/(N + 1), decreasing = FALSE)
if(is.null(ggroups)){
sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
p = as.integer(c(match(1:N, Tinv.col[el.order]), length(el.order) + 1) - 1),
index1 = FALSE, dims = c(N, N), symmetric = FALSE,
dimnames = list(as.character(nPed[, 1]), NULL))
} else {
## drop the genetic-group rows from T^-1
sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
p = as.integer(c(match(1:N, Tinv.col[el.order]), length(el.order) + 1) - 1),
index1 = FALSE, dims = c(N, N), symmetric = FALSE,
dimnames = list(as.character(nPed[, 1]), NULL))[-groupRows, ]
}
Ainv <- t(crossprod(sTinv)) # transpose gives lower triangle
# 1: Adds Ainv elements in same for loop as calculation of f
# 2: First checks to see if individual k has same dam and sire as k-1, if so then just assigns k-1's f
# 3: simplifies the calculation of the addition to the Ainv element (instead of alphai * 0.25 - defines alphai=alphai*0.25).
## recode unknown parents (-998) to N+1 before passing to the C code
nPed[nPed == -998] <- N + 1
f <- c(rep(-1, nggroups), rep(0, eN), -1)
## compiled routine fills in f, the dii values, and the numeric
## entries of A^-1 over the sparsity pattern computed above
Cout <- .C("ainvml",
as.integer(nPed[, 2] - 1), #dam
as.integer(nPed[, 3] - 1), #sire
as.double(f), #f
as.double(rep(0, N)), #dii
as.integer(N), #n
as.integer(nggroups), #g
as.double(rep(0, length(Ainv@i))), #xA
as.integer(Ainv@i), #iA
as.integer(Ainv@p), #pA
as.integer(length(Ainv@i))) #nzmaxA
Ainv <- as(Ainv, "dsCMatrix")
Ainv@x <- Cout[[7]]
## undo the generation reordering with a permutation matrix and
## restore the original pedigree identities as dimnames
fsOrd <- as(as.integer(renPed), "pMatrix")
Ainv <- as(t(fsOrd) %*% Ainv %*% fsOrd, "dgCMatrix")
if(ptype == "D"){
Ainv@Dimnames <- list(as.character(pedalt[, 1]), NULL)
f <- Cout[[3]][t(fsOrd)@perm][-seq(nggroups)]
} else {
Ainv@Dimnames <- list(as.character(pedigree[, 1]), NULL)
f <- c(rep(0, nggroups), Cout[[3]][t(fsOrd)@perm][(nggroups+1):(nggroups + eN)])
}
## unless gOnTop, permute so the genetic-group rows/columns sit at the
## bottom of Ainv
if(!is.null(ggroups) && !gOnTop){
permute <- as(as.integer(c(seq(eN+1, N, 1), seq(eN))), "pMatrix")
Ainv <- t(permute) %*% Ainv %*% permute
}
## log-determinant of A obtained as -log|A^-1|
if(det) logDet <- -1*determinant(Ainv, logarithm = TRUE)$modulus[1] else logDet <- NULL
return(list(Ainv = Ainv,
listAinv = sm2list(Ainv, rownames = rownames(Ainv), colnames = c("row", "column", "Ainv")),
f = f,
logDet = logDet))
}
###############################################################################
###############################################################################
## makeAinv.fuzzy: A^-1 with fuzzy classification of phantom parents
## into genetic groups (Quaas 1988 formulation; see H-matrix comments
## below).  'fuzz' is a phantom-parent x genetic-group matrix whose
## rows each sum to 1; extensive consistency checks between 'fuzz' and
## 'pedigree' are performed before the matrix algebra.  Returns the
## same list structure as makeAinv.default().
## NOTE(review): depends on package helpers genAssign(), numPed(),
## sm2list() and the compiled routine "ainvfuzz" defined elsewhere.
makeAinv.fuzzy <- function(pedigree, f = NULL, ggroups = NULL, fuzz, gOnTop = FALSE, det = FALSE, ...){
if(!is.null(ggroups)){
stop("when 'fuzz' is non-NULL, 'ggroups' should not have any arguments (i.e., 'ggroups==NULL")
}
## rows with unknown dam (naPed2) / unknown sire (naPed3):
## '0', '*', and NA are all treated as unknown
naPed2 <- which(pedigree[, 2] == "0")
naPed3 <- which(pedigree[, 3] == "0")
naPed2 <- union(naPed2, which(pedigree[, 2] == "*"))
naPed3 <- union(naPed3, which(pedigree[, 3] == "*"))
naPed2 <- union(naPed2, which(is.na(pedigree[, 2])))
naPed3 <- union(naPed3, which(is.na(pedigree[, 3])))
# checks on fuzzy classification matrix and pedigree consistency:
## 'fuzz' is a type of matrix
if(!is(fuzz, "matrix") && !is(fuzz, "Matrix")){
cat("'fuzz' of class", class(fuzz), "\n")
stop("class of 'fuzz' must be either 'matrix' or 'Matrix'")
}
## rows of 'fuzz' add up to 1
if(any(rowSums(fuzz) != 1)){
cat("rows:", which(rowSums(fuzz) != 1), "\ndo not equal 1\n")
stop("all rowSums(fuzz) must equal 1\n(check for possible rounding errors, e.g., 3*0.33333 != 1)")
}
## fuzz has dimnames
if(is.null(dimnames(fuzz)[[1]]) | is.null(dimnames(fuzz)[[2]])){
stop("'fuzz' must have row and column names")
}
## pedigree does not have genetic groups
if(any(colnames(fuzz) %in% pedigree[, 1])){
cat("colnames:", which(colnames(fuzz) %in% pedigree[, 1]), "\nare in 'pedigree'\n")
stop("colnames of 'fuzz' (genetic groups) must NOT be identities in the first column of 'pedigree'")
}
## pedigree has phantom parents in 'fuzz'
if(!all(rownames(fuzz) %in% pedigree[, 1])){
cat("rownames:", which(!rownames(fuzz) %in% pedigree[, 1]), "\nnot in 'pedigree'\n")
stop("rownames of 'fuzz' (phantom parents) must all be identities in the first column of 'pedigree'. See the `prepPed()` function to help prepare the pedigree")
}
## individuals can only have both parents missing in 'pedigree' or none
if(length(naPed2) != length(naPed3) | any(!naPed2 %in% naPed3)){
stop("Individuals must have either two or zero missing parents in 'pedigree'")
}
## IDs with missing parents (if passed above check, naPed2==naPed3) in 'pedigree' are phantom parents in 'fuzz'
if(!all(pedigree[naPed2, 1] %in% rownames(fuzz))){
cat("IDs for 'pedigree' rows:", naPed2[which(!pedigree[naPed2, 1] %in% rownames(fuzz))], "\nare not rownames in 'fuzz'\n")
stop("Individuals with missing parents (phantom individuals) must have a rowname in 'fuzz'")
}
# order of genetic groups in pedalt/A^-1 is same as column order of 'fuzz'
ggroups <- colnames(fuzz)
nggroups <- length(ggroups) # No. genetic groups
p <- nrow(fuzz) # No. phantom parents
eN <- nrow(pedigree) - p # No. observed IDs
N <- nggroups + eN # No. GGs + observed IDs
# order of phantom parents in pedalt is same as row order in 'fuzz'
phantomPars <- seq(p) + nggroups
groupRows <- seq(nggroups)
# order pedigree: generations, order phantom parents in fuzz, dam, & sire
## genetic groups first
renPed <- c(groupRows, nggroups + order(genAssign(pedigree), match(pedigree[, 1], rownames(fuzz), nomatch = p+1), pedigree[, 2], pedigree[, 3]))
pedalt <- data.frame(id = c(ggroups, as.character(pedigree[, 1])), dam = c(rep(NA, nggroups), as.character(pedigree[, 2])), sire = c(rep(NA, nggroups), as.character(pedigree[, 3])))[renPed, ]
nPed <- numPed(pedalt)
## 'phantomless' = renumbered pedigree without phantom-parent rows;
## columns 4-5 keep the (possibly phantom) dam/sire codes
phantomless <- cbind(numPed(nPed[-phantomPars, ], check = FALSE), nPed[-phantomPars, -1])
if(!all(which(phantomless[, 4] == -998) == groupRows)){
stop("Something wicked happened with the dams in phantomless numeric pedigree:\n Contact package maintainer: <matthewwolak@gmail.com>\n or raise an issue on: <https://github.com/matthewwolak/nadiv/issues>")
}
if(!all(which(phantomless[, 5] == -998) == groupRows)){
stop("Something wicked happened with the sires in phantomless numeric pedigree:\n Contact package maintainer: <matthewwolak@gmail.com>\n or raise an issue on: <https://github.com/matthewwolak/nadiv/issues>")
}
groupFuzz <- Diagonal(x = 1, n = nggroups)
groupFuzz@Dimnames <- list(as.character(ggroups), as.character(ggroups))
## NOTE(review): rBind()/cBind() are defunct in current versions of the
## Matrix package; plain rbind()/cbind() are the replacements when this
## code is next revised.
fuzzmat <- rBind(groupFuzz, as(fuzz, "sparseMatrix"))
# predict non-zero elements of Astar
## make H from Quaas 1988:
## H = [-Pb Qb : Tinv]
## Astar = H' %*% D^-1 %*% H
### sTinv: n x n observed IDs (i.e., no GGs or phantom parent IDs)
dnmiss <- which(phantomless[, 2] != -998)
snmiss <- which(phantomless[, 3] != -998)
Tinv.row <- c(c(phantomless[, 1][dnmiss], phantomless[, 1][snmiss]) - nggroups, 1:eN)
Tinv.col <- c(c(phantomless[, 2][dnmiss], phantomless[, 3][snmiss]) - nggroups, 1:eN)
el.order <- order(Tinv.col + Tinv.row/(eN + 1), decreasing = FALSE)
sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
p = as.integer(c(match(1:eN, Tinv.col[el.order]), length(el.order) + 1) - 1),
index1 = FALSE, dims = c(eN, eN), symmetric = FALSE,
dimnames = list(as.character(phantomless[-groupRows, 1]), NULL))
### Pb: n x p version of sTinv
pdnmiss <- which(phantomless[, 4] %in% phantomPars)
psnmiss <- which(phantomless[, 5] %in% phantomPars)
Pb.row <- c(phantomless[, 1][pdnmiss], phantomless[, 1][psnmiss]) - nggroups
Pb.col <- c(phantomless[, 4][pdnmiss], phantomless[, 5][psnmiss]) - nggroups
el.order <- order(Pb.col + Pb.row/(p + 1), decreasing = FALSE)
sPb <- sparseMatrix(i = as.integer(Pb.row[el.order] - 1),
p = as.integer(c(match(1:p, Pb.col[el.order]), length(el.order) + 1) - 1),
index1 = FALSE, dims = c(eN, p), symmetric = FALSE,
dimnames = list(NULL, as.character(pedalt[phantomPars, 1])))
### Qb is the fuzzy classification matrix ('fuzz')
Qb <- as(fuzzmat[-groupRows, ][match(rownames(fuzzmat)[-groupRows], colnames(sPb)), ], "sparseMatrix")
sQb <- sparseMatrix(i = Qb@i,
p = Qb@p,
index1 = FALSE, dims = Qb@Dim, symmetric = FALSE,
dimnames = Qb@Dimnames)
## sH = [-(sPb %*% sQb) : sTinv]
sH <- cBind((sPb %*% sQb), sTinv)
Ainv <- t(crossprod(sH)) # transpose stores lower triangle
## recode unknown-parent sentinel (-998) to N+1 for the C routine
phantomless[phantomless == -998] <- N + 1
# for now, phantom parents cannot be inbred (just like genetic groups)
f <- c(rep(-1, nggroups), rep(0, eN), -1)
## compiled routine fills in f, dii, and the numeric entries of A^-1
## over the sparsity pattern predicted above (Ainv@x <- Cout[[12]])
Cout <- .C("ainvfuzz",
as.integer(phantomless[, 2] - 1), #dam
as.integer(phantomless[, 3] - 1), #sire
as.integer(phantomless[, 4] - 1), #phantom dam
as.integer(phantomless[, 5] - 1), #phantom sire
as.double(f), #f
as.double(rep(0, N)), #dii
as.integer(N), #n
as.integer(nggroups), #g
as.double(fuzzmat@x), #xF
as.integer(fuzzmat@i), #iF
as.integer(fuzzmat@p), #pF
as.double(rep(0, length(Ainv@i))), #xA
as.integer(Ainv@i), #iA
as.integer(Ainv@p)) #pA
Ainv <- as(Ainv, "dsCMatrix")
Ainv@x <- Cout[[12]]
## undo the reordering and drop phantom-parent rows/columns, restoring
## the original identities as dimnames
fsOrd1 <- as(as(as.integer(renPed), "pMatrix")[, -c(naPed2 + nggroups)], "CsparseMatrix")
fsOrd <- as(as(fsOrd1 %*% matrix(seq(N), nrow = N), "sparseMatrix")@x, "pMatrix")
Ainv <- as(t(fsOrd) %*% Ainv %*% fsOrd, "dgCMatrix")
Ainv@Dimnames <- list(as.character(pedalt[(t(fsOrd1) %*% matrix(seq(N+p), ncol = 1))@x, 1]), NULL)
f <- (fsOrd1 %*% Cout[[5]][-c(N+1)])@x[-groupRows]
## unless gOnTop, permute so the genetic-group rows/columns sit at the
## bottom of Ainv
if(!gOnTop){
permute <- as(as.integer(c(seq(eN+1, N, 1), seq(eN))), "pMatrix")
Ainv <- t(permute) %*% Ainv %*% permute
}
## log-determinant of A obtained as -log|A^-1|
if(det) logDet <- -1*determinant(Ainv, logarithm = TRUE)$modulus[1] else logDet <- NULL
return(list(Ainv = Ainv,
listAinv = sm2list(Ainv, rownames = rownames(Ainv), colnames = c("row", "column", "Ainv")),
f = f,
logDet = logDet))
}
| /nadiv/R/makeAinv.R | no_license | ingted/R-Examples | R | false | false | 13,381 | r | # Generic
makeAinv <- function(pedigree, f = NULL, ggroups = NULL, fuzz = NULL, gOnTop = FALSE, det = FALSE, ...){
if(is(fuzz, "matrix") | is(fuzz, "Matrix")) class(fuzz) <- "fuzzy"
UseMethod("makeAinv", fuzz)
}
###############################################################################
# Methods:
# Default method for makeAinv(): builds the sparse inverse of the additive
# genetic relationship matrix (A^-1) from a pedigree, with optional genetic
# groups.  The numeric entries are computed by the compiled routine "ainvml";
# this wrapper sets up the generation ordering and the sparsity pattern.
#
# `ggroups` selects the pedigree "type" (stored in `ptype`):
#   NULL             -> ordinary pedigree, no genetic groups        ("O")
#   character vector -> unique group names; every unknown parent
#     of unique names    (NA, "0", "*") must instead name a group   ("D")
#   single numeric   -> the first `ggroups` pedigree rows are the
#                       genetic groups (the only rows allowed to
#                       have missing parents)                       ("A")
#
# Returns list(Ainv, listAinv, f, logDet):
#   Ainv     - sparse dgCMatrix A^-1, rownames taken from the pedigree
#   listAinv - 3-column (row, column, Ainv) triplet form for MME software
#   f        - coefficients of inbreeding (0 for genetic group rows)
#   logDet   - -log|A^-1| (i.e. log|A|) when det = TRUE, otherwise NULL
makeAinv.default <- function(pedigree, f = NULL, ggroups = NULL, fuzz = NULL, gOnTop = FALSE, det = FALSE, ...){
  if(is.null(ggroups)){
    ptype <- "O"
    # re-order so parents always precede offspring (generation order)
    renPed <- order(genAssign(pedigree), pedigree[, 2], pedigree[, 3], na.last = FALSE)
    nPed <- numPed(pedigree[renPed, ])
    groupRows <- NULL
    nggroups <- 0
  } else {
    if(length(ggroups) == dim(pedigree)[1]){
      stop("length(ggroups) should either be:\n == 1 and a numeric indicating the number of genetic groups (type 'A')\n == length(unique(ggroups)) and a character vector indicating the names of each unique genetic goup (type 'D')")
    }
    if(!is.null(fuzz)) stop("fuzzy genetic groups not yet implemented: fuzz must be NULL")
    # rows whose dam/sire is unknown: "0", "*", or NA all denote missing parents
    zerNA <- union(which(pedigree[, 2] == "0"), which(pedigree[, 3] == "0"))
    astNA <- union(which(pedigree[, 2] == "*"), which(pedigree[, 3] == "*"))
    naNA <- union(which(is.na(pedigree[, 2])), which(is.na(pedigree[, 3])))
    naPed <- union(union(zerNA, astNA), naNA)
    #### pedigree type "D" ####
    if(!is.numeric(ggroups) && length(ggroups) == length(unique(ggroups))){
      ptype <- "D"
      if(is.null(fuzz) && length(naPed) > 0){
        stop("When supplying a vector of unique genetic group names in the 'ggroups' argument, all individuals in the pedigree must have a genetic group when a parent is unknown (<NA>, '0' and '*' are considered unknown parents)")# or be identified as a phantom parent in 'fuzz'")
      }
      if(any(is.na(ggroups))) ggroups <- na.omit(ggroups)
      # prepend the genetic groups as parentless "individuals"
      pedalt <- data.frame(id = c(ggroups, as.character(pedigree[, 1])), dam = c(rep(NA, length(ggroups)), as.character(pedigree[, 2])), sire = c(rep(NA, length(ggroups)), as.character(pedigree[, 3])))
      #TODO: write method for numPed to handle genetic group pedigree
      ## same genetic group for dam and sire won't throw warning about selfing
      nPed <- suppressWarnings(numPed(pedalt)) # FIXME: remove suppressWarnings() throughout file
      ## return command below as attribute
      # numPed codes missing parents as -998, so these are the group rows
      groupRows <- nPed[which(nPed[, 2] == -998), 1]
    }
    #### pedigree type "A" ####
    if(length(ggroups) == 1){
      ptype <- "A"
      if(length(naPed) == ggroups){
        nPed <- suppressWarnings(numPed(pedigree))
        groupRows <- naPed
      } else {
        stop("Only rows identifying genetic groups should have missing parents.\n All individuals in the pedigree must have a genetic group when a parent is unknown")# or be identified as a phantom parent in 'fuzz'")
      }
    }
    nggroups <- length(groupRows)
    renPed <- order(suppressWarnings(genAssign(nPed)), nPed[, 2], nPed[, 3])
    nPed <- numPed(ronPed(nPed, renPed))
  }
  ####
  N <- nrow(nPed)      # total rows (individuals + genetic groups)
  eN <- N - nggroups   # number of actual individuals
  dnmiss <- which(nPed[, 2] != -998)
  snmiss <- which(nPed[, 3] != -998)
  # Sparsity pattern of T^-1: one entry per known parent link plus the
  # diagonal; only the pattern matters here, since the numeric values of
  # A^-1 are overwritten after the C call below
  Tinv.row <- c(nPed[, 1][dnmiss], nPed[, 1][snmiss], 1:N)
  Tinv.col <- c(nPed[, 2][dnmiss], nPed[, 3][snmiss], 1:N)
  el.order <- order(Tinv.col + Tinv.row/(N + 1), decreasing = FALSE)
  if(is.null(ggroups)){
    sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
      p = as.integer(c(match(1:N, Tinv.col[el.order]), length(el.order) + 1) - 1),
      index1 = FALSE, dims = c(N, N), symmetric = FALSE,
      dimnames = list(as.character(nPed[, 1]), NULL))
  } else {
    # drop the genetic group rows so T^-1 is (individuals x all columns)
    sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
      p = as.integer(c(match(1:N, Tinv.col[el.order]), length(el.order) + 1) - 1),
      index1 = FALSE, dims = c(N, N), symmetric = FALSE,
      dimnames = list(as.character(nPed[, 1]), NULL))[-groupRows, ]
  }
  Ainv <- t(crossprod(sTinv)) # transpose gives lower triangle
  # Notes on the C routine "ainvml":
  # 1: Adds Ainv elements in same for loop as calculation of f
  # 2: First checks to see if individual k has same dam and sire as k-1, if so then just assigns k-1's f
  # 3: simplifies the calculation of the addition to the Ainv element (instead of alphai * 0.25 - defines alphai=alphai*0.25).
  nPed[nPed == -998] <- N + 1   # recode missing parents for the C routine
  f <- c(rep(-1, nggroups), rep(0, eN), -1)
  Cout <- .C("ainvml",
	as.integer(nPed[, 2] - 1), #dam
	as.integer(nPed[, 3] - 1), #sire
	as.double(f),  #f
	as.double(rep(0, N)), #dii
	as.integer(N),  #n
	as.integer(nggroups),   #g
	as.double(rep(0, length(Ainv@i))), #xA
	as.integer(Ainv@i), #iA
	as.integer(Ainv@p), #pA
	as.integer(length(Ainv@i))) #nzmaxA
  Ainv <- as(Ainv, "dsCMatrix")
  Ainv@x <- Cout[[7]]   # computed numeric values for the lower-triangle pattern
  # permute A^-1 back to the input pedigree order
  fsOrd <- as(as.integer(renPed), "pMatrix")
  Ainv <- as(t(fsOrd) %*% Ainv %*% fsOrd, "dgCMatrix")
  if(ptype == "D"){
    # type "D" rows are named after the augmented (groups + ids) pedigree
    Ainv@Dimnames <- list(as.character(pedalt[, 1]), NULL)
    f <- Cout[[3]][t(fsOrd)@perm][-seq(nggroups)]
  } else {
    Ainv@Dimnames <- list(as.character(pedigree[, 1]), NULL)
    f <- c(rep(0, nggroups), Cout[[3]][t(fsOrd)@perm][(nggroups+1):(nggroups + eN)])
  }
  if(!is.null(ggroups) && !gOnTop){
    # reposition genetic group rows/columns when they should not lead A^-1
    permute <- as(as.integer(c(seq(eN+1, N, 1), seq(eN))), "pMatrix")
    Ainv <- t(permute) %*% Ainv %*% permute
  }
  if(det) logDet <- -1*determinant(Ainv, logarithm = TRUE)$modulus[1] else logDet <- NULL
  return(list(Ainv = Ainv,
	listAinv = sm2list(Ainv, rownames = rownames(Ainv), colnames = c("row", "column", "Ainv")),
	f = f,
	logDet = logDet))
}
###############################################################################
###############################################################################
# Fuzzy genetic group method for makeAinv(): builds A^-1 when phantom parents
# are assigned probabilistically to genetic groups through the fuzzy
# classification matrix `fuzz` (rows = phantom parents, columns = genetic
# groups, each row summing to 1).  The sparsity pattern is derived from the
# H matrix of Quaas (1988), H = [-Pb Qb : Tinv]; the numeric values are
# computed by the compiled routine "ainvfuzz".
#
# Returns the same list structure as makeAinv.default():
#   list(Ainv, listAinv, f, logDet)
makeAinv.fuzzy <- function(pedigree, f = NULL, ggroups = NULL, fuzz, gOnTop = FALSE, det = FALSE, ...){
  if(!is.null(ggroups)){
    # group information must come exclusively from the columns of `fuzz`
    # (the message previously had an unbalanced quote/parenthesis)
    stop("when 'fuzz' is non-NULL, 'ggroups' should not have any arguments (i.e., 'ggroups = NULL')")
  }
  # rows with an unknown dam (naPed2) or sire (naPed3): "0", "*", or NA
  naPed2 <- which(pedigree[, 2] == "0")
  naPed3 <- which(pedigree[, 3] == "0")
  naPed2 <- union(naPed2, which(pedigree[, 2] == "*"))
  naPed3 <- union(naPed3, which(pedigree[, 3] == "*"))
  naPed2 <- union(naPed2, which(is.na(pedigree[, 2])))
  naPed3 <- union(naPed3, which(is.na(pedigree[, 3])))
  # checks on fuzzy classification matrix and pedigree consistency:
  ## 'fuzz' is a type of matrix
  if(!is(fuzz, "matrix") && !is(fuzz, "Matrix")){
    cat("'fuzz' of class", class(fuzz), "\n")
    stop("class of 'fuzz' must be either 'matrix' or 'Matrix'")
  }
  ## rows of 'fuzz' add up to 1
  if(any(rowSums(fuzz) != 1)){
    cat("rows:", which(rowSums(fuzz) != 1), "\ndo not equal 1\n")
    stop("all rowSums(fuzz) must equal 1\n(check for possible rounding errors, e.g., 3*0.33333 != 1)")
  }
  ## fuzz has dimnames (scalar condition, so short-circuiting `||` is used)
  if(is.null(dimnames(fuzz)[[1]]) || is.null(dimnames(fuzz)[[2]])){
    stop("'fuzz' must have row and column names")
  }
  ## pedigree does not have genetic groups
  if(any(colnames(fuzz) %in% pedigree[, 1])){
    cat("colnames:", which(colnames(fuzz) %in% pedigree[, 1]), "\nare in 'pedigree'\n")
    stop("colnames of 'fuzz' (genetic groups) must NOT be identities in the first column of 'pedigree'")
  }
  ## pedigree has phantom parents in 'fuzz'
  if(!all(rownames(fuzz) %in% pedigree[, 1])){
    cat("rownames:", which(!rownames(fuzz) %in% pedigree[, 1]), "\nnot in 'pedigree'\n")
    stop("rownames of 'fuzz' (phantom parents) must all be identities in the first column of 'pedigree'. See the `prepPed()` function to help prepare the pedigree")
  }
  ## individuals can only have both parents missing in 'pedigree' or none
  if(length(naPed2) != length(naPed3) || any(!naPed2 %in% naPed3)){
    stop("Individuals must have either two or zero missing parents in 'pedigree'")
  }
  ## IDs with missing parents (if passed above check, naPed2==naPed3) in 'pedigree' are phantom parents in 'fuzz'
  if(!all(pedigree[naPed2, 1] %in% rownames(fuzz))){
    cat("IDs for 'pedigree' rows:", naPed2[which(!pedigree[naPed2, 1] %in% rownames(fuzz))], "\nare not rownames in 'fuzz'\n")
    stop("Individuals with missing parents (phantom individuals) must have a rowname in 'fuzz'")
  }
  # order of genetic groups in pedalt/A^-1 is same as column order of 'fuzz'
  ggroups <- colnames(fuzz)
  nggroups <- length(ggroups)   # No. genetic groups
  p <- nrow(fuzz)               # No. phantom parents
  eN <- nrow(pedigree) - p      # No. observed IDs
  N <- nggroups + eN            # No. GGs + observed IDs
  # order of phantom parents in pedalt is same as row order in 'fuzz'
  phantomPars <- seq(p) + nggroups
  groupRows <- seq(nggroups)
  # order pedigree: generations, order phantom parents in fuzz, dam, & sire
  ## genetic groups first
  renPed <- c(groupRows, nggroups + order(genAssign(pedigree), match(pedigree[, 1], rownames(fuzz), nomatch = p+1), pedigree[, 2], pedigree[, 3]))
  pedalt <- data.frame(id = c(ggroups, as.character(pedigree[, 1])), dam = c(rep(NA, nggroups), as.character(pedigree[, 2])), sire = c(rep(NA, nggroups), as.character(pedigree[, 3])))[renPed, ]
  nPed <- numPed(pedalt)
  # numeric pedigree with phantom parents removed; columns 4-5 retain the
  # phantom dam/sire codes from the full (augmented) pedigree
  phantomless <- cbind(numPed(nPed[-phantomPars, ], check = FALSE), nPed[-phantomPars, -1])
  if(!all(which(phantomless[, 4] == -998) == groupRows)){
    stop("Something wicked happened with the dams in phantomless numeric pedigree:\n Contact package maintainer: <matthewwolak@gmail.com>\n or raise an issue on: <https://github.com/matthewwolak/nadiv/issues>")
  }
  if(!all(which(phantomless[, 5] == -998) == groupRows)){
    stop("Something wicked happened with the sires in phantomless numeric pedigree:\n Contact package maintainer: <matthewwolak@gmail.com>\n or raise an issue on: <https://github.com/matthewwolak/nadiv/issues>")
  }
  # genetic groups classify to themselves with probability 1
  groupFuzz <- Diagonal(x = 1, n = nggroups)
  groupFuzz@Dimnames <- list(as.character(ggroups), as.character(ggroups))
  # rbind()/cbind() have sparse-aware S4 methods in Matrix; the old rBind()/
  # cBind() wrappers used here previously are deprecated/defunct
  fuzzmat <- rbind(groupFuzz, as(fuzz, "sparseMatrix"))
  # predict non-zero elements of Astar
  ## make H from Quaas 1988:
  ## H = [-Pb Qb : Tinv]
  ## Astar = H' %*% D^-1 %*% H
  ### sTinv: n x n observed IDs (i.e., no GGs or phantom parent IDs)
  dnmiss <- which(phantomless[, 2] != -998)
  snmiss <- which(phantomless[, 3] != -998)
  Tinv.row <- c(c(phantomless[, 1][dnmiss], phantomless[, 1][snmiss]) - nggroups, 1:eN)
  Tinv.col <- c(c(phantomless[, 2][dnmiss], phantomless[, 3][snmiss]) - nggroups, 1:eN)
  el.order <- order(Tinv.col + Tinv.row/(eN + 1), decreasing = FALSE)
  sTinv <- sparseMatrix(i = as.integer(Tinv.row[el.order] - 1),
	p = as.integer(c(match(1:eN, Tinv.col[el.order]), length(el.order) + 1) - 1),
	index1 = FALSE, dims = c(eN, eN), symmetric = FALSE,
	dimnames = list(as.character(phantomless[-groupRows, 1]), NULL))
  ### Pb: n x p version of sTinv
  pdnmiss <- which(phantomless[, 4] %in% phantomPars)
  psnmiss <- which(phantomless[, 5] %in% phantomPars)
  Pb.row <- c(phantomless[, 1][pdnmiss], phantomless[, 1][psnmiss]) - nggroups
  Pb.col <- c(phantomless[, 4][pdnmiss], phantomless[, 5][psnmiss]) - nggroups
  el.order <- order(Pb.col + Pb.row/(p + 1), decreasing = FALSE)
  sPb <- sparseMatrix(i = as.integer(Pb.row[el.order] - 1),
	p = as.integer(c(match(1:p, Pb.col[el.order]), length(el.order) + 1) - 1),
	index1 = FALSE, dims = c(eN, p), symmetric = FALSE,
	dimnames = list(NULL, as.character(pedalt[phantomPars, 1])))
  ### Qb is the fuzzy classification matrix ('fuzz')
  Qb <- as(fuzzmat[-groupRows, ][match(rownames(fuzzmat)[-groupRows], colnames(sPb)), ], "sparseMatrix")
  sQb <- sparseMatrix(i = Qb@i,
	p = Qb@p,
	index1 = FALSE, dims = Qb@Dim, symmetric = FALSE,
	dimnames = Qb@Dimnames)
  ## sH = [-(sPb %*% sQb) : sTinv]
  # the minus sign of the first block is omitted: only the non-zero PATTERN
  # of H is needed, since all values of A^-1 are overwritten after the C call
  sH <- cbind((sPb %*% sQb), sTinv)
  Ainv <- t(crossprod(sH)) # transpose stores lower triangle
  phantomless[phantomless == -998] <- N + 1   # recode missing for the C routine
  # for now, phantom parents cannot be inbred (just like genetic groups)
  f <- c(rep(-1, nggroups), rep(0, eN), -1)
  Cout <- .C("ainvfuzz",
	as.integer(phantomless[, 2] - 1), #dam
	as.integer(phantomless[, 3] - 1), #sire
	as.integer(phantomless[, 4] - 1), #phantom dam
	as.integer(phantomless[, 5] - 1), #phantom sire
	as.double(f),  #f
	as.double(rep(0, N)), #dii
	as.integer(N),  #n
	as.integer(nggroups),   #g
	as.double(fuzzmat@x), #xF
	as.integer(fuzzmat@i), #iF
	as.integer(fuzzmat@p), #pF
	as.double(rep(0, length(Ainv@i))), #xA
	as.integer(Ainv@i), #iA
	as.integer(Ainv@p)) #pA
  Ainv <- as(Ainv, "dsCMatrix")
  Ainv@x <- Cout[[12]]   # computed numeric values for the pattern built above
  # permute back: drop phantom parent rows and restore input pedigree order
  fsOrd1 <- as(as(as.integer(renPed), "pMatrix")[, -c(naPed2 + nggroups)], "CsparseMatrix")
  fsOrd <- as(as(fsOrd1 %*% matrix(seq(N), nrow = N), "sparseMatrix")@x, "pMatrix")
  Ainv <- as(t(fsOrd) %*% Ainv %*% fsOrd, "dgCMatrix")
  Ainv@Dimnames <- list(as.character(pedalt[(t(fsOrd1) %*% matrix(seq(N+p), ncol = 1))@x, 1]), NULL)
  f <- (fsOrd1 %*% Cout[[5]][-c(N+1)])@x[-groupRows]
  if(!gOnTop){
    # reposition genetic group rows/columns so they do not lead A^-1
    permute <- as(as.integer(c(seq(eN+1, N, 1), seq(eN))), "pMatrix")
    Ainv <- t(permute) %*% Ainv %*% permute
  }
  if(det) logDet <- -1*determinant(Ainv, logarithm = TRUE)$modulus[1] else logDet <- NULL
  return(list(Ainv = Ainv,
	listAinv = sm2list(Ainv, rownames = rownames(Ainv), colnames = c("row", "column", "Ainv")),
	f = f,
	logDet = logDet))
}
|
############################# DNN Model ##################################################
## Function DNN.test.training
## Parameters:
## list: data input
## epochs: number of epochs
## unitsi (i from 1 to 6): Number of neurons in each hidden layer
library(keras)
library(yardstick)
library(dplyr)
## Function DNN.test.training
##
## Trains and evaluates a feed-forward binary classifier (ReLU hidden layers,
## sigmoid output, binary cross-entropy loss, Adam optimizer) once per fold of
## a k-fold list, recording per-epoch learning curves, predictions and summary
## metrics.
##
## Parameters:
##   list:   k-fold input -- a list of data frames, each with a SET.IS column
##           ("TRAINING"/"TEST"), predictors LIMIT_BAL:PAY_AMT6 and a 0/1
##           DEFAULT outcome
##   epochs: number of training epochs
##   units1..units6: neurons per hidden layer; units3..units6 now default to
##           NULL so 2- or 4-hidden-layer networks can be requested by simply
##           omitting the trailing arguments.  (Previously all six were
##           required and always used, so the 2h/4h calls below failed with a
##           "missing argument" error.)
##
## Returns a list of 7 elements: loss/accuracy/val_loss/val_accuracy matrices
## (epochs x folds), test predictions per fold, metrics per fold, timings.
DNN.test.training <- function(list, epochs, units1, units2, units3 = NULL,
                              units4 = NULL, units5 = NULL, units6 = NULL){
  n.folds <- length(list)   # previously hard-coded to 10; now follows the input
  ## Per-epoch learning curves: one column per fold, one row per epoch
  results.epochs.loss <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.accuracy <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.val_loss <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.val_accuracy <- matrix(ncol = n.folds, nrow = epochs)
  results.predictions <- list()
  results.training <- list()
  # column names kept verbatim (including the "Acurracy" typo) so any
  # downstream code indexing these names keeps working
  results.metrics <- setNames(data.frame(matrix(ncol = 7, nrow = n.folds)),
                              c("Acurracy Test", "Acurracy Training", "AUC",
                                "PR AUC", "Precision", "Recall", "F1"))
  results.times <- setNames(data.frame(matrix(ncol = 3, nrow = n.folds)),
                            c("start", "end", "end_time"))
  ## Hidden-layer sizes actually requested; c() silently drops NULL entries
  hidden.units <- c(units1, units2, units3, units4, units5, units6)
  for(i in seq_along(list)){
    results.times[i, 1] <- Sys.time()
    ### Prepare data ######
    df <- list[[i]]
    df$DEFAULT <- as.numeric(as.character(df$DEFAULT))
    # Training split
    df.training <- df %>% filter(SET.IS == "TRAINING") %>% select(c(LIMIT_BAL:DEFAULT))
    df.training.targer <- as.matrix(df.training %>% select(DEFAULT))
    df.training <- df.training %>% select(LIMIT_BAL:PAY_AMT6)
    df.training <- as.matrix(df.training)
    dimnames(df.training) <- NULL
    # Validation (test) split
    df.validation <- df %>% filter(SET.IS == "TEST") %>% select(c(LIMIT_BAL:DEFAULT))
    df.validation.targer <- as.matrix(df.validation %>% select(DEFAULT))
    df.validation <- df.validation %>% select(LIMIT_BAL:PAY_AMT6)
    df.validation <- as.matrix(df.validation)
    dimnames(df.validation) <- NULL
    ### DNN Model ########
    use_session_with_seed(1)             # same seed every fold for reproducibility
    options(keras.view_metrics = FALSE)  # suppress the live loss/accuracy chart
    model <- keras_model_sequential() %>%
      layer_flatten(input_shape = dim(df.training)[2])   # input layer
    for(u in hidden.units){              # one dense ReLU layer per requested size
      model <- model %>% layer_dense(units = u, activation = "relu", use_bias = TRUE)
    }
    model <- model %>% layer_dense(1, activation = "sigmoid", use_bias = TRUE)  # output layer
    model %>%
      compile(loss = "binary_crossentropy", optimizer = "adam", metrics = "accuracy")
    history <- model %>%
      fit(
        df.training,
        df.training.targer,
        epochs = epochs,
        batch_size = 32,
        verbose = 0,            # do not print per-epoch progress
        validation_split = 0,
        validation_data = list(df.validation, df.validation.targer))
    # Learning curves for this fold
    results.epochs.loss[, i] <- history$metrics$loss          # training loss per epoch
    results.epochs.accuracy[, i] <- history$metrics$acc       # training accuracy per epoch
    results.epochs.val_loss[, i] <- history$metrics$val_loss  # validation loss per epoch
    results.epochs.val_accuracy[, i] <- history$metrics$val_acc  # validation accuracy per epoch
    # Class and probability predictions
    predictions <- model %>% predict_classes(df.validation)
    predictions.prob <- model %>% predict_proba(df.validation) %>% as.vector()
    predictions.training <- model %>% predict_classes(df.training)
    results.predictions[[i]] <- tibble::tibble(
      Real = as.factor(df.validation.targer),
      Estimate = as.factor(predictions),
      Prob = predictions.prob)
    results.training[[i]] <- tibble::tibble(
      Real.training = as.factor(df.training.targer),
      Estimate.training = as.factor(predictions.training))
    options(yardstick.event_first = FALSE)  # treat the second factor level (1) as the event
    results.metrics[i, 1] <- data.frame(results.predictions[[i]] %>% yardstick::metrics(Real, Estimate))[1, 3]  # accuracy test
    results.metrics[i, 2] <- data.frame(results.training[[i]] %>% yardstick::metrics(Real.training, Estimate.training))[1, 3]  # accuracy training
    results.metrics[i, 3] <- data.frame(results.predictions[[i]] %>% yardstick::roc_auc(Real, Prob))[, 3]  # ROC AUC
    results.metrics[i, 4] <- data.frame(results.predictions[[i]] %>% mutate(Estimate = as.numeric(as.character(Estimate))) %>% yardstick::pr_auc(Real, Estimate))[3]  # PR AUC
    results.metrics[i, 5] <- data.frame(results.predictions[[i]] %>% yardstick::precision(Real, Estimate))[, 3]  # Precision
    results.metrics[i, 6] <- data.frame(results.predictions[[i]] %>% yardstick::recall(Real, Estimate))[, 3]  # Recall
    results.metrics[i, 7] <- data.frame(results.predictions[[i]] %>% yardstick::f_meas(Real, Estimate, beta = 1))[, 3]  # F1
    results.times[i, 2] <- Sys.time()
    results.times[i, 3] <- (results.times[i, 2] - results.times[i, 1]) / 60  # elapsed minutes
  }
  return(list(results.epochs.loss, results.epochs.accuracy, results.epochs.val_loss,
              results.epochs.val_accuracy, results.predictions, results.metrics,
              results.times))
}
############### 2 hidden layers ######################################
# Train/evaluate on each encoding of the k-fold data (1000 epochs, 23 or 27
# neurons per hidden layer).
# NOTE(review): the 2h/4h calls omit units3..units6; DNN.test.training() must
# supply defaults for the trailing unit arguments (or skip absent layers) for
# these calls to run without a "missing argument" error -- verify against the
# function definition above.
# NOTE(review): "kfold.onehotEncoding.sale" looks like a typo for
# "kfold.onehotEncoding.scale" (cf. the other input objects) -- confirm the
# object name in the workspace.
results.dnn.targetmean.2h.1000<-DNN.test.training(kfold.targetMean.scale,1000,23,23)
results.dnn.frequency.2h.1000<-DNN.test.training(kfold.frequencyEncoding.scale,1000,23,23)
results.dnn.onehot.2h.1000<-DNN.test.training(kfold.onehotEncoding.sale,1000,27,27)
############### 4 Hidden layers #######################################
results.dnn.targetmean.4h.1000<-DNN.test.training(kfold.targetMean.scale,1000,23,23,23,23)
results.dnn.frequency.4h.1000<-DNN.test.training(kfold.frequencyEncoding.scale,1000,23,23,23,23)
results.dnn.onehot.4h.1000<-DNN.test.training(kfold.onehotEncoding.sale,1000,27,27,27,27)
############### 6 Hidden layers #######################################
# 6-layer runs use more epochs (1800)
results.dnn.targetmean.6h.1800<-DNN.test.training(kfold.targetMean.scale,1800,23,23,23,23,23,23)
results.dnn.frequency.6h.1800<-DNN.test.training(kfold.frequencyEncoding.scale,1800,23,23,23,23,23,23)
results.dnn.onehot.6h.1800<-DNN.test.training(kfold.onehotEncoding.sale,1800,27,27,27,27,27,27)
| /DNN Model.R | no_license | oordenesg/deep_learning_research_project | R | false | false | 6,383 | r |
############################# DNN Model ##################################################
## Function DNN.test.training
## Parameters:
## list: data input
## epochs: number of epochs
## unitsi (i from 1 to 6): Number of neurons in each hidden layer
library(keras)
library(yardstick)
library(dplyr)
## Function DNN.test.training
##
## Trains and evaluates a feed-forward binary classifier (ReLU hidden layers,
## sigmoid output, binary cross-entropy loss, Adam optimizer) once per fold of
## a k-fold list, recording per-epoch learning curves, predictions and summary
## metrics.
##
## Parameters:
##   list:   k-fold input -- a list of data frames, each with a SET.IS column
##           ("TRAINING"/"TEST"), predictors LIMIT_BAL:PAY_AMT6 and a 0/1
##           DEFAULT outcome
##   epochs: number of training epochs
##   units1..units6: neurons per hidden layer; units3..units6 now default to
##           NULL so 2- or 4-hidden-layer networks can be requested by simply
##           omitting the trailing arguments.  (Previously all six were
##           required and always used, so the 2h/4h calls below failed with a
##           "missing argument" error.)
##
## Returns a list of 7 elements: loss/accuracy/val_loss/val_accuracy matrices
## (epochs x folds), test predictions per fold, metrics per fold, timings.
DNN.test.training <- function(list, epochs, units1, units2, units3 = NULL,
                              units4 = NULL, units5 = NULL, units6 = NULL){
  n.folds <- length(list)   # previously hard-coded to 10; now follows the input
  ## Per-epoch learning curves: one column per fold, one row per epoch
  results.epochs.loss <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.accuracy <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.val_loss <- matrix(ncol = n.folds, nrow = epochs)
  results.epochs.val_accuracy <- matrix(ncol = n.folds, nrow = epochs)
  results.predictions <- list()
  results.training <- list()
  # column names kept verbatim (including the "Acurracy" typo) so any
  # downstream code indexing these names keeps working
  results.metrics <- setNames(data.frame(matrix(ncol = 7, nrow = n.folds)),
                              c("Acurracy Test", "Acurracy Training", "AUC",
                                "PR AUC", "Precision", "Recall", "F1"))
  results.times <- setNames(data.frame(matrix(ncol = 3, nrow = n.folds)),
                            c("start", "end", "end_time"))
  ## Hidden-layer sizes actually requested; c() silently drops NULL entries
  hidden.units <- c(units1, units2, units3, units4, units5, units6)
  for(i in seq_along(list)){
    results.times[i, 1] <- Sys.time()
    ### Prepare data ######
    df <- list[[i]]
    df$DEFAULT <- as.numeric(as.character(df$DEFAULT))
    # Training split
    df.training <- df %>% filter(SET.IS == "TRAINING") %>% select(c(LIMIT_BAL:DEFAULT))
    df.training.targer <- as.matrix(df.training %>% select(DEFAULT))
    df.training <- df.training %>% select(LIMIT_BAL:PAY_AMT6)
    df.training <- as.matrix(df.training)
    dimnames(df.training) <- NULL
    # Validation (test) split
    df.validation <- df %>% filter(SET.IS == "TEST") %>% select(c(LIMIT_BAL:DEFAULT))
    df.validation.targer <- as.matrix(df.validation %>% select(DEFAULT))
    df.validation <- df.validation %>% select(LIMIT_BAL:PAY_AMT6)
    df.validation <- as.matrix(df.validation)
    dimnames(df.validation) <- NULL
    ### DNN Model ########
    use_session_with_seed(1)             # same seed every fold for reproducibility
    options(keras.view_metrics = FALSE)  # suppress the live loss/accuracy chart
    model <- keras_model_sequential() %>%
      layer_flatten(input_shape = dim(df.training)[2])   # input layer
    for(u in hidden.units){              # one dense ReLU layer per requested size
      model <- model %>% layer_dense(units = u, activation = "relu", use_bias = TRUE)
    }
    model <- model %>% layer_dense(1, activation = "sigmoid", use_bias = TRUE)  # output layer
    model %>%
      compile(loss = "binary_crossentropy", optimizer = "adam", metrics = "accuracy")
    history <- model %>%
      fit(
        df.training,
        df.training.targer,
        epochs = epochs,
        batch_size = 32,
        verbose = 0,            # do not print per-epoch progress
        validation_split = 0,
        validation_data = list(df.validation, df.validation.targer))
    # Learning curves for this fold
    results.epochs.loss[, i] <- history$metrics$loss          # training loss per epoch
    results.epochs.accuracy[, i] <- history$metrics$acc       # training accuracy per epoch
    results.epochs.val_loss[, i] <- history$metrics$val_loss  # validation loss per epoch
    results.epochs.val_accuracy[, i] <- history$metrics$val_acc  # validation accuracy per epoch
    # Class and probability predictions
    predictions <- model %>% predict_classes(df.validation)
    predictions.prob <- model %>% predict_proba(df.validation) %>% as.vector()
    predictions.training <- model %>% predict_classes(df.training)
    results.predictions[[i]] <- tibble::tibble(
      Real = as.factor(df.validation.targer),
      Estimate = as.factor(predictions),
      Prob = predictions.prob)
    results.training[[i]] <- tibble::tibble(
      Real.training = as.factor(df.training.targer),
      Estimate.training = as.factor(predictions.training))
    options(yardstick.event_first = FALSE)  # treat the second factor level (1) as the event
    results.metrics[i, 1] <- data.frame(results.predictions[[i]] %>% yardstick::metrics(Real, Estimate))[1, 3]  # accuracy test
    results.metrics[i, 2] <- data.frame(results.training[[i]] %>% yardstick::metrics(Real.training, Estimate.training))[1, 3]  # accuracy training
    results.metrics[i, 3] <- data.frame(results.predictions[[i]] %>% yardstick::roc_auc(Real, Prob))[, 3]  # ROC AUC
    results.metrics[i, 4] <- data.frame(results.predictions[[i]] %>% mutate(Estimate = as.numeric(as.character(Estimate))) %>% yardstick::pr_auc(Real, Estimate))[3]  # PR AUC
    results.metrics[i, 5] <- data.frame(results.predictions[[i]] %>% yardstick::precision(Real, Estimate))[, 3]  # Precision
    results.metrics[i, 6] <- data.frame(results.predictions[[i]] %>% yardstick::recall(Real, Estimate))[, 3]  # Recall
    results.metrics[i, 7] <- data.frame(results.predictions[[i]] %>% yardstick::f_meas(Real, Estimate, beta = 1))[, 3]  # F1
    results.times[i, 2] <- Sys.time()
    results.times[i, 3] <- (results.times[i, 2] - results.times[i, 1]) / 60  # elapsed minutes
  }
  return(list(results.epochs.loss, results.epochs.accuracy, results.epochs.val_loss,
              results.epochs.val_accuracy, results.predictions, results.metrics,
              results.times))
}
############### 2 hidden layers ######################################
# Train/evaluate on each encoding of the k-fold data (1000 epochs, 23 or 27
# neurons per hidden layer).
# NOTE(review): the 2h/4h calls omit units3..units6; DNN.test.training() must
# supply defaults for the trailing unit arguments (or skip absent layers) for
# these calls to run without a "missing argument" error -- verify against the
# function definition above.
# NOTE(review): "kfold.onehotEncoding.sale" looks like a typo for
# "kfold.onehotEncoding.scale" (cf. the other input objects) -- confirm the
# object name in the workspace.
results.dnn.targetmean.2h.1000<-DNN.test.training(kfold.targetMean.scale,1000,23,23)
results.dnn.frequency.2h.1000<-DNN.test.training(kfold.frequencyEncoding.scale,1000,23,23)
results.dnn.onehot.2h.1000<-DNN.test.training(kfold.onehotEncoding.sale,1000,27,27)
############### 4 Hidden layers #######################################
results.dnn.targetmean.4h.1000<-DNN.test.training(kfold.targetMean.scale,1000,23,23,23,23)
results.dnn.frequency.4h.1000<-DNN.test.training(kfold.frequencyEncoding.scale,1000,23,23,23,23)
results.dnn.onehot.4h.1000<-DNN.test.training(kfold.onehotEncoding.sale,1000,27,27,27,27)
############### 6 Hidden layers #######################################
# 6-layer runs use more epochs (1800)
results.dnn.targetmean.6h.1800<-DNN.test.training(kfold.targetMean.scale,1800,23,23,23,23,23,23)
results.dnn.frequency.6h.1800<-DNN.test.training(kfold.frequencyEncoding.scale,1800,23,23,23,23,23,23)
results.dnn.onehot.6h.1800<-DNN.test.training(kfold.onehotEncoding.sale,1800,27,27,27,27,27,27)
|
## Upload Olivier cocaine reward data onto the WFU PostgreSQL database.
##
## Expects `Olivier_Cocaine_df_sql` (a data frame with rfid/exp/cohort
## columns) to already exist in the workspace, plus a local PostgreSQL server
## hosting database "U01".
## SECURITY NOTE(review): credentials are hard-coded below -- move them to
## environment variables or a ~/.pgpass file before sharing this script.
library(RPostgreSQL)
# set up the connection
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user='postgres', password='postgres', dbname='U01')
## PRACTICE FOR CODE SESSION 06/10/2020
# keep one row per (rfid, exp) pair so the table's primary key is unique
upload_practice <- Olivier_Cocaine_df_sql %>% distinct(rfid, exp, .keep_all = TRUE) ## temporary
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards"), value = upload_practice, row.names = FALSE)
dbExecute(con,"ALTER TABLE u01_olivier_george_cocaine.olivier_rewards ADD PRIMARY KEY(rfid,exp)")
# troubleshoot: drop the table so it can be recreated from scratch
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards")
# write data frame to database
## To add new dataframes (by cohort): write the new rows to a temporary table
## with dbWriteTable, then issue an SQL INSERT ... SELECT to append them to
## the main table, as you would within the db environment.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user='postgres', password='postgres', dbname='U01')
# update_sql_db <- function(df, df2){
#   df_to_add <- anti_join(df2, df)
#
# }
test1 <- Olivier_Cocaine_df_sql %>% distinct() %>% subset(cohort != "C11")   # existing cohorts
test2 <- Olivier_Cocaine_df_sql %>% distinct() %>% subset(cohort == "C11")   # new cohort to append
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards"), value = test1, row.names = FALSE)
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards_temp"), value = test2, row.names = FALSE)
dbExecute(con,"ALTER TABLE u01_olivier_george_cocaine.olivier_rewards ADD PRIMARY KEY(rfid,exp)")
dbExecute(con,"insert into u01_olivier_george_cocaine.olivier_rewards select * from u01_olivier_george_cocaine.olivier_rewards_temp")
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards_temp")
# for troubleshooting
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards")
# disconnect
dbDisconnect(con)
## The commands below are SHELL commands, not R.  They were previously pasted
## in unquoted, which made this file un-sourceable; kept as comments for
## reference (dump the table, then copy the dump into Dropbox):
#   cd /tmp
#   sudo su postgres
#   pg_dump -d U01 -t u01_olivier_george_cocaine.olivier_rewards > olivier_rewards.sql
#   exit
#   cp olivier_rewards.sql /home/bonnie/Dropbox\ \(Palmer\ Lab\)/PalmerLab_Datasets/u01_george_oliviercocaine/database
## "operation not permitted":
# To move a file or directory, you need to have write permissions on both SOURCE and DESTINATION. Otherwise, you will receive a permission denied error.
##
#   mkdir {C01,C02,C03,C04,C05,C06,C07,C08,C09,C10,C11}
| /CREATE/R_sql_pgdump.R | no_license | BeverlyPeng/Olivier_U01Cocaine | R | false | false | 2,337 | r | ## upload data onto wfu
## Upload Olivier cocaine reward data onto the WFU PostgreSQL database.
## Expects `Olivier_Cocaine_df_sql` in the workspace and a local PostgreSQL
## server hosting database "U01".
## SECURITY NOTE(review): credentials are hard-coded below -- move them to
## environment variables or a ~/.pgpass file before sharing this script.
library(RPostgreSQL)
# set up the connection
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user='postgres', password='postgres', dbname='U01')
## PRACTICE FOR CODE SESSION 06/10/2020
# keep one row per (rfid, exp) pair so the table's primary key is unique
upload_practice <- Olivier_Cocaine_df_sql %>% distinct(rfid, exp, .keep_all = TRUE) ## temporary
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards"), value = upload_practice, row.names = FALSE)
dbExecute(con,"ALTER TABLE u01_olivier_george_cocaine.olivier_rewards ADD PRIMARY KEY(rfid,exp)")
# troubleshoot: drop the table so it can be recreated from scratch
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards")
# write data frame to database
## To add new dataframes (by cohort): write the new rows to a temporary table
## with dbWriteTable, then issue an SQL INSERT ... SELECT to append them to
## the main table, as you would within the db environment.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user='postgres', password='postgres', dbname='U01')
# update_sql_db <- function(df, df2){
#   df_to_add <- anti_join(df2, df)
#
# }
test1 <- Olivier_Cocaine_df_sql %>% distinct() %>% subset(cohort != "C11")   # existing cohorts
test2 <- Olivier_Cocaine_df_sql %>% distinct() %>% subset(cohort == "C11")   # new cohort to append
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards"), value = test1, row.names = FALSE)
dbWriteTable(con, c("u01_olivier_george_cocaine","olivier_rewards_temp"), value = test2, row.names = FALSE)
dbExecute(con,"ALTER TABLE u01_olivier_george_cocaine.olivier_rewards ADD PRIMARY KEY(rfid,exp)")
dbExecute(con,"insert into u01_olivier_george_cocaine.olivier_rewards select * from u01_olivier_george_cocaine.olivier_rewards_temp")
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards_temp")
# for troubleshooting
dbExecute(con,"drop table if exists u01_olivier_george_cocaine.olivier_rewards")
# disconnect
dbDisconnect(con)
## The commands below are SHELL commands, not R.  They were previously pasted
## in unquoted, which made this file un-sourceable; kept as comments for
## reference (dump the table, then copy the dump into Dropbox):
#   cd /tmp
#   sudo su postgres
#   pg_dump -d U01 -t u01_olivier_george_cocaine.olivier_rewards > olivier_rewards.sql
#   exit
#   cp olivier_rewards.sql /home/bonnie/Dropbox\ \(Palmer\ Lab\)/PalmerLab_Datasets/u01_george_oliviercocaine/database
## "operation not permitted":
# To move a file or directory, you need to have write permissions on both SOURCE and DESTINATION. Otherwise, you will receive a permission denied error.
##
#   mkdir {C01,C02,C03,C04,C05,C06,C07,C08,C09,C10,C11}
|
## Family BMI assignment: compute BMI from weight/height, flag overweight
## members, and plot age against BMI coloured by gender.
load("family.rda")
# After loading the dataset above, you will have the following
# objects:
#
# > ls()
# [1] "fage"    "fgender" "fheight" "fnames"  "fweight"

# (1 point) Create a numeric vector fbmi
# A person's body mass index (BMI) should be calculated as their
# weight divided by their squared heights multiplied by 703
fbmi <- fweight / fheight^2 * 703

# (1 point) Create a logical vector foverWt
# A person should be considered overweight if their BMI is greater than 25
foverWt <- fbmi > 25

# (1 point) Create a dataframe family
# The dataframe should contain fnames, fgender, fage, fheight, fweight, fbmi,
# and foverWt. The names should be as follows:
#
# > names(family)
# [1] "name"   "gender" "age"    "height" "weight" "bmi"    "overWt"
family <- data.frame(fnames, fgender, fage, fheight, fweight, fbmi, foverWt)
names(family) <- c("name", "gender", "age", "height", "weight", "bmi", "overWt")

# (5 points) Plot age vs bmi
# Age (x-axis, 23 to 80) against BMI (y-axis, 16 to 31); males as red
# circles, females as blue circles.
family.male <- family[family$gender == 'm', ]
family.female <- family[family$gender == 'f', ]
# BUG FIX: the axis labels were swapped relative to the plotted data
# (x carries age, y carries bmi)
plot(x = family.female$age, y = family.female$bmi, xlim = c(23, 80),
     ylim = c(16, 31), col = 'blue', pch = 'o', xlab = "age", ylab = "bmi")
# points() adds to the existing plot; xlim/ylim/xlab/ylab are fixed by the
# plot() call above and are not meaningful arguments here
points(x = family.male$age, y = family.male$bmi, col = 'red', pch = 'o')
legend("topright", c("male", "female"), pch = c('o', 'o'),
col=c('red', 'blue')) | /midterm/ex1.r | no_license | jhkim81691/stat133 | R | false | false | 1,881 | r | load("family.rda")
# After loading the dataset above, you will have the following
# objects:
#
# > ls()
# [1] "fage"    "fgender" "fheight" "fnames"  "fweight"

# (1 point) Create a numeric vector fbmi
# A person's body mass index (BMI) should be calculated as their
# weight divided by their squared heights multiplied by 703
fbmi <- fweight / fheight^2 * 703

# (1 point) Create a logical vector foverWt
# A person should be considered overweight if their BMI is greater than 25
foverWt <- fbmi > 25

# (1 point) Create a dataframe family
# The dataframe should contain fnames, fgender, fage, fheight, fweight, fbmi,
# and foverWt. The names should be as follows:
#
# > names(family)
# [1] "name"   "gender" "age"    "height" "weight" "bmi"    "overWt"
family <- data.frame(fnames, fgender, fage, fheight, fweight, fbmi, foverWt)
names(family) <- c("name", "gender", "age", "height", "weight", "bmi", "overWt")

# (5 points) Plot age vs bmi
# Age (x-axis, 23 to 80) against BMI (y-axis, 16 to 31); males as red
# circles, females as blue circles, with a legend in the top-right corner.
family.male <- family[family$gender == 'm', ]
family.female <- family[family$gender == 'f', ]
# BUG FIX: the axis labels were swapped relative to the plotted data
# (x carries age, y carries bmi)
plot(x = family.female$age, y = family.female$bmi, xlim = c(23, 80),
     ylim = c(16, 31), col = 'blue', pch = 'o', xlab = "age", ylab = "bmi")
# points() adds to the existing plot; xlim/ylim/xlab/ylab are fixed by the
# plot() call above and are not meaningful arguments here
points(x = family.male$age, y = family.male$bmi, col = 'red', pch = 'o')
legend("topright", c("male", "female"), pch = c('o', 'o'),
       col = c('red', 'blue'))
## 24th May 2017
## BISCUIT postprocessing
##
## Code author SP
##
###
###
# Dispatch the BISCUIT post-processing scripts: when imputation ran as a
# single gene batch, source the "onegenebatch" variants; otherwise source the
# batched versions.
# NOTE(review): `num_gene_batches` must already be defined in the calling
# environment (set by the main BISCUIT driver) before this file is sourced.
if(num_gene_batches ==1){
    source("BISCUIT_parallel_impute_onegenebatch.R")
    source("BISCUIT_extras_onegenebatch_1.R")
    source("Per_marker_histogram.R")
}else{
    source("BISCUIT_parallel_impute.R")
    source("BISCUIT_extras.R")
}
| /superpixel_MIBI_python/BISCUIT_post_process.R | no_license | sandhya212/CSI_MIBI_Model | R | false | false | 333 | r | ## 24th May 2017
## BISCUIT postprocessing
##
## Code author SP
##
###
###
# Dispatch the BISCUIT post-processing scripts: when imputation ran as a
# single gene batch, source the "onegenebatch" variants; otherwise source the
# batched versions.
# NOTE(review): `num_gene_batches` must already be defined in the calling
# environment (set by the main BISCUIT driver) before this file is sourced.
if(num_gene_batches ==1){
    source("BISCUIT_parallel_impute_onegenebatch.R")
    source("BISCUIT_extras_onegenebatch_1.R")
    source("Per_marker_histogram.R")
}else{
    source("BISCUIT_parallel_impute.R")
    source("BISCUIT_extras.R")
}
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Epi786FirstRound
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the study cohort table and instantiate every cohort listed in the
# package's settings/CohortsToCreate.csv, then write per-cohort row counts
# to <outputFolder>/CohortCounts.csv.
#
# Args:
#   connection               DatabaseConnector connection to the server.
#   cdmDatabaseSchema        Schema holding the OMOP CDM data.
#   vocabularyDatabaseSchema Schema holding the vocabulary tables
#                            (defaults to the CDM schema).
#   cohortDatabaseSchema     Schema in which the cohort table is created.
#   cohortTable              Name of the cohort table.
#   oracleTempSchema         Temp-table schema (needed on Oracle).
#   outputFolder             Directory that receives CohortCounts.csv.
#
# Called for its database side effects; the CSV on disk is the output.
.createCohorts <- function(connection,
                           cdmDatabaseSchema,
                           vocabularyDatabaseSchema = cdmDatabaseSchema,
                           cohortDatabaseSchema,
                           cohortTable,
                           oracleTempSchema,
                           outputFolder) {
  # Create study cohort table structure:
  sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "CreateCohortTable.sql",
                                           packageName = "Epi786FirstRound",
                                           dbms = attr(connection, "dbms"),
                                           oracleTempSchema = oracleTempSchema,
                                           cohort_database_schema = cohortDatabaseSchema,
                                           cohort_table = cohortTable)
  DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)

  # Instantiate cohorts.  seq_len() guards against an empty CSV, where the
  # original 1:nrow(...) would have iterated over c(1, 0) and errored.
  pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "Epi786FirstRound")
  cohortsToCreate <- read.csv(pathToCsv)
  for (i in seq_len(nrow(cohortsToCreate))) {
    writeLines(paste("Creating cohort:", cohortsToCreate$name[i]))
    # Each cohort has a SQL file named after it in the package.
    sql <- SqlRender::loadRenderTranslateSql(sqlFilename = paste0(cohortsToCreate$name[i], ".sql"),
                                             packageName = "Epi786FirstRound",
                                             dbms = attr(connection, "dbms"),
                                             oracleTempSchema = oracleTempSchema,
                                             cdm_database_schema = cdmDatabaseSchema,
                                             vocabulary_database_schema = vocabularyDatabaseSchema,
                                             target_database_schema = cohortDatabaseSchema,
                                             target_cohort_table = cohortTable,
                                             target_cohort_id = cohortsToCreate$cohortId[i])
    DatabaseConnector::executeSql(connection, sql)
  }

  # Fetch cohort counts and attach the human-readable cohort names:
  sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @cohort_database_schema.@cohort_table GROUP BY cohort_definition_id"
  sql <- SqlRender::render(sql,
                           cohort_database_schema = cohortDatabaseSchema,
                           cohort_table = cohortTable)
  sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"))
  counts <- DatabaseConnector::querySql(connection, sql)
  names(counts) <- SqlRender::snakeCaseToCamelCase(names(counts))
  counts <- merge(counts, data.frame(cohortDefinitionId = cohortsToCreate$cohortId,
                                     cohortName = cohortsToCreate$name))
  write.csv(counts, file.path(outputFolder, "CohortCounts.csv"))
}
| /Code/R/CreateCohorts.R | permissive | ChenyuL/Covid19EstimationFamotidine | R | false | false | 3,534 | r | # Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Epi786FirstRound
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.createCohorts <- function(connection,
cdmDatabaseSchema,
vocabularyDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema,
cohortTable,
oracleTempSchema,
outputFolder) {
# Create study cohort table structure:
sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "CreateCohortTable.sql",
packageName = "Epi786FirstRound",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema,
cohort_database_schema = cohortDatabaseSchema,
cohort_table = cohortTable)
DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)
# Instantiate cohorts:
pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "Epi786FirstRound")
cohortsToCreate <- read.csv(pathToCsv)
for (i in 1:nrow(cohortsToCreate)) {
writeLines(paste("Creating cohort:", cohortsToCreate$name[i]))
sql <- SqlRender::loadRenderTranslateSql(sqlFilename = paste0(cohortsToCreate$name[i], ".sql"),
packageName = "Epi786FirstRound",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema,
cdm_database_schema = cdmDatabaseSchema,
vocabulary_database_schema = vocabularyDatabaseSchema,
target_database_schema = cohortDatabaseSchema,
target_cohort_table = cohortTable,
target_cohort_id = cohortsToCreate$cohortId[i])
DatabaseConnector::executeSql(connection, sql)
}
# Fetch cohort counts:
sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @cohort_database_schema.@cohort_table GROUP BY cohort_definition_id"
sql <- SqlRender::render(sql,
cohort_database_schema = cohortDatabaseSchema,
cohort_table = cohortTable)
sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"))
counts <- DatabaseConnector::querySql(connection, sql)
names(counts) <- SqlRender::snakeCaseToCamelCase(names(counts))
counts <- merge(counts, data.frame(cohortDefinitionId = cohortsToCreate$cohortId,
cohortName = cohortsToCreate$name))
write.csv(counts, file.path(outputFolder, "CohortCounts.csv"))
}
|
# ---------------------------------------------------------------------------
# Create Map Graph
# (The original '////...' divider lines were not valid R comments -- '/' is
# the division operator -- and would have caused a parse error.)
# ---------------------------------------------------------------------------
library(ggmap)
library(osmar)
library(prettymapr)

# Import OSM data for a 300 m x 300 m box around King's Cross, London
src <- osmsource_api()
bb <- center_bbox(-0.129390, 51.532401, 300, 300)
#bb <- osmar::corner_bbox(-0.1430397, 51.5265924, -0.1214227, 51.5400746)
ua <- osmar::get_osm(bb, source = src)

# The osmar object has all the 'nodes' and 'ways' associated with the data.
# Two ways of visualizing:
osmar::plot_nodes(ua)
osmar::plot_ways(ua)

# Subset the data such that we only have data associated with the roads
hways_data <- subset(ua, way_ids = find(ua, way(tags(k == "highway"))))
hways <- find(hways_data, way(tags(k == "name")))
hways <- find_down(ua, way(hways))
hways_data <- subset(ua, ids = hways)
plot(hways_data)

# Visualization: blue ways with small red nodes on a black background
par(bg = "black")
osmar::plot_ways(hways_data, col = "blue", bg = 'green')
osmar::plot_nodes(hways_data, pch = 19, cex = 0.1, add = TRUE, col = "red")

# Convert to an (undirected) igraph graph and plot it
hways_graph <- as_igraph(hways_data)
hways_graph <- as.undirected(hways_graph)
plot(hways_graph, vertex.label = NA, vertex.size = 4, edge.color = "blue")
# ---------------------------------------------------------------------------
# Let's get the trace data: public GPS trackpoints from the OSM API.
library(plotKML)
library(urltools)
library(rgdal)

# Template URL; the bbox and page parameters are filled in via param_set().
# (Removed unused scratch variables 'mat'/'val' from the original.)
url <- "http://api.openstreetmap.org/api/0.6/trackpoints?bbox=12&page=pagenumber"
url <- param_set(url, key = "bbox", value = "-0.1430397,51.5265924,-0.1214227,51.5400746")
# Download the first 21 pages of trackpoints into the working directory.
for (i in 0:20) {
  url <- param_set(url, key = "page", value = i)
  filename <- paste(i, "trace_data.gpx")
  download.file(url, destfile = filename)
}

# Read in GPX files that are located in the working directory
files <- dir(pattern = "\\.gpx")
#route <- readOGR(files[2],"tracks")
#coordinates = coordinates(route)[[1]][[1]]

# Accumulate coordinates from every file.  'index' identifies the source
# file, so geom_path draws each trace as its own path.  (The original used
# index_el <- 0:nrow(coordinates), which is one element LONGER than the
# latitude/longitude vectors and made the later cbind silently recycle.)
index <- c()
latitude <- c()
longitude <- c()
for (i in seq_along(files)) {
  route <- readOGR(files[i], "tracks")
  coords <- coordinates(route)[[1]][[1]]
  index <- c(index, rep(i, nrow(coords)))
  latitude <- c(latitude, coords[, 2])
  longitude <- c(longitude, coords[, 1])
}
routes <- data.frame(index, latitude, longitude)

# Get the map of London (King's Cross) from Google
london_map <- qmap('kings cross station', zoom = 18, color = 'bw')
# Plot the map and the routes on top of that
london_map +
  geom_path(aes(x = longitude, y = latitude, group = factor(index)),
            colour = "#5F35D8", data = routes, alpha = 0.3)
| /OSM_ver/CW_GPS.R | no_license | soma11soma11/GPS_trace | R | false | false | 2,559 | r | ////////////////////////////////////////////////////////////
#Create Map Graph
library (ggmap)
library (osmar)
library(prettymapr)
#Import data on Greater London
src <- osmsource_api ()
bb <- center_bbox(-0.129390, 51.532401, 300, 300)
#bb <- osmar::corner_bbox(-0.1430397, 51.5265924, -0.1214227, 51.5400746)
ua <- osmar::get_osm(bb, source = src)
#The osmdata contains an osmar object, which has all the 'nodes' and 'ways' associated with the data.
#Two ways of visualizing
osmar::plot_nodes(ua)
osmar::plot_ways(ua)
#Subset the data such that we only have data associated with the roads
hways_data <- subset(ua, way_ids = find(ua, way(tags(k == "highway"))))
hways <- find(hways_data, way(tags(k == "name")))
hways <- find_down(ua, way(hways))
hways_data <- subset(ua, ids = hways)
plot(hways_data)
#visualization
par(bg = "black")
osmar::plot_ways(hways_data, col="blue", bg ='green')
osmar::plot_nodes(hways_data, pch=19, cex=0.1, add=T, col="red")
#convert to igraph
hways_graph = as_igraph(hways_data)
hways_graph = as.undirected(hways_graph)
plot(hways_graph, vertex.label=NA, vertex.size=4, edge.color="blue")
////////////////////////////////////////////////////////////
#Let's get the trace data
library(plotKML)
library(urltools)
library(rgdal)
url = "http://api.openstreetmap.org/api/0.6/trackpoints?bbox=12&page=pagenumber"
mat = matrix(bb)[,1]
val = capture.output(mat[])
url <- param_set(url, key = "bbox", value = "-0.1430397,51.5265924,-0.1214227,51.5400746")
for (i in 0:20){
url <- param_set(url, key = "page", value = i)
filename <- paste(i,"trace_data.gpx")
download.file(url, destfile=filename)
}
#Read in GPX files that are located in the working directory
files <- dir(pattern = "\\.gpx")
#route <- readOGR(files[2],"tracks")
#coordinates = coordinates(route)[[1]][[1]]
#Set up the three vectors to hold your coordinate and path data
index <- c()
latitude <- c()
longitude <- c()
for (i in 1:length(files)) {
route <- readOGR(files[i], "tracks")
coordinates = coordinates(route)[[1]][[1]]
index_el <- 0 : nrow(coordinates)
index <- c(index, index_el)
latitude <- c(latitude, coordinates[,2])
longitude <- c(longitude, coordinates[,1])
}
routes <- data.frame(cbind(index, latitude, longitude))
#Get the map of Amsterdam from Google
london_map <- qmap('kings cross station', zoom =18, color = 'bw')
#Plot the map and the routes on top of that
london_map +
geom_path(aes(x = longitude, y = latitude, group = factor(index)),
colour="#5F35D8", data = routes, alpha=0.3)
|
##################################################################################
##### 1 - INITIALIZE SCRIPT PARAMETERS
##################################################################################
## Load the needed libraries and functions
library(gdata)
library(tm)
library(SnowballC)
## Set script parameters
set.seed(12345) # for repeatability of random numbers
#datafileName<-"c:\\idm\\Step4ReducedNatGeoFileAP1.xlsx" # source file
datafileName<-"c:\\idm\\Step4ReducedNatGeoFileAPminusModelA.xlsx" # source file
outputFile<-"c:\\idm\\term.csv"
##################################################################################
##### 2 - READ DATA FILE INTO R AND PERFORM DATA PREPROCESSING
##################################################################################
## Read file into R
df <- read.xls(datafileName, sheet = 1, header = TRUE)
numSuppliers <- length(unique(df$Supplier.ID))
## Create blank result frame: one row per supplier, IDs in ascending order
supplier <- sort(unique(df$Supplier.ID), decreasing = FALSE)
description <- rep("", numSuppliers)
procdf <- data.frame(supplier, description)
procdf$description <- as.character(procdf$description)
## For each unique supplier, concatenate all of its text (3rd column of the
## matching rows), stem it, and drop duplicate words.
for (ctr in seq_len(numSuppliers)) {
  # Rows of the original frame belonging to supplier[ctr]
  idx <- which(df$Supplier.ID == supplier[ctr])
  # Concatenate the text of every matching row with no separator.  This
  # replaces the original O(n^2) grow-by-paste inner loop with a single
  # vectorized call producing the same string.
  workingText <- paste0(df[idx, 3], collapse = "")
  # NOTE(review): wordStem() stems individual words; applied to one long
  # string it treats the whole string as a single token -- confirm intent.
  workingText <- wordStem(workingText, language = 'porter')
  # Remove duplicate words from the text post stemming and assign
  # back into procdf
  procdf[ctr, 2] <-
    vapply(lapply(strsplit(workingText, " "), unique),
           paste, character(1L), collapse = " ")
}
##################################################################################
##### 3 - CREATE DTM AND TFXIDF MATRICES
##################################################################################
## Create corpus object and remove english stop words
corpus<-Corpus(VectorSource(procdf$description))
cleancorpus<-tm_map(corpus,removeWords,stopwords("english"))
cleancorpus<-tm_map(cleancorpus,removeNumbers)
cleancorpus<-tm_map(cleancorpus,removePunctuation)
## Create document term matrix
dtm<-DocumentTermMatrix(cleancorpus)
## Create term frequency - Inverse Document frequency matrix
dtm_tfxidf<-weightTfIdf(dtm)
m<-as.matrix(dtm_tfxidf) # convert into matrix object
rownames(m) <- 1:nrow(m) # give appropriate row names (?)
# Identify documents whose tf-idf row is all zeros (empty after cleaning).
# NOTE(review): 'erridx' and 'zerov' are computed but not used later in this
# script -- confirm whether they can be removed.
test<-rowSums(m)
erridx<-which(test==0)
zerov<-rep(0,ncol(m))
##################################################################################
##### 4 - RUN K-MEANS CLUSTERING ON DATA
##################################################################################
# Normalize each row to unit Euclidean length.  All-zero rows divide by
# zero here and become NaN, which kmeans() cannot handle -- likely the
# reason for the comment below; confirm handling of those rows.
norm_eucl <- function(m) m/apply(m, MARGIN=1, FUN=function(x) sum(x^2)^.5)
m_norm <- norm_eucl(m)
# we have four rows that are full of zeroes
cl <- kmeans(m_norm, 4)
##################################################################################
##### 5 - PREPARE DATAFRAME TO INSPECT TERMS MANUALLY
##################################################################################
tdm<-TermDocumentMatrix(cleancorpus)
workingm<-as.matrix(tdm)
# Term frequency table sorted most-frequent first.
v <- sort(rowSums(workingm),decreasing=TRUE)
termsFreqdf <- data.frame(word = names(v),freq=v)
terms<-as.character(tdm[[6]][1])
# Find all suppliers whose text contains the term of interest and write
# their IDs to the output CSV.
termofInterest<-c("royalties")
container<-subset(workingm,rownames(workingm)==termofInterest)
docsWithTerm<-which(container>0)
qualifyingSuppliers<-supplier[docsWithTerm]
write.csv(qualifyingSuppliers,outputFile)
| /R1.3/TextClusteringonNatGeoforClassification.R | no_license | nandatascientist/XeevaIDM | R | false | false | 4,305 | r |
##################################################################################
##### 1 - INITIALIZE SCRIPT PARAMETERS
##################################################################################
## Load the needed libraries and functions
library(gdata)
library(tm)
library(SnowballC)
## Set script parameters
set.seed(12345) # for repeatability of random numbers
#datafileName<-"c:\\idm\\Step4ReducedNatGeoFileAP1.xlsx" # source file
datafileName<-"c:\\idm\\Step4ReducedNatGeoFileAPminusModelA.xlsx" # source file
outputFile<-"c:\\idm\\term.csv"
##################################################################################
##### 2 - READ DATA FILE INTO R AND PERFORM DATA PREPROCESSING
##################################################################################
## Read file into R
df<-read.xls(datafileName,sheet=1,header=TRUE)
numSuppliers<-length(unique(df$Supplier.ID))
## Create blank dataframe
supplier<-sort(unique(df$Supplier.ID),decreasing=FALSE) # supplierIDs in asc order
description<-c(rep("",numSuppliers))
procdf<-data.frame(supplier,description)
procdf$description<-as.character(procdf$description)
## populate dataframe with text from each supplier from original file
## Loop below operates once for each unique supplier referenced in both
## supplier and procdf variables
for (ctr in 1:numSuppliers){
# Loop is running for supplier referenced by supplier[ctr]
# Retrieve rows from original dataframe that have
# suppliers matching with supplier[ctr]
idx<-which(df$Supplier.ID==supplier[ctr])
# how many rows have this supplier?
l<-length(idx)
# initialize working variable
workingText<-c("")
# Loop through the matching records and concatenate
# text of interest into workingText variable
for (j in 1:l){
# get row reference
rowidx<-idx[j]
# retrieve and concatenate text
workingText<-paste0(workingText,df[rowidx,3])
}
# perform stemming on the text
workingText<-wordStem(workingText,language='porter')
# remove duplicate words from the text post stemming and assign
# back into procdf
procdf[ctr,2]<-
vapply(lapply(strsplit(workingText, " "), unique),
paste, character(1L), collapse = " ")
}
##################################################################################
##### 3 - CREATE DTM AND TFXIDF MATRICES
##################################################################################
## Create corpus object and remove english stop words
corpus<-Corpus(VectorSource(procdf$description))
cleancorpus<-tm_map(corpus,removeWords,stopwords("english"))
cleancorpus<-tm_map(cleancorpus,removeNumbers)
cleancorpus<-tm_map(cleancorpus,removePunctuation)
## Create document term matrix
dtm<-DocumentTermMatrix(cleancorpus)
## Create term frequency - Inverse Document frequency matrix
dtm_tfxidf<-weightTfIdf(dtm)
m<-as.matrix(dtm_tfxidf) # convert into matrix object
rownames(m) <- 1:nrow(m) # give appropriate row names (?)
test<-rowSums(m)
erridx<-which(test==0)
zerov<-rep(0,ncol(m))
##################################################################################
##### 4 - RUN K-MEANS CLUSTERING ON DATA
##################################################################################
norm_eucl <- function(m) m/apply(m, MARGIN=1, FUN=function(x) sum(x^2)^.5)
m_norm <- norm_eucl(m)
# we have four rows that are full of zeroes
cl <- kmeans(m_norm, 4)
##################################################################################
##### 5 - PREPARE DATAFRAME TO INSPECT TERMS MANUALLY
##################################################################################
tdm<-TermDocumentMatrix(cleancorpus)
workingm<-as.matrix(tdm)
v <- sort(rowSums(workingm),decreasing=TRUE)
termsFreqdf <- data.frame(word = names(v),freq=v)
terms<-as.character(tdm[[6]][1])
termofInterest<-c("royalties")
container<-subset(workingm,rownames(workingm)==termofInterest)
docsWithTerm<-which(container>0)
qualifyingSuppliers<-supplier[docsWithTerm]
write.csv(qualifyingSuppliers,outputFile)
|
# Exploratory time-series look at the ANMO Z-channel trace:
# plot the raw series, its ACF/PACF, and the first difference.
# NOTE(review): hard-coded, machine-specific working directory; prefer
# running the script from the project root instead of setwd().
setwd("/media/vivekkulkarni/362480E21B181E95/Fodder/src")
d <- read.csv('../real_data/ANMO_Zchannel.csv')
d <- as.numeric(unlist(d))
plot(as.ts(d))                # full trace
datas <- as.ts(d[1:50000])    # first 50k samples for correlation analysis
plot(datas)
a <- acf(datas, 100)          # autocorrelation up to lag 100
p <- pacf(datas, 100)         # partial autocorrelation up to lag 100
d <- diff(d)                  # first difference to remove trend
plot(d)
plot(d) |
library(openxlsx)
library(ggplot2)
library(reshape2)
#source("~/documents/lab/workspace/Classification_scripts/multiplot.R")
source("~/workspace/classification_scripts/multiplot.R")
library(zoo)
library(gplots)
library(RColorBrewer)
library(abind)
library(gridGraphics)
library(grid)
library(gridExtra)
library(R.matlab)
# Use TRUE rather than the reassignable alias T.
saveAsPng <- TRUE
#########
# Which NHP's condensed trial data to load ('0059' or '504').
nhp_id <- '504'
if (nhp_id == '0059') {
  condensed <- readRDS('0059_total_condensed.rds')
} else if (nhp_id == '504') {
  condensed <- readRDS('504_total_condensed.rds')
}
# Trial-index vectors into 'condensed'.  Column meanings inferred from the
# axis labels used in the plots below: col 4 = reward level, col 5 =
# punishment level, col 6 = result (0 fail / 1 success), col 7 = value,
# col 8 = motivation -- TODO confirm against the file that built the RDS.
# Reward level 0-3:
r0 <- which(condensed[,4] == 0)
r1 <- which(condensed[,4] == 1)
r2 <- which(condensed[,4] == 2)
r3 <- which(condensed[,4] == 3)
# Punishment level 0-3:
p0 <- which(condensed[,5] == 0)
p1 <- which(condensed[,5] == 1)
p2 <- which(condensed[,5] == 2)
p3 <- which(condensed[,5] == 3)
# Value -3..3:
v_3 <- which(condensed[,7] == -3)
v_2 <- which(condensed[,7] == -2)
v_1 <- which(condensed[,7] == -1)
v0 <- which(condensed[,7] == 0)
v1 <- which(condensed[,7] == 1)
v2 <- which(condensed[,7] == 2)
v3 <- which(condensed[,7] == 3)
# Motivation 0-6:
m0 <- which(condensed[,8] == 0)
m1 <- which(condensed[,8] == 1)
m2 <- which(condensed[,8] == 2)
m3 <- which(condensed[,8] == 3)
m4 <- which(condensed[,8] == 4)
m5 <- which(condensed[,8] == 5)
m6 <- which(condensed[,8] == 6)
# Result: 0 = fail, 1 = success:
res0 <- which(condensed[,6] == 0)
res1 <- which(condensed[,6] == 1)
# Intersections: reward level within fails / successes.
r0_fail <- res0[which(res0 %in% r0)]
r1_fail <- res0[which(res0 %in% r1)]
r2_fail <- res0[which(res0 %in% r2)]
r3_fail <- res0[which(res0 %in% r3)]
r0_succ <- res1[which(res1 %in% r0)]
r1_succ <- res1[which(res1 %in% r1)]
r2_succ <- res1[which(res1 %in% r2)]
r3_succ <- res1[which(res1 %in% r3)]
# Intersections: punishment level within fails / successes.
p0_fail <- res0[which(res0 %in% p0)]
p1_fail <- res0[which(res0 %in% p1)]
p2_fail <- res0[which(res0 %in% p2)]
p3_fail <- res0[which(res0 %in% p3)]
p0_succ <- res1[which(res1 %in% p0)]
p1_succ <- res1[which(res1 %in% p1)]
p2_succ <- res1[which(res1 %in% p2)]
p3_succ <- res1[which(res1 %in% p3)]
# Bar plots of mean reach time and intertrial time (columns 9 and 10 of
# 'condensed'), with +/- 1 SD error bars, split by trial condition.  One
# PNG per condition.  sd() of a single-trial group yields NA, which is
# zeroed out by test[is.na(test)] <- 0 before plotting.
#reward
png(paste("time_", nhp_id,"_reward.png",sep=""),width=8,height=6,units="in",res=500)
ravgs <- data.frame(r_values=c(0,1,2,3),reach_time = c(mean(condensed[r0,9]),mean(condensed[r1,9]),mean(condensed[r2,9]),mean(condensed[r3,9])),intertrial = c(mean(condensed[r0,10]),mean(condensed[r1,10]),mean(condensed[r2,10]),mean(condensed[r3,10])))
rstds <- data.frame(r_values=c(0,1,2,3),reach_time = c(sd(condensed[r0,9]),sd(condensed[r1,9]),sd(condensed[r2,9]),sd(condensed[r3,9])),intertrial = c(sd(condensed[r0,10]),sd(condensed[r1,10]),sd(condensed[r2,10]),sd(condensed[r3,10])))
avg_melt <- melt(ravgs,id="r_values",variable.name='type',value.name='avg')
std_melt <- melt(rstds,id="r_values",variable.name='type',value.name='std')
# NOTE(review): merge() has no 'row.names' argument; the merge actually
# works because it joins on the shared columns (r_values, type) -- confirm.
test <- merge(std_melt,avg_melt,row.names='r_values')
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=r_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen")) + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9)) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Reward",fill="")
plot(plt)
graphics.off()
# Same plot, split by punishment level.
#punishment
png(paste("time_", nhp_id,"_punishment.png",sep=""),width=8,height=6,units="in",res=500)
pavgs <- data.frame(p_values=c(0,1,2,3),reach_time = c(mean(condensed[p0,9]),mean(condensed[p1,9]),mean(condensed[p2,9]),mean(condensed[p3,9])),intertrial = c(mean(condensed[p0,10]),mean(condensed[p1,10]),mean(condensed[p2,10]),mean(condensed[p3,10])))
pstds <- data.frame(p_values=c(0,1,2,3),reach_time = c(sd(condensed[p0,9]),sd(condensed[p1,9]),sd(condensed[p2,9]),sd(condensed[p3,9])),intertrial = c(sd(condensed[p0,10]),sd(condensed[p1,10]),sd(condensed[p2,10]),sd(condensed[p3,10])))
avg_melt <- melt(pavgs,id="p_values",variable.name='type',value.name='avg')
std_melt <- melt(pstds,id="p_values",variable.name='type',value.name='std')
test <- merge(std_melt,avg_melt,row.names='p_values')
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=p_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen")) + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9)) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Punishment",fill="")
plot(plt)
graphics.off()
# Same plot, split by result (fail vs success).
#result
png(paste("time_", nhp_id,"_result.png",sep=""),width=8,height=6,units="in",res=500)
resavgs <- data.frame(res_values=c(0,1),reach_time = c(mean(condensed[res0,9]),mean(condensed[res1,9])),intertrial = c(mean(condensed[res0,10]),mean(condensed[res1,10])))
resstds <- data.frame(res_values=c(0,1),reach_time = c(sd(condensed[res0,9]),sd(condensed[res1,9])),intertrial = c(sd(condensed[res0,10]),sd(condensed[res1,10])))
avg_melt <- melt(resavgs,id="res_values",variable.name='type',value.name='avg')
std_melt <- melt(resstds,id="res_values",variable.name='type',value.name='std')
test <- merge(std_melt,avg_melt,row.names='res_values')
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=res_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen")) + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9)) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Result",fill="") + scale_x_discrete(limits=0:1,labels=c("fail","succ"))
plot(plt)
graphics.off()
# Same mean/SD bar plot, split by value (-3..3).
#value
png(paste("time_", nhp_id,"_value.png",sep=""),width=8,height=6,units="in",res=500)
vavgs <- data.frame(v_values=c(-3,-2,-1,0,1,2,3),reach_time = c(mean(condensed[v_3,9]),mean(condensed[v_2,9]),mean(condensed[v_1,9]),mean(condensed[v0,9]),mean(condensed[v1,9]),mean(condensed[v2,9]),mean(condensed[v3,9])),intertrial = c(mean(condensed[v_3,10]),mean(condensed[v_2,10]),mean(condensed[v_1,10]),mean(condensed[v0,10]),mean(condensed[v1,10]),mean(condensed[v2,10]),mean(condensed[v3,10])))
vstds <- data.frame(v_values=c(-3,-2,-1,0,1,2,3),reach_time = c(sd(condensed[v_3,9]),sd(condensed[v_2,9]),sd(condensed[v_1,9]),sd(condensed[v0,9]),sd(condensed[v1,9]),sd(condensed[v2,9]),sd(condensed[v3,9])),intertrial = c(sd(condensed[v_3,10]),sd(condensed[v_2,10]),sd(condensed[v_1,10]),sd(condensed[v0,10]),sd(condensed[v1,10]),sd(condensed[v2,10]),sd(condensed[v3,10])))
avg_melt <- melt(vavgs,id="v_values",variable.name='type',value.name='avg')
std_melt <- melt(vstds,id="v_values",variable.name='type',value.name='std')
test <- merge(std_melt,avg_melt,row.names='v_values')
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=v_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen")) + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9)) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Value",fill="")
plot(plt)
graphics.off()
# Same mean/SD bar plot, split by motivation (0..6).
#motivation
png(paste("time_", nhp_id,"_motivation.png",sep=""),width=8,height=6,units="in",res=500)
mavgs <- data.frame(m_values=c(0,1,2,3,4,5,6),reach_time = c(mean(condensed[m0,9]),mean(condensed[m1,9]),mean(condensed[m2,9]),mean(condensed[m3,9]),mean(condensed[m4,9]),mean(condensed[m5,9]),mean(condensed[m6,9])),intertrial = c(mean(condensed[m0,10]),mean(condensed[m1,10]),mean(condensed[m2,10]),mean(condensed[m3,10]),mean(condensed[m4,10]),mean(condensed[m5,10]),mean(condensed[m6,10])))
mstds <- data.frame(m_values=c(0,1,2,3,4,5,6),reach_time = c(sd(condensed[m0,9]),sd(condensed[m1,9]),sd(condensed[m2,9]),sd(condensed[m3,9]),sd(condensed[m4,9]),sd(condensed[m5,9]),sd(condensed[m6,9])),intertrial = c(sd(condensed[m0,10]),sd(condensed[m1,10]),sd(condensed[m2,10]),sd(condensed[m3,10]),sd(condensed[m4,10]),sd(condensed[m5,10]),sd(condensed[m6,10])))
avg_melt <- melt(mavgs,id="m_values",variable.name='type',value.name='avg')
std_melt <- melt(mstds,id="m_values",variable.name='type',value.name='std')
test <- merge(std_melt,avg_melt,row.names='m_values')
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=m_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen")) + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9)) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Motivation",fill="")
plot(plt)
graphics.off()
##########
##########
# Success/fail split: four bars per level -- reach time and intertrial time
# for successes (solid colors) and failures (pale colors).
#reward sf
png(paste("time_", nhp_id,"_reward_sf.png",sep=""),width=8,height=6,units="in",res=500)
r_s_avgs <- data.frame(r_values=c(0,1,2,3),reach_time_s = c(mean(condensed[r0_succ,9]),mean(condensed[r1_succ,9]),mean(condensed[r2_succ,9]),mean(condensed[r3_succ,9])),intertrial_s = c(mean(condensed[r0_succ,10]),mean(condensed[r1_succ,10]),mean(condensed[r2_succ,10]),mean(condensed[r3_succ,10])))
r_s_stds <- data.frame(r_values=c(0,1,2,3),reach_time_s = c(sd(condensed[r0_succ,9]),sd(condensed[r1_succ,9]),sd(condensed[r2_succ,9]),sd(condensed[r3_succ,9])),intertrial_s = c(sd(condensed[r0_succ,10]),sd(condensed[r1_succ,10]),sd(condensed[r2_succ,10]),sd(condensed[r3_succ,10])))
r_f_avgs <- data.frame(r_values=c(0,1,2,3),reach_time_f = c(mean(condensed[r0_fail,9]),mean(condensed[r1_fail,9]),mean(condensed[r2_fail,9]),mean(condensed[r3_fail,9])),intertrial_f = c(mean(condensed[r0_fail,10]),mean(condensed[r1_fail,10]),mean(condensed[r2_fail,10]),mean(condensed[r3_fail,10])))
r_f_stds <- data.frame(r_values=c(0,1,2,3),reach_time_f = c(sd(condensed[r0_fail,9]),sd(condensed[r1_fail,9]),sd(condensed[r2_fail,9]),sd(condensed[r3_fail,9])),intertrial_f = c(sd(condensed[r0_fail,10]),sd(condensed[r1_fail,10]),sd(condensed[r2_fail,10]),sd(condensed[r3_fail,10])))
avg_s_melt <- melt(r_s_avgs,id="r_values",variable.name='type',value.name='avg')
std_s_melt <- melt(r_s_stds,id="r_values",variable.name='type',value.name='std')
avg_f_melt <- melt(r_f_avgs,id="r_values",variable.name='type',value.name='avg')
std_f_melt <- melt(r_f_stds,id="r_values",variable.name='type',value.name='std')
test_s <- merge(std_s_melt,avg_s_melt,row.names='r_values')
test_f <- merge(std_f_melt,avg_f_melt,row.names='r_values')
test <- rbind(test_s,test_f)
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=r_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen","paleturquoise","lightgreen")) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Reward",fill="") # + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9))
plot(plt)
graphics.off()
# Same success/fail split by punishment level.
#punishment sf
png(paste("time_", nhp_id,"_punishment_sf.png",sep=""),width=8,height=6,units="in",res=500)
p_s_avgs <- data.frame(p_values=c(0,1,2,3),reach_time_s = c(mean(condensed[p0_succ,9]),mean(condensed[p1_succ,9]),mean(condensed[p2_succ,9]),mean(condensed[p3_succ,9])),intertrial_s = c(mean(condensed[p0_succ,10]),mean(condensed[p1_succ,10]),mean(condensed[p2_succ,10]),mean(condensed[p3_succ,10])))
p_s_stds <- data.frame(p_values=c(0,1,2,3),reach_time_s = c(sd(condensed[p0_succ,9]),sd(condensed[p1_succ,9]),sd(condensed[p2_succ,9]),sd(condensed[p3_succ,9])),intertrial_s = c(sd(condensed[p0_succ,10]),sd(condensed[p1_succ,10]),sd(condensed[p2_succ,10]),sd(condensed[p3_succ,10])))
p_f_avgs <- data.frame(p_values=c(0,1,2,3),reach_time_f = c(mean(condensed[p0_fail,9]),mean(condensed[p1_fail,9]),mean(condensed[p2_fail,9]),mean(condensed[p3_fail,9])),intertrial_f = c(mean(condensed[p0_fail,10]),mean(condensed[p1_fail,10]),mean(condensed[p2_fail,10]),mean(condensed[p3_fail,10])))
p_f_stds <- data.frame(p_values=c(0,1,2,3),reach_time_f = c(sd(condensed[p0_fail,9]),sd(condensed[p1_fail,9]),sd(condensed[p2_fail,9]),sd(condensed[p3_fail,9])),intertrial_f = c(sd(condensed[p0_fail,10]),sd(condensed[p1_fail,10]),sd(condensed[p2_fail,10]),sd(condensed[p3_fail,10])))
avg_s_melt <- melt(p_s_avgs,id="p_values",variable.name='type',value.name='avg')
std_s_melt <- melt(p_s_stds,id="p_values",variable.name='type',value.name='std')
avg_f_melt <- melt(p_f_avgs,id="p_values",variable.name='type',value.name='avg')
std_f_melt <- melt(p_f_stds,id="p_values",variable.name='type',value.name='std')
test_s <- merge(std_s_melt,avg_s_melt,row.names='p_values')
test_f <- merge(std_f_melt,avg_f_melt,row.names='p_values')
test <- rbind(test_s,test_f)
test[is.na(test)] <- 0
plt <- ggplot(data=test,aes(x=p_values,y=avg,ymax=avg+std,ymin=avg-std,fill=type)) + geom_bar(position="dodge",stat="identity") + geom_errorbar(position=position_dodge(width=0.9),color="gray32",width=0.25)
plt <- plt + scale_fill_manual(values=c("royalblue","seagreen","paleturquoise","lightgreen")) + theme_classic() + labs(title="Average Times",y="Time (s)",x="Punishment",fill="") # + geom_text(aes(y=0.75),size=3,label=sprintf("%0.2f", round(test$avg, digits = 2)),position=position_dodge(width=0.9))
plot(plt)
graphics.off()
# NOTE(review): rm(list=ls()) wipes the entire global environment; avoid in
# scripts that may be sourced from an interactive session.
rm(list=ls())
| /timing_plots_all.R | no_license | jhess90/classification_scripts | R | false | false | 13,598 | r | library(openxlsx)
library(ggplot2)
library(reshape2)
#source("~/documents/lab/workspace/Classification_scripts/multiplot.R")
source("~/workspace/classification_scripts/multiplot.R")
library(zoo)
library(gplots)
library(RColorBrewer)
library(abind)
library(gridGraphics)
library(grid)
library(gridExtra)
library(R.matlab)
# Output toggle (not referenced in the visible plotting code; kept for
# compatibility).  Use TRUE rather than T: T is an ordinary variable that can
# be reassigned, TRUE is a reserved constant.
saveAsPng <- TRUE
#########
# Subject selector; each choice loads that NHP's per-trial data.  Every row
# of `condensed` is one trial; columns 4-10 are indexed positionally below.
nhp_id <- '504'
if (nhp_id == '0059') {
  condensed <- readRDS('0059_total_condensed.rds')
} else if (nhp_id == '504') {
  condensed <- readRDS('504_total_condensed.rds')
}
# Row-index vectors, one per condition level, used by all plots below.
# Column layout of `condensed` (one row per trial): col 4 = reward level,
# col 5 = punishment level, col 6 = result (0 = fail, 1 = success),
# col 7 = "value" (-3..3) and col 8 = "motivation" (0..6) -- presumably
# reward-punishment and reward+punishment respectively; TODO confirm
# against the script that builds the condensed RDS.
r0 <- which(condensed[,4] == 0)
r1 <- which(condensed[,4] == 1)
r2 <- which(condensed[,4] == 2)
r3 <- which(condensed[,4] == 3)
p0 <- which(condensed[,5] == 0)
p1 <- which(condensed[,5] == 1)
p2 <- which(condensed[,5] == 2)
p3 <- which(condensed[,5] == 3)
v_3 <- which(condensed[,7] == -3)
v_2 <- which(condensed[,7] == -2)
v_1 <- which(condensed[,7] == -1)
v0 <- which(condensed[,7] == 0)
v1 <- which(condensed[,7] == 1)
v2 <- which(condensed[,7] == 2)
v3 <- which(condensed[,7] == 3)
m0 <- which(condensed[,8] == 0)
m1 <- which(condensed[,8] == 1)
m2 <- which(condensed[,8] == 2)
m3 <- which(condensed[,8] == 3)
m4 <- which(condensed[,8] == 4)
m5 <- which(condensed[,8] == 5)
m6 <- which(condensed[,8] == 6)
res0 <- which(condensed[,6] == 0)
res1 <- which(condensed[,6] == 1)
# Success/failure splits per condition level: e.g. r0_fail keeps the indices
# that are both failures (res0) and reward level 0 (r0) -- an intersection,
# expressed as a subset of res0.
r0_fail <- res0[which(res0 %in% r0)]
r1_fail <- res0[which(res0 %in% r1)]
r2_fail <- res0[which(res0 %in% r2)]
r3_fail <- res0[which(res0 %in% r3)]
r0_succ <- res1[which(res1 %in% r0)]
r1_succ <- res1[which(res1 %in% r1)]
r2_succ <- res1[which(res1 %in% r2)]
r3_succ <- res1[which(res1 %in% r3)]
p0_fail <- res0[which(res0 %in% p0)]
p1_fail <- res0[which(res0 %in% p1)]
p2_fail <- res0[which(res0 %in% p2)]
p3_fail <- res0[which(res0 %in% p3)]
p0_succ <- res1[which(res1 %in% p0)]
p1_succ <- res1[which(res1 %in% p1)]
p2_succ <- res1[which(res1 %in% p2)]
p3_succ <- res1[which(res1 %in% p3)]
#reward
# Mean +/- 1 SD of reach time (col 9) and intertrial interval (col 10)
# per reward level, as dodged bars with error bars.
png(paste0("time_", nhp_id, "_reward.png"), width = 8, height = 6, units = "in", res = 500)
ravgs <- data.frame(r_values = c(0, 1, 2, 3),
                    reach_time = sapply(list(r0, r1, r2, r3), function(i) mean(condensed[i, 9])),
                    intertrial = sapply(list(r0, r1, r2, r3), function(i) mean(condensed[i, 10])))
rstds <- data.frame(r_values = c(0, 1, 2, 3),
                    reach_time = sapply(list(r0, r1, r2, r3), function(i) sd(condensed[i, 9])),
                    intertrial = sapply(list(r0, r1, r2, r3), function(i) sd(condensed[i, 10])))
avg_melt <- melt(ravgs, id = "r_values", variable.name = 'type', value.name = 'avg')
std_melt <- melt(rstds, id = "r_values", variable.name = 'type', value.name = 'std')
# Fix: merge() has no 'row.names' argument (the original passed one; it was
# silently ignored via '...').  Join explicitly on the common columns.
test <- merge(std_melt, avg_melt, by = c("r_values", "type"))
test[is.na(test)] <- 0  # NA SD (single-trial group) -> zero-length error bar
plt <- ggplot(data = test, aes(x = r_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen")) +
  geom_text(aes(y = 0.75), size = 3,
            label = sprintf("%0.2f", round(test$avg, digits = 2)),
            position = position_dodge(width = 0.9)) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Reward", fill = "")
plot(plt)
graphics.off()
#punishment
# Same summary as the reward plot, grouped by punishment level.
png(paste0("time_", nhp_id, "_punishment.png"), width = 8, height = 6, units = "in", res = 500)
pavgs <- data.frame(p_values = c(0, 1, 2, 3),
                    reach_time = sapply(list(p0, p1, p2, p3), function(i) mean(condensed[i, 9])),
                    intertrial = sapply(list(p0, p1, p2, p3), function(i) mean(condensed[i, 10])))
pstds <- data.frame(p_values = c(0, 1, 2, 3),
                    reach_time = sapply(list(p0, p1, p2, p3), function(i) sd(condensed[i, 9])),
                    intertrial = sapply(list(p0, p1, p2, p3), function(i) sd(condensed[i, 10])))
avg_melt <- melt(pavgs, id = "p_values", variable.name = 'type', value.name = 'avg')
std_melt <- melt(pstds, id = "p_values", variable.name = 'type', value.name = 'std')
# Fix: 'row.names' is not a merge() argument (silently ignored); join
# explicitly on the common columns merge() was already defaulting to.
test <- merge(std_melt, avg_melt, by = c("p_values", "type"))
test[is.na(test)] <- 0
plt <- ggplot(data = test, aes(x = p_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen")) +
  geom_text(aes(y = 0.75), size = 3,
            label = sprintf("%0.2f", round(test$avg, digits = 2)),
            position = position_dodge(width = 0.9)) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Punishment", fill = "")
plot(plt)
graphics.off()
#result
# Summary grouped by trial outcome (0 = fail, 1 = success).
png(paste0("time_", nhp_id, "_result.png"), width = 8, height = 6, units = "in", res = 500)
resavgs <- data.frame(res_values = c(0, 1),
                      reach_time = sapply(list(res0, res1), function(i) mean(condensed[i, 9])),
                      intertrial = sapply(list(res0, res1), function(i) mean(condensed[i, 10])))
resstds <- data.frame(res_values = c(0, 1),
                      reach_time = sapply(list(res0, res1), function(i) sd(condensed[i, 9])),
                      intertrial = sapply(list(res0, res1), function(i) sd(condensed[i, 10])))
avg_melt <- melt(resavgs, id = "res_values", variable.name = 'type', value.name = 'avg')
std_melt <- melt(resstds, id = "res_values", variable.name = 'type', value.name = 'std')
test <- merge(std_melt, avg_melt, by = c("res_values", "type"))  # was row.names= (ignored)
test[is.na(test)] <- 0
plt <- ggplot(data = test, aes(x = res_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen")) +
  geom_text(aes(y = 0.75), size = 3,
            label = sprintf("%0.2f", round(test$avg, digits = 2)),
            position = position_dodge(width = 0.9)) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Result", fill = "") +
  scale_x_discrete(limits = 0:1, labels = c("fail", "succ"))
plot(plt)
graphics.off()
#value
# Summary grouped by signed "value" level (-3..3, column 7).
png(paste0("time_", nhp_id, "_value.png"), width = 8, height = 6, units = "in", res = 500)
v_groups <- list(v_3, v_2, v_1, v0, v1, v2, v3)
vavgs <- data.frame(v_values = c(-3, -2, -1, 0, 1, 2, 3),
                    reach_time = sapply(v_groups, function(i) mean(condensed[i, 9])),
                    intertrial = sapply(v_groups, function(i) mean(condensed[i, 10])))
vstds <- data.frame(v_values = c(-3, -2, -1, 0, 1, 2, 3),
                    reach_time = sapply(v_groups, function(i) sd(condensed[i, 9])),
                    intertrial = sapply(v_groups, function(i) sd(condensed[i, 10])))
avg_melt <- melt(vavgs, id = "v_values", variable.name = 'type', value.name = 'avg')
std_melt <- melt(vstds, id = "v_values", variable.name = 'type', value.name = 'std')
test <- merge(std_melt, avg_melt, by = c("v_values", "type"))  # was row.names= (ignored)
test[is.na(test)] <- 0
plt <- ggplot(data = test, aes(x = v_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen")) +
  geom_text(aes(y = 0.75), size = 3,
            label = sprintf("%0.2f", round(test$avg, digits = 2)),
            position = position_dodge(width = 0.9)) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Value", fill = "")
plot(plt)
graphics.off()
#motivation
# Summary grouped by "motivation" level (0..6, column 8).
png(paste0("time_", nhp_id, "_motivation.png"), width = 8, height = 6, units = "in", res = 500)
m_groups <- list(m0, m1, m2, m3, m4, m5, m6)
mavgs <- data.frame(m_values = c(0, 1, 2, 3, 4, 5, 6),
                    reach_time = sapply(m_groups, function(i) mean(condensed[i, 9])),
                    intertrial = sapply(m_groups, function(i) mean(condensed[i, 10])))
mstds <- data.frame(m_values = c(0, 1, 2, 3, 4, 5, 6),
                    reach_time = sapply(m_groups, function(i) sd(condensed[i, 9])),
                    intertrial = sapply(m_groups, function(i) sd(condensed[i, 10])))
avg_melt <- melt(mavgs, id = "m_values", variable.name = 'type', value.name = 'avg')
std_melt <- melt(mstds, id = "m_values", variable.name = 'type', value.name = 'std')
test <- merge(std_melt, avg_melt, by = c("m_values", "type"))  # was row.names= (ignored)
test[is.na(test)] <- 0
plt <- ggplot(data = test, aes(x = m_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen")) +
  geom_text(aes(y = 0.75), size = 3,
            label = sprintf("%0.2f", round(test$avg, digits = 2)),
            position = position_dodge(width = 0.9)) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Motivation", fill = "")
plot(plt)
graphics.off()
##########
##########
#reward sf
# Reward level split by trial outcome: mean +/- SD of reach time (col 9)
# and intertrial interval (col 10), separately for successful (_s) and
# failed (_f) trials, all four series dodged on one plot.
png(paste0("time_", nhp_id, "_reward_sf.png"), width = 8, height = 6, units = "in", res = 500)
r_s_avgs <- data.frame(r_values = c(0, 1, 2, 3),
                       reach_time_s = sapply(list(r0_succ, r1_succ, r2_succ, r3_succ), function(i) mean(condensed[i, 9])),
                       intertrial_s = sapply(list(r0_succ, r1_succ, r2_succ, r3_succ), function(i) mean(condensed[i, 10])))
r_s_stds <- data.frame(r_values = c(0, 1, 2, 3),
                       reach_time_s = sapply(list(r0_succ, r1_succ, r2_succ, r3_succ), function(i) sd(condensed[i, 9])),
                       intertrial_s = sapply(list(r0_succ, r1_succ, r2_succ, r3_succ), function(i) sd(condensed[i, 10])))
r_f_avgs <- data.frame(r_values = c(0, 1, 2, 3),
                       reach_time_f = sapply(list(r0_fail, r1_fail, r2_fail, r3_fail), function(i) mean(condensed[i, 9])),
                       intertrial_f = sapply(list(r0_fail, r1_fail, r2_fail, r3_fail), function(i) mean(condensed[i, 10])))
r_f_stds <- data.frame(r_values = c(0, 1, 2, 3),
                       reach_time_f = sapply(list(r0_fail, r1_fail, r2_fail, r3_fail), function(i) sd(condensed[i, 9])),
                       intertrial_f = sapply(list(r0_fail, r1_fail, r2_fail, r3_fail), function(i) sd(condensed[i, 10])))
avg_s_melt <- melt(r_s_avgs, id = "r_values", variable.name = 'type', value.name = 'avg')
std_s_melt <- melt(r_s_stds, id = "r_values", variable.name = 'type', value.name = 'std')
avg_f_melt <- melt(r_f_avgs, id = "r_values", variable.name = 'type', value.name = 'avg')
std_f_melt <- melt(r_f_stds, id = "r_values", variable.name = 'type', value.name = 'std')
# Fix: 'row.names' is not a merge() argument (silently ignored); join
# explicitly on the common columns.
test_s <- merge(std_s_melt, avg_s_melt, by = c("r_values", "type"))
test_f <- merge(std_f_melt, avg_f_melt, by = c("r_values", "type"))
test <- rbind(test_s, test_f)
test[is.na(test)] <- 0  # NA SD from single-trial groups -> zero-length bars
plt <- ggplot(data = test, aes(x = r_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen", "paleturquoise", "lightgreen")) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Reward", fill = "")
plot(plt)
graphics.off()
#punishment sf
# Punishment level split by trial outcome, mirroring the reward s/f plot.
png(paste0("time_", nhp_id, "_punishment_sf.png"), width = 8, height = 6, units = "in", res = 500)
p_s_avgs <- data.frame(p_values = c(0, 1, 2, 3),
                       reach_time_s = sapply(list(p0_succ, p1_succ, p2_succ, p3_succ), function(i) mean(condensed[i, 9])),
                       intertrial_s = sapply(list(p0_succ, p1_succ, p2_succ, p3_succ), function(i) mean(condensed[i, 10])))
p_s_stds <- data.frame(p_values = c(0, 1, 2, 3),
                       reach_time_s = sapply(list(p0_succ, p1_succ, p2_succ, p3_succ), function(i) sd(condensed[i, 9])),
                       intertrial_s = sapply(list(p0_succ, p1_succ, p2_succ, p3_succ), function(i) sd(condensed[i, 10])))
p_f_avgs <- data.frame(p_values = c(0, 1, 2, 3),
                       reach_time_f = sapply(list(p0_fail, p1_fail, p2_fail, p3_fail), function(i) mean(condensed[i, 9])),
                       intertrial_f = sapply(list(p0_fail, p1_fail, p2_fail, p3_fail), function(i) mean(condensed[i, 10])))
p_f_stds <- data.frame(p_values = c(0, 1, 2, 3),
                       reach_time_f = sapply(list(p0_fail, p1_fail, p2_fail, p3_fail), function(i) sd(condensed[i, 9])),
                       intertrial_f = sapply(list(p0_fail, p1_fail, p2_fail, p3_fail), function(i) sd(condensed[i, 10])))
avg_s_melt <- melt(p_s_avgs, id = "p_values", variable.name = 'type', value.name = 'avg')
std_s_melt <- melt(p_s_stds, id = "p_values", variable.name = 'type', value.name = 'std')
avg_f_melt <- melt(p_f_avgs, id = "p_values", variable.name = 'type', value.name = 'avg')
std_f_melt <- melt(p_f_stds, id = "p_values", variable.name = 'type', value.name = 'std')
# Fix: 'row.names' is not a merge() argument (silently ignored); join
# explicitly on the common columns.
test_s <- merge(std_s_melt, avg_s_melt, by = c("p_values", "type"))
test_f <- merge(std_f_melt, avg_f_melt, by = c("p_values", "type"))
test <- rbind(test_s, test_f)
test[is.na(test)] <- 0
plt <- ggplot(data = test, aes(x = p_values, y = avg, ymax = avg + std, ymin = avg - std, fill = type)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(position = position_dodge(width = 0.9), color = "gray32", width = 0.25)
plt <- plt +
  scale_fill_manual(values = c("royalblue", "seagreen", "paleturquoise", "lightgreen")) +
  theme_classic() +
  labs(title = "Average Times", y = "Time (s)", x = "Punishment", fill = "")
plot(plt)
graphics.off()
# NOTE(review): rm(list=ls()) wipes the entire global environment; fine for a
# standalone batch script, hostile if this file is ever source()d interactively.
rm(list = ls())
|
#' Mosaic plots.
#'
#' @export
#'
#' @description
#' A mosaic plot is a convenient graphical summary of the conditional distributions
#' in a contingency table and is composed of spines in alternating directions.
#'
#'
#' @inheritParams ggplot2::layer
#' @param divider Divider function. The default divider function is mosaic() which will use spines in alternating directions. The four options for partitioning:
#' \itemize{
#' \item \code{vspine} Vertical spine partition: width constant, height varies.
#' \item \code{hspine} Horizontal spine partition: height constant, width varies.
#' \item \code{vbar} Vertical bar partition: height constant, width varies.
#' \item \code{hbar} Horizontal bar partition: width constant, height varies.
#' }
#' @param offset Set the space between the first spine
#' @param na.rm If \code{FALSE} (the default), removes missing values with a warning. If \code{TRUE} silently removes missing values.
#' @param ... other arguments passed on to \code{layer}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{color = 'red'} or \code{size = 3}. They may also be parameters to the paired geom/stat.
#' @examples
#'
#' data(Titanic)
#' titanic <- as.data.frame(Titanic)
#' titanic$Survived <- factor(titanic$Survived, levels=c("Yes", "No"))
#'
#'
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class), fill=Survived))
#' # good practice: use the 'dependent' variable (or most important variable)
#' # as fill variable
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class, Age), fill=Survived))
#'
#' # we can change where we define variables
#' ggplot(data=titanic, aes(weight = Freq, fill=Survived, x=product(Class, Age))) +
#' geom_mosaic()
#'
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class), conds=product(Age), fill=Survived))
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Survived, Class), fill=Age))
#'
#' \dontrun{
#' data(happy, package="productplots")
#'
#' ggplot(data = happy) + geom_mosaic(aes(x=product(happy)))
#' ggplot(data = happy) + geom_mosaic(aes(x=product(happy))) +
#' coord_flip()
#' # weighting is important
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(happy)))
#' ggplot(data = happy) + geom_mosaic(aes(weight=wtssall, x=product(health), fill=happy)) +
#' theme(axis.text.x=element_text(angle=35))
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(health), fill=happy), na.rm=TRUE)
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(health, sex, degree), fill=happy),
#' na.rm=TRUE)
#'
#' # here is where a bit more control over the spacing of the bars is helpful:
#' # set labels manually:
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset=0) +
#' scale_x_product("Age", labels=c(17+1:72, "NA"))
#' # thin out labels manually:
#' labels <- c(17+1:72, NA)
#' labels[labels %% 5 != 0] <- ""
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset=0) +
#' scale_x_product("Age", labels=labels)
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy, conds = sex),
#' divider=mosaic("v"), na.rm=TRUE, offset=0.001) +
#' scale_x_product("Age", labels=labels)
#' # facetting works!!!!
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset = 0) +
#' facet_grid(sex~.) +
#' scale_x_product("Age", labels=labels)
#'
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(happy, finrela, health)),
#' divider=mosaic("h"))
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(happy, finrela, health)), offset=.005)
#'
#' # Spine example
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(health), fill = health)) +
#' facet_grid(happy~.)
#' }
geom_mosaic <- function(mapping = NULL, data = NULL, stat = "mosaic",
                        position = "identity", na.rm = FALSE, divider = mosaic(), offset = 0.01,
                        show.legend = NA, inherit.aes = TRUE, ...) {
  # Mosaic-specific options (plus anything in ...) are forwarded to the
  # paired stat/geom through the layer's `params` list.
  layer_params <- list(
    na.rm = na.rm,
    divider = divider,
    offset = offset,
    ...
  )
  # Build the ggplot2 layer pairing the "mosaic" stat with GeomMosaic.
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomMosaic,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
#' @importFrom grid grobTree
# ggproto geom backing geom_mosaic(): draws the rectangles produced by the
# mosaic stat, delegating the actual drawing to GeomRect.
GeomMosaic <- ggplot2::ggproto(
  "GeomMosaic", ggplot2::Geom,

  # Pass the statted data through unchanged (hook kept for debugging).
  setup_data = function(data, params) {
    data
  },

  required_aes = c("xmin", "xmax", "ymin", "ymax"),

  default_aes = ggplot2::aes(width = 0.75, linetype = "solid", fontsize = 5,
                             shape = 19, colour = NA,
                             size = .1, fill = "grey30", alpha = .8, stroke = 0.1,
                             linewidth = .1, weight = 1, x = NULL, y = NULL, conds = NULL),

  # Draw only the rectangles belonging to the deepest divider level; rows at
  # shallower levels are intermediate partitions.  When no border colour was
  # mapped, derive it from the fill so alpha affects the outline consistently.
  draw_panel = function(data, panel_scales, coord) {
    if (all(is.na(data$colour)))
      data$colour <- alpha(data$fill, data$alpha) # regard alpha in colour determination
    GeomRect$draw_panel(subset(data, level == max(data$level)), panel_scales, coord)
  },

  # Relaxed copy of ggplot2's internal check: each aesthetic must be scalar
  # or have exactly one value per data row.
  check_aesthetics = function(x, n) {
    ns <- vapply(x, length, numeric(1))
    good <- ns == 1L | ns == n
    if (all(good)) {
      return()
    }
    stop(
      "Aesthetics must be either length 1 or the same as the data (", n, "): ",
      # Fix: names(!good) listed every aesthetic name; report only the
      # offending ones (as upstream ggplot2 does with names(which(!good))).
      paste(names(which(!good)), collapse = ", "),
      call. = FALSE
    )
  },

  draw_key = ggplot2::draw_key_rect
)
| /R/geom-mosaic.r | no_license | njtierney/ggmosaic | R | false | false | 5,783 | r | #' Mosaic plots.
#'
#' @export
#'
#' @description
#' A mosaic plot is a convenient graphical summary of the conditional distributions
#' in a contingency table and is composed of spines in alternating directions.
#'
#'
#' @inheritParams ggplot2::layer
#' @param divider Divider function. The default divider function is mosaic() which will use spines in alternating directions. The four options for partitioning:
#' \itemize{
#' \item \code{vspine} Vertical spine partition: width constant, height varies.
#' \item \code{hspine} Horizontal spine partition: height constant, width varies.
#' \item \code{vbar} Vertical bar partition: height constant, width varies.
#' \item \code{hbar} Horizontal bar partition: width constant, height varies.
#' }
#' @param offset Set the space between the first spine
#' @param na.rm If \code{FALSE} (the default), removes missing values with a warning. If \code{TRUE} silently removes missing values.
#' @param ... other arguments passed on to \code{layer}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{color = 'red'} or \code{size = 3}. They may also be parameters to the paired geom/stat.
#' @examples
#'
#' data(Titanic)
#' titanic <- as.data.frame(Titanic)
#' titanic$Survived <- factor(titanic$Survived, levels=c("Yes", "No"))
#'
#'
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class), fill=Survived))
#' # good practice: use the 'dependent' variable (or most important variable)
#' # as fill variable
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class, Age), fill=Survived))
#'
#' # we can change where we define variables
#' ggplot(data=titanic, aes(weight = Freq, fill=Survived, x=product(Class, Age))) +
#' geom_mosaic()
#'
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Class), conds=product(Age), fill=Survived))
#' ggplot(data=titanic) +
#' geom_mosaic(aes(weight=Freq, x=product(Survived, Class), fill=Age))
#'
#' \dontrun{
#' data(happy, package="productplots")
#'
#' ggplot(data = happy) + geom_mosaic(aes(x=product(happy)))
#' ggplot(data = happy) + geom_mosaic(aes(x=product(happy))) +
#' coord_flip()
#' # weighting is important
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(happy)))
#' ggplot(data = happy) + geom_mosaic(aes(weight=wtssall, x=product(health), fill=happy)) +
#' theme(axis.text.x=element_text(angle=35))
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(health), fill=happy), na.rm=TRUE)
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(health, sex, degree), fill=happy),
#' na.rm=TRUE)
#'
#' # here is where a bit more control over the spacing of the bars is helpful:
#' # set labels manually:
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset=0) +
#' scale_x_product("Age", labels=c(17+1:72, "NA"))
#' # thin out labels manually:
#' labels <- c(17+1:72, NA)
#' labels[labels %% 5 != 0] <- ""
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset=0) +
#' scale_x_product("Age", labels=labels)
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy, conds = sex),
#' divider=mosaic("v"), na.rm=TRUE, offset=0.001) +
#' scale_x_product("Age", labels=labels)
#' # facetting works!!!!
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight=wtssall, x=product(age), fill=happy), na.rm=TRUE, offset = 0) +
#' facet_grid(sex~.) +
#' scale_x_product("Age", labels=labels)
#'
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(happy, finrela, health)),
#' divider=mosaic("h"))
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(happy, finrela, health)), offset=.005)
#'
#' # Spine example
#' ggplot(data = happy) +
#' geom_mosaic(aes(weight = wtssall, x = product(health), fill = health)) +
#' facet_grid(happy~.)
#' }
# Construct a ggplot2 layer that pairs the "mosaic" stat with GeomMosaic.
# The mosaic-specific options (na.rm, divider, offset, plus anything passed
# through ...) travel to the stat/geom via the layer's `params` list; all
# other arguments are the standard ggplot2 layer arguments.
geom_mosaic <- function(mapping = NULL, data = NULL, stat = "mosaic",
                   position = "identity", na.rm = FALSE, divider = mosaic(), offset = 0.01,
                   show.legend = NA, inherit.aes = TRUE, ...)
{
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomMosaic,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      na.rm = na.rm,
      divider = divider,
      offset = offset,
      ...
    )
  )
}
#' @importFrom grid grobTree
# ggproto geom backing geom_mosaic(): renders the rectangles produced by the
# mosaic stat by delegating to GeomRect.
GeomMosaic <- ggplot2::ggproto(
  "GeomMosaic", ggplot2::Geom,
  # Pass the statted data through unchanged (debug hooks left commented out).
  setup_data = function(data, params) {
    # cat("setup_data in GeomMosaic\n")
    # browser()
    data
  },
  required_aes = c("xmin", "xmax", "ymin", "ymax"),
  default_aes = ggplot2::aes(width = 0.75, linetype = "solid", fontsize=5,
                             shape = 19, colour = NA,
                             size = .1, fill = "grey30", alpha = .8, stroke = 0.1,
                             linewidth=.1, weight = 1, x = NULL, y = NULL, conds = NULL),
  # Draw only the rows of the deepest divider level; shallower levels are
  # intermediate partitions.  When no border colour is mapped, derive it from
  # the fill so alpha affects the outline as well.
  draw_panel = function(data, panel_scales, coord) {
    # cat("draw_panel in GeomMosaic\n")
    # browser()
    if (all(is.na(data$colour)))
      data$colour <- alpha(data$fill, data$alpha) # regard alpha in colour determination
    GeomRect$draw_panel(subset(data, level==max(data$level)), panel_scales, coord)
  },
  # Relaxed copy of ggplot2's internal check: each aesthetic must be scalar
  # or have one value per data row.
  # NOTE(review): names(!good) reports every aesthetic name, not just the
  # offending ones; upstream ggplot2 uses names(which(!good)) here -- confirm
  # and align.
  check_aesthetics = function(x, n) {
    ns <- vapply(x, length, numeric(1))
    good <- ns == 1L | ns == n
    if (all(good)) {
      return()
    }
    # browser()
    stop(
      "Aesthetics must be either length 1 or the same as the data (", n, "): ",
      paste(names(!good), collapse = ", "),
      call. = FALSE
    )
  },
  draw_key = ggplot2::draw_key_rect
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.R
\name{train.rpart}
\alias{train.rpart}
\title{train.rpart}
\usage{
train.rpart(
formula,
data,
weights,
subset,
na.action = na.rpart,
method,
model = TRUE,
x = FALSE,
y = TRUE,
parms,
control,
cost,
...
)
}
\arguments{
\item{formula}{a formula, with a response but no interaction terms. If this is a data frame, that is taken as the model frame.}
\item{data}{an optional data frame in which to interpret the variables named in the formula.}
\item{weights}{optional case weights.}
\item{subset}{optional expression saying that only a subset of the rows of the data should be used in the fit.}
\item{na.action}{the default action deletes all observations for which y is missing, but keeps those in which one or more predictors are missing.}
\item{method}{one of "anova", "poisson", "class" or "exp". If method is missing then the routine tries to make an intelligent guess. If y is a survival object, then method = "exp" is assumed, if y has 2 columns then method = "poisson" is assumed, if y is a factor then method = "class" is assumed, otherwise method = "anova" is assumed. It is wisest to specify the method directly, especially as more criteria may added to the function in future.
Alternatively, method can be a list of functions named init, split and eval. Examples are given in the file 'tests/usersplits.R' in the sources, and in the vignettes 'User Written Split Functions'.}
\item{model}{if logical: keep a copy of the model frame in the result? If the input value for model is a model frame (likely from an earlier call to the rpart function), then this frame is used rather than constructing new data.}
\item{x}{keep a copy of the x matrix in the result.}
\item{y}{keep a copy of the dependent variable in the result. If missing and model is supplied this defaults to FALSE.}
\item{parms}{optional parameters for the splitting function.
Anova splitting has no parameters.
Poisson splitting has a single parameter, the coefficient of variation of the prior distribution on the rates. The default value is 1.
Exponential splitting has the same parameter as Poisson.
For classification splitting, the list can contain any of: the vector of prior probabilities (component prior), the loss matrix (component loss) or the splitting index (component split). The priors must be positive and sum to 1. The loss matrix must have zeros on the diagonal and positive off-diagonal elements. The splitting index can be gini or information. The default priors are proportional to the data counts, the losses default to 1, and the split defaults to gini.}
\item{control}{a list of options that control details of the rpart algorithm. See \code{\link[rpart]{rpart.control}}.}
\item{cost}{a vector of non-negative costs, one for each variable in the model. Defaults to one for all variables. These are scalings to be applied when considering splits, so the improvement on splitting on a variable is divided by its cost in deciding which split to choose.}
\item{...}{arguments to \code{\link[rpart]{rpart.control}} may also be specified in the call to rpart. They are checked against the list of valid arguments.}
}
\value{
A object rpart.prmdt with additional information to the model that allows to homogenize the results.
}
\description{
Provides a wrapping function for the \code{\link[rpart]{rpart}}.
}
\note{
the parameter information was taken from the original function \code{\link[rpart]{rpart}}.
}
\examples{
# Classification
data("iris")
n <- seq_len(nrow(iris))
.sample <- sample(n, length(n) * 0.75)
data.train <- iris[.sample,]
data.test <- iris[-.sample,]
modelo.rpart <- train.rpart(Species~., data.train)
modelo.rpart
prob <- predict(modelo.rpart, data.test, type = "prob")
prob
prediccion <- predict(modelo.rpart, data.test, type = "class")
prediccion
# Regression
len <- nrow(swiss)
sampl <- sample(x = 1:len,size = len*0.20,replace = FALSE)
ttesting <- swiss[sampl,]
ttraining <- swiss[-sampl,]
model.rpart <- train.rpart(Infant.Mortality~.,ttraining)
prediction <- predict(model.rpart,ttesting)
prediction
}
\seealso{
The internal function is from package \code{\link[rpart]{rpart}}.
}
| /man/train.rpart.Rd | no_license | PROMiDAT/traineR | R | false | true | 4,235 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.R
\name{train.rpart}
\alias{train.rpart}
\title{train.rpart}
\usage{
train.rpart(
formula,
data,
weights,
subset,
na.action = na.rpart,
method,
model = TRUE,
x = FALSE,
y = TRUE,
parms,
control,
cost,
...
)
}
\arguments{
\item{formula}{a formula, with a response but no interaction terms. If this is a data frame, that is taken as the model frame.}
\item{data}{an optional data frame in which to interpret the variables named in the formula.}
\item{weights}{optional case weights.}
\item{subset}{optional expression saying that only a subset of the rows of the data should be used in the fit.}
\item{na.action}{the default action deletes all observations for which y is missing, but keeps those in which one or more predictors are missing.}
\item{method}{one of "anova", "poisson", "class" or "exp". If method is missing then the routine tries to make an intelligent guess. If y is a survival object, then method = "exp" is assumed, if y has 2 columns then method = "poisson" is assumed, if y is a factor then method = "class" is assumed, otherwise method = "anova" is assumed. It is wisest to specify the method directly, especially as more criteria may added to the function in future.
Alternatively, method can be a list of functions named init, split and eval. Examples are given in the file 'tests/usersplits.R' in the sources, and in the vignettes 'User Written Split Functions'.}
\item{model}{if logical: keep a copy of the model frame in the result? If the input value for model is a model frame (likely from an earlier call to the rpart function), then this frame is used rather than constructing new data.}
\item{x}{keep a copy of the x matrix in the result.}
\item{y}{keep a copy of the dependent variable in the result. If missing and model is supplied this defaults to FALSE.}
\item{parms}{optional parameters for the splitting function.
Anova splitting has no parameters.
Poisson splitting has a single parameter, the coefficient of variation of the prior distribution on the rates. The default value is 1.
Exponential splitting has the same parameter as Poisson.
For classification splitting, the list can contain any of: the vector of prior probabilities (component prior), the loss matrix (component loss) or the splitting index (component split). The priors must be positive and sum to 1. The loss matrix must have zeros on the diagonal and positive off-diagonal elements. The splitting index can be gini or information. The default priors are proportional to the data counts, the losses default to 1, and the split defaults to gini.}
\item{control}{a list of options that control details of the rpart algorithm. See \code{\link[rpart]{rpart.control}}.}
\item{cost}{a vector of non-negative costs, one for each variable in the model. Defaults to one for all variables. These are scalings to be applied when considering splits, so the improvement on splitting on a variable is divided by its cost in deciding which split to choose.}
\item{...}{arguments to \code{\link[rpart]{rpart.control}} may also be specified in the call to rpart. They are checked against the list of valid arguments.}
}
\value{
A object rpart.prmdt with additional information to the model that allows to homogenize the results.
}
\description{
Provides a wrapping function for the \code{\link[rpart]{rpart}}.
}
\note{
the parameter information was taken from the original function \code{\link[rpart]{rpart}}.
}
\examples{
# Classification
data("iris")
n <- seq_len(nrow(iris))
.sample <- sample(n, length(n) * 0.75)
data.train <- iris[.sample,]
data.test <- iris[-.sample,]
modelo.rpart <- train.rpart(Species~., data.train)
modelo.rpart
prob <- predict(modelo.rpart, data.test, type = "prob")
prob
prediccion <- predict(modelo.rpart, data.test, type = "class")
prediccion
# Regression
len <- nrow(swiss)
sampl <- sample(x = 1:len,size = len*0.20,replace = FALSE)
ttesting <- swiss[sampl,]
ttraining <- swiss[-sampl,]
model.rpart <- train.rpart(Infant.Mortality~.,ttraining)
prediction <- predict(model.rpart,ttesting)
prediction
}
\seealso{
The internal function is from package \code{\link[rpart]{rpart}}.
}
|
library(tidyverse)
source("data_cleaning.R")
# Beeswarm-style plot: jittered points of COVID cases by state and county winner,
# point size scaled by case count, colored by winning party.
master_covid_election %>%
  ggplot(aes(size = cases)) +
  geom_point(aes(x = state_win, y = winner, color = winner),
             position = position_jitter(width = .3, height = .5),
             alpha = 0.5) +
  scale_color_manual(values = c("blue", "red")) +
  scale_size(range = c(0.5, 10)) +
  # FIX: theme() was previously a free-standing statement (missing "+" after
  # scale_size()), so these axis tweaks were evaluated and silently discarded.
  theme(axis.text.y = element_blank(),
        axis.title.y = element_text(),
        axis.ticks.y = element_blank()) | /beeswarm_plot.R | no_license | erhs-r/group_5 | R | false | false | 466 | r | library(tidyverse)
source("data_cleaning.R")
# Beeswarm-style plot: jittered points of COVID cases by state and county winner,
# point size scaled by case count, colored by winning party.
master_covid_election %>%
  ggplot(aes(size = cases)) +
  geom_point(aes(x = state_win, y = winner, color = winner),
             position = position_jitter(width = .3, height = .5),
             alpha = 0.5) +
  scale_color_manual(values = c("blue", "red")) +
  scale_size(range = c(0.5, 10)) +
  # FIX: theme() was previously a free-standing statement (missing "+" after
  # scale_size()), so these axis tweaks were evaluated and silently discarded.
  theme(axis.text.y = element_blank(),
        axis.title.y = element_text(),
        axis.ticks.y = element_blank()) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corpus_sample.R
\name{corpus_sample}
\alias{corpus_sample}
\title{randomly sample documents from a corpus}
\usage{
corpus_sample(x, size = ndoc(x), replace = FALSE, prob = NULL,
by = NULL, ...)
}
\arguments{
\item{x}{a corpus object whose documents will be sampled}
\item{size}{a positive number, the number of documents to select}
\item{replace}{Should sampling be with replacement?}
\item{prob}{A vector of probability weights for obtaining the elements of the
vector being sampled.}
\item{by}{a grouping variable for sampling. Useful for resampling
sub-document units such as sentences, for instance by specifying \code{by =
"document"}}
\item{...}{unused}
}
\value{
A corpus object with number of documents equal to \code{size}, drawn
from the corpus \code{x}. The returned corpus object will contain all of
the meta-data of the original corpus, and the same document variables for
the documents selected.
}
\description{
Takes a random sample of documents or features of the specified size from a
corpus or document-feature matrix, with or without replacement. Works just
as \code{\link{sample}} works for the documents and their associated
document-level variables.
}
\examples{
# sampling from a corpus
summary(corpus_sample(data_corpus_inaugural, 5))
summary(corpus_sample(data_corpus_inaugural, 10, replace=TRUE))
# sampling sentences within document
doccorpus <- corpus(c(one = "Sentence one. Sentence two. Third sentence.",
two = "First sentence, doc2. Second sentence, doc2."))
sentcorpus <- corpus_reshape(doccorpus, to = "sentences")
texts(sentcorpus)
texts(corpus_sample(sentcorpus, replace = TRUE, by = "document"))
}
\keyword{corpus}
| /man/corpus_sample.Rd | no_license | leeper/quanteda | R | false | true | 1,778 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corpus_sample.R
\name{corpus_sample}
\alias{corpus_sample}
\title{randomly sample documents from a corpus}
\usage{
corpus_sample(x, size = ndoc(x), replace = FALSE, prob = NULL,
by = NULL, ...)
}
\arguments{
\item{x}{a corpus object whose documents will be sampled}
\item{size}{a positive number, the number of documents to select}
\item{replace}{Should sampling be with replacement?}
\item{prob}{A vector of probability weights for obtaining the elements of the
vector being sampled.}
\item{by}{a grouping variable for sampling. Useful for resampling
sub-document units such as sentences, for instance by specifying \code{by =
"document"}}
\item{...}{unused}
}
\value{
A corpus object with number of documents equal to \code{size}, drawn
from the corpus \code{x}. The returned corpus object will contain all of
the meta-data of the original corpus, and the same document variables for
the documents selected.
}
\description{
Takes a random sample of documents or features of the specified size from a
corpus or document-feature matrix, with or without replacement. Works just
as \code{\link{sample}} works for the documents and their associated
document-level variables.
}
\examples{
# sampling from a corpus
summary(corpus_sample(data_corpus_inaugural, 5))
summary(corpus_sample(data_corpus_inaugural, 10, replace=TRUE))
# sampling sentences within document
doccorpus <- corpus(c(one = "Sentence one. Sentence two. Third sentence.",
two = "First sentence, doc2. Second sentence, doc2."))
sentcorpus <- corpus_reshape(doccorpus, to = "sentences")
texts(sentcorpus)
texts(corpus_sample(sentcorpus, replace = TRUE, by = "document"))
}
\keyword{corpus}
|
#' Generates a Resolvable Row-Column Design (RowColD)
#'
#'
#' @description It randomly generates a resolvable row-column design (RowColD).
#' Note that design optimization is only done at the level of rows and not columns;
#' hence, design is suboptimal. The randomization can be done across locations.
#'
#' @param t Number of treatments.
#' @param nrows Number of rows of a full resolvable replicate.
#' @param r Number of blocks (full resolvable replicates).
#' @param l Number of locations. By default \code{l = 1}.
#' @param plotNumber Numeric vector with the starting plot number for each location. By default \code{plotNumber = 101}.
#' @param seed (optional) Real number that specifies the starting seed to obtain reproducible designs.
#' @param locationNames (optional) Names for each location.
#' @param data (optional) Data frame with label list of treatments
#'
#' @author Didier Murillo [aut],
#' Salvador Gezan [aut],
#' Ana Heilman [ctb],
#' Thomas Walk [ctb],
#' Johan Aparicio [ctb],
#' Richard Horsley [ctb]
#'
#'
#' @importFrom stats runif na.omit
#'
#' @return A list with four elements.
#' \itemize{
#' \item \code{infoDesign} is a list with information on the design parameters.
#' \item \code{resolvableBlocks} a list with the resolvable row columns blocks.
#' \item \code{concurrence} is the concurrence matrix.
#' \item \code{fieldBook} is a data frame with the row-column field book.
#' }
#'
#'
#' @references
#' Edmondson., R. N. (2021). blocksdesign: Nested and crossed block designs for factorial and
#' unstructured treatment sets. https://CRAN.R-project.org/package=blocksdesign
#'
#' @examples
#'
#' # Example 1: Generates a row-column design with 3 full blocks and 36 treatments
#' # and 6 rows. This for one location.
#' rowcold1 <- row_column(t = 36, nrows = 6, r = 3, l = 1,
#' plotNumber= 101,
#' locationNames = "Loc1",
#' seed = 21)
#' rowcold1$infoDesign
#' rowcold1$resolvableBlocks
#' head(rowcold1$fieldBook,12)
#'
#' # Example 2: Generates a row-column design with 3 full blocks and 30 treatments
#' # and 5 rows, for one location.
#' # In this case, we show how to use the option data.
#' treatments <- paste("ND-", 1:30, sep = "")
#' ENTRY <- 1:30
#' treatment_list <- data.frame(list(ENTRY = ENTRY, TREATMENT = treatments))
#' head(treatment_list)
#' rowcold2 <- row_column(t = 30, nrows = 5, r = 3, l = 1,
#' plotNumber= c(101,1001),
#' locationNames = c("A", "B"),
#' seed = 15,
#' data = treatment_list)
#' rowcold2$infoDesign
#' rowcold2$resolvableBlocks
#' head(rowcold2$fieldBook,12)
#'
#'
#' @export
row_column <- function(t = NULL, nrows = NULL, r = NULL, l = 1, plotNumber= 101, locationNames = NULL,
                       seed = NULL, data = NULL) {
  # Use a random (but recorded) seed when none is supplied so the
  # randomization can still be reproduced from infoDesign$seed.
  if (is.null(seed) || !is.numeric(seed)) seed <- runif(1, min = -50000, max = 50000)
  set.seed(seed)
  # k = number of rows per full resolvable replicate; this is the incomplete
  # block size passed down to incomplete_blocks().
  k <- nrows
  lookup <- FALSE
  if(is.null(data)) {
    if (is.null(t) || is.null(k) || is.null(r) || is.null(l)) {
      shiny::validate('Some of the basic design parameters are missing (t, k, r or l).')
    }
    arg1 <- list(k, r, l);arg2 <- c(k, r, l)
    if (base::any(lengths(arg1) != 1) || base::any(arg2 %% 1 != 0) || base::any(arg2 < 1)) {
      # FIX: corrected typo "possitive" in the validation message.
      shiny::validate('row_column() requires k, r and l to be positive integers.')
    }
    # t may be a single treatment count, a numeric vector of labels, or a
    # character/factor vector of treatment names.
    if (is.numeric(t)) {
      if (length(t) == 1) {
        if (t == 1 || t < 1) {
          shiny::validate('row_column() requires more than one treatment.')
        }
        nt <- t
      }else if ((length(t) > 1)) {
        nt <- length(t)
        TRT <- t
      }
    }else if (is.character(t) || is.factor(t)) {
      if (length(t) == 1) {
        # FIX: message previously named incomplete_blocks(); this is row_column().
        shiny::validate('row_column() requires more than one treatment.')
      }
      nt <- length(t)
    }else if ((length(t) > 1)) {
      nt <- length(t)
    }
    # Default lookup table with generic treatment labels G-1 ... G-nt.
    df <- data.frame(list(ENTRY = 1:nt, TREATMENT = paste0("G-", 1:nt)))
    data_RowCol <- df
    lookup <- TRUE
  }else if (!is.null(data)) {
    if (is.null(t) || is.null(r) || is.null(k) || is.null(l)) {
      shiny::validate('Some of the basic design parameters are missing (t, r, k or l).')
    }
    if(!is.data.frame(data)) shiny::validate("Data must be a data frame.")
    # Only the first two columns (ENTRY, TREATMENT) of the user data are used.
    data_up <- as.data.frame(data[,c(1,2)])
    data_up <- na.omit(data_up)
    colnames(data_up) <- c("ENTRY", "TREATMENT")
    data_up$TREATMENT <- as.character(data_up$TREATMENT)
    new_t <- length(data_up$TREATMENT)
    if (t != new_t) base::stop("Number of treatments do not match with data input.")
    TRT <- data_up$TREATMENT
    nt <- length(TRT)
    data_RowCol <- data_up
    lookup <- TRUE
  }
  # FIX: message previously named incomplete_blocks(); this is row_column().
  if (k >= nt) shiny::validate('row_column() requires k < t.')
  if(is.null(locationNames) || length(locationNames) != l) locationNames <- 1:l
  nunits <- k
  # The row blocking comes from a resolvable incomplete block design: the
  # incomplete blocks become the rows, so optimization is on rows only.
  matdf <- incomplete_blocks(t = nt, k = nunits, r = r, l = l, plotNumber = plotNumber,
                             seed = seed, locationNames = locationNames,
                             data = data_RowCol)
  matdf <- matdf$fieldBook
  matdf <- as.data.frame(matdf)
  colnames(matdf)[5] <- "COLUMN"
  matdf$ROW <- matdf$UNIT
  OutRowCol <- matdf[,-6]
  OutRowCol$LOCATION <- factor(OutRowCol$LOCATION, levels = locationNames)
  OutRowCol <- OutRowCol[order(OutRowCol$LOCATION, OutRowCol$REP, OutRowCol$ROW),]
  RowCol_plots <- ibd_plot_numbers(nt = nt, plot.number = plotNumber, r = r, l = l)
  OutRowCol$PLOT <- as.vector(unlist(RowCol_plots))
  # Column layout differs depending on whether a TREATMENT lookup is present.
  if(lookup) {
    OutRowCol <- OutRowCol[,c(2,3,4,8,5,6,7)]
  }else OutRowCol <- OutRowCol[,c(2,3,4,7,5,6)]
  ID <- 1:nrow(OutRowCol)
  OutRowCol <- cbind(ID, OutRowCol)
  rownames(OutRowCol) <- 1:nrow(OutRowCol)
  loc <- levels(OutRowCol$LOCATION)
  # ib = number of columns per replicate (treatments divided across k rows).
  ib <- nt/k
  # Collect each replicate at each location as a (rows x columns) matrix.
  Resolvable_rc_reps <- vector(mode = "list", length = r*l)
  w <- 1
  for (sites in 1:l) {
    for (j in 1:r) {
      z <- OutRowCol
      z <- subset(z, z$LOCATION == loc[sites] & z$REP == j)
      if (is.null(data)){
        Resolvable_rc_reps[[w]] <- matrix(data = as.vector(z$ENTRY), nrow = nunits,
                                          ncol = ib, byrow = TRUE)
      }else {
        Resolvable_rc_reps[[w]] <- matrix(data = as.vector(z$TREATMENT), nrow = nunits,
                                          ncol = ib, byrow = TRUE)
      }
      w <- w + 1
    }
  }
  # Re-group the flat list of replicate matrices by location.
  NEW_Resolvable <- setNames(vector(mode = "list", length = l),
                             paste0("Loc_", locationNames))
  x <- seq(1, r * l, r)
  y <- seq(r, r * l, r)
  z <- 1
  for (loc in 1:l) {
    NEW_Resolvable[[loc]] <- setNames(Resolvable_rc_reps[x[z]:y[z]],
                                      paste0(rep("rep", r), 1:r))
    z <- z + 1
  }
  # Concurrence of treatment pairs within the REP, ROW and COLUMN strata,
  # merged into a single summary table.
  df <- OutRowCol
  trt <- "ENTRY"
  c1 <- concurrence_matrix(df=df, trt=trt, target='REP')
  c2 <- concurrence_matrix (df=df, trt=trt, target='ROW')
  c3 <- concurrence_matrix (df=df, trt=trt, target='COLUMN')
  summ <- merge(c1, c2, by="Concurrence", all=TRUE)
  new_summ <- merge(summ, c3, by='Concurrence', all=TRUE)
  infoDesign <- list(
    rows = nrows,
    columns = ib,
    reps = r,
    treatments = nt,
    locations = l,
    location_names = locationNames,
    seed = seed,
    id_design = 9
  )
  output <- list(infoDesign = infoDesign, resolvableBlocks = NEW_Resolvable,
                 concurrence = new_summ,
                 fieldBook = OutRowCol)
  class(output) <- "FielDHub"
  return(invisible(output))
}
#'
#'
#' @description It randomly generates a resolvable row-column design (RowColD).
#' Note that design optimization is only done at the level of rows and not columns;
#' hence, design is suboptimal. The randomization can be done across locations.
#'
#' @param t Number of treatments.
#' @param nrows Number of rows of a full resolvable replicate.
#' @param r Number of blocks (full resolvable replicates).
#' @param l Number of locations. By default \code{l = 1}.
#' @param plotNumber Numeric vector with the starting plot number for each location. By default \code{plotNumber = 101}.
#' @param seed (optional) Real number that specifies the starting seed to obtain reproducible designs.
#' @param locationNames (optional) Names for each location.
#' @param data (optional) Data frame with label list of treatments
#'
#' @author Didier Murillo [aut],
#' Salvador Gezan [aut],
#' Ana Heilman [ctb],
#' Thomas Walk [ctb],
#' Johan Aparicio [ctb],
#' Richard Horsley [ctb]
#'
#'
#' @importFrom stats runif na.omit
#'
#' @return A list with four elements.
#' \itemize{
#' \item \code{infoDesign} is a list with information on the design parameters.
#' \item \code{resolvableBlocks} a list with the resolvable row columns blocks.
#' \item \code{concurrence} is the concurrence matrix.
#' \item \code{fieldBook} is a data frame with the row-column field book.
#' }
#'
#'
#' @references
#' Edmondson., R. N. (2021). blocksdesign: Nested and crossed block designs for factorial and
#' unstructured treatment sets. https://CRAN.R-project.org/package=blocksdesign
#'
#' @examples
#'
#' # Example 1: Generates a row-column design with 3 full blocks and 36 treatments
#' # and 6 rows. This for one location.
#' rowcold1 <- row_column(t = 36, nrows = 6, r = 3, l = 1,
#' plotNumber= 101,
#' locationNames = "Loc1",
#' seed = 21)
#' rowcold1$infoDesign
#' rowcold1$resolvableBlocks
#' head(rowcold1$fieldBook,12)
#'
#' # Example 2: Generates a row-column design with 3 full blocks and 30 treatments
#' # and 5 rows, for one location.
#' # In this case, we show how to use the option data.
#' treatments <- paste("ND-", 1:30, sep = "")
#' ENTRY <- 1:30
#' treatment_list <- data.frame(list(ENTRY = ENTRY, TREATMENT = treatments))
#' head(treatment_list)
#' rowcold2 <- row_column(t = 30, nrows = 5, r = 3, l = 1,
#' plotNumber= c(101,1001),
#' locationNames = c("A", "B"),
#' seed = 15,
#' data = treatment_list)
#' rowcold2$infoDesign
#' rowcold2$resolvableBlocks
#' head(rowcold2$fieldBook,12)
#'
#'
#' @export
row_column <- function(t = NULL, nrows = NULL, r = NULL, l = 1, plotNumber= 101, locationNames = NULL,
seed = NULL, data = NULL) {
if (is.null(seed) || !is.numeric(seed)) seed <- runif(1, min = -50000, max = 50000)
set.seed(seed)
k <- nrows
lookup <- FALSE
if(is.null(data)) {
if (is.null(t) || is.null(k) || is.null(r) || is.null(l)) {
shiny::validate('Some of the basic design parameters are missing (t, k, r or l).')
}
arg1 <- list(k, r, l);arg2 <- c(k, r, l)
if (base::any(lengths(arg1) != 1) || base::any(arg2 %% 1 != 0) || base::any(arg2 < 1)) {
shiny::validate('row_column() requires k, r and l to be possitive integers.')
}
if (is.numeric(t)) {
if (length(t) == 1) {
if (t == 1 || t < 1) {
shiny::validate('row_column() requires more than one treatment.')
}
nt <- t
}else if ((length(t) > 1)) {
nt <- length(t)
TRT <- t
}
}else if (is.character(t) || is.factor(t)) {
if (length(t) == 1) {
shiny::validate('incomplete_blocks() requires more than one treatment.')
}
nt <- length(t)
}else if ((length(t) > 1)) {
nt <- length(t)
}
df <- data.frame(list(ENTRY = 1:nt, TREATMENT = paste0("G-", 1:nt)))
data_RowCol <- df
lookup <- TRUE
}else if (!is.null(data)) {
if (is.null(t) || is.null(r) || is.null(k) || is.null(l)) {
shiny::validate('Some of the basic design parameters are missing (t, r, k or l).')
}
if(!is.data.frame(data)) shiny::validate("Data must be a data frame.")
data_up <- as.data.frame(data[,c(1,2)])
data_up <- na.omit(data_up)
colnames(data_up) <- c("ENTRY", "TREATMENT")
data_up$TREATMENT <- as.character(data_up$TREATMENT)
new_t <- length(data_up$TREATMENT)
if (t != new_t) base::stop("Number of treatments do not match with data input.")
TRT <- data_up$TREATMENT
nt <- length(TRT)
data_RowCol <- data_up
lookup <- TRUE
}
if (k >= nt) shiny::validate('incomplete_blocks() requires k < t.')
if(is.null(locationNames) || length(locationNames) != l) locationNames <- 1:l
nunits <- k
matdf <- incomplete_blocks(t = nt, k = nunits, r = r, l = l, plotNumber = plotNumber,
seed = seed, locationNames = locationNames,
data = data_RowCol)
matdf <- matdf$fieldBook
matdf <- as.data.frame(matdf)
colnames(matdf)[5] <- "COLUMN"
matdf$ROW <- matdf$UNIT
OutRowCol <- matdf[,-6]
OutRowCol$LOCATION <- factor(OutRowCol$LOCATION, levels = locationNames)
OutRowCol <- OutRowCol[order(OutRowCol$LOCATION, OutRowCol$REP, OutRowCol$ROW),]
RowCol_plots <- ibd_plot_numbers(nt = nt, plot.number = plotNumber, r = r, l = l)
OutRowCol$PLOT <- as.vector(unlist(RowCol_plots))
if(lookup) {
OutRowCol <- OutRowCol[,c(2,3,4,8,5,6,7)]
}else OutRowCol <- OutRowCol[,c(2,3,4,7,5,6)]
ID <- 1:nrow(OutRowCol)
OutRowCol <- cbind(ID, OutRowCol)
rownames(OutRowCol) <- 1:nrow(OutRowCol)
loc <- levels(OutRowCol$LOCATION)
ib <- nt/k
Resolvable_rc_reps <- vector(mode = "list", length = r*l)
w <- 1
for (sites in 1:l) {
for (j in 1:r) {
z <- OutRowCol
z <- subset(z, z$LOCATION == loc[sites] & z$REP == j)
if (is.null(data)){
Resolvable_rc_reps[[w]] <- matrix(data = as.vector(z$ENTRY), nrow = nunits,
ncol = ib, byrow = TRUE)
}else {
Resolvable_rc_reps[[w]] <- matrix(data = as.vector(z$TREATMENT), nrow = nunits,
ncol = ib, byrow = TRUE)
}
w <- w + 1
}
}
NEW_Resolvable <- setNames(vector(mode = "list", length = l),
paste0("Loc_", locationNames))
x <- seq(1, r * l, r)
y <- seq(r, r * l, r)
z <- 1
for (loc in 1:l) {
NEW_Resolvable[[loc]] <- setNames(Resolvable_rc_reps[x[z]:y[z]],
paste0(rep("rep", r), 1:r))
z <- z + 1
}
df <- OutRowCol
trt <- "ENTRY"
c1 <- concurrence_matrix(df=df, trt=trt, target='REP')
c2 <- concurrence_matrix (df=df, trt=trt, target='ROW')
c3 <- concurrence_matrix (df=df, trt=trt, target='COLUMN')
summ <- merge(c1, c2, by="Concurrence", all=TRUE)
new_summ <- merge(summ, c3, by='Concurrence', all=TRUE)
infoDesign <- list(
rows = nrows,
columns = ib,
reps = r,
treatments = nt,
locations = l,
location_names = locationNames,
seed = seed,
id_design = 9
)
output <- list(infoDesign = infoDesign, resolvableBlocks = NEW_Resolvable,
concurrence = new_summ,
fieldBook = OutRowCol)
class(output) <- "FielDHub"
return(invisible(output))
} |
# Jignesh H. Parmar and Pedro Mendes, Plos Comp Biol in 2019
# Fixed Model Entities:
#metabolite 'FeOutside': fixed
FeOutsi=0
#compartment 'Duodenum':fixed
Duodenu=3.84615e-05
#compartment 'RBC':fixed
RBC=0.00079
#compartment 'Spleen':fixed
Spleen=6.73077e-05
#compartment 'Liver':fixed
Liver=0.0011619
#compartment 'Plasma':fixed
Plasma=0.0013
#compartment 'RestOfBody':fixed
RestOfB=0.0196948
#compartment 'BoneMarrow':fixed
BoneMar=0.000214286
#global quantity 'kNTBI_Fe1Tf':fixed
kNTBI_F=1.004e+09
#global quantity 'kInDuo':fixed
kInDuo=0.0405971
#global quantity 'kInLiver':fixed
kInLive=2.11666
#global quantity 'kInRBC':fixed
kInRBC=5.03844e+11
#global quantity 'kInRest':fixed
kInRest=4.78121
#global quantity 'KmFeFPN':fixed
KmFeFPN=0.112511
#global quantity 'KiHepcidinFPN':fixed
KiHepci=6.3e-09
#global quantity 'kFe1Tf_Fe2Tf':fixed
kFe1Tf_=1.004e+09
#global quantity 'vDuoNTBI':fixed
vDuoNTB=0.200907
#global quantity 'vLiverNTBI':fixed
vLiverN=0.0444795
#global quantity 'vSpleenNTBI':fixed
vSpleen=2.06738
#global quantity 'vRestNTBI':fixed
vRestNT=0.0101453
#global quantity 'kDuoLoss':fixed
kDuoLos=6.80738e-05
#global quantity 'kRBCSpleen':fixed
kRBCSpl=0.03
#global quantity 'kInBM':fixed
kInBM=4.06878e+12
#global quantity 'kBMSpleen':fixed
kBMSple=0.103218
#global quantity 'vDiet_basal':fixed
vDiet_b=0.00346965
#global quantity 'KaNTBI':fixed
KaNTBI=0.000255016
#global quantity 'KmNTBI':fixed
KmNTBI=0.000679291
#global quantity 'vNTBILiver':fixed
vNTBILi=14.1511
#global quantity 'fDiet':fixed
fDiet=1
#global quantity 'ksHepcidin':fixed
ksHepci=0.000398766
#global quantity 'vEPO':fixed
vEPO=2.62675e-09
#global quantity 'kdEPO':fixed
kdEPO=4.8
#global quantity 'hEPO':fixed
hEPO=6.5
#global quantity 'KiEPORBC':fixed
KiEPORB=0.01
#global quantity 'KEPOHepcidin':fixed
KEPOHep=5e-12
#global quantity 'hEPOHepcidin':fixed
hEPOHep=4
#global quantity 'hNTBI':fixed
hNTBI=1
#global quantity 'kRestLoss':fixed
kRestLo=0.016862
#global quantity 'vTf':fixed
vTf=1.548e-05
#global quantity 'kdTf':fixed
kdTf=0.4
#global quantity 'kdHepcidin':fixed
kdHepci=0.75616
# Collect the names of all objects defined above (the model constants);
# the outer parentheses print the result. NOTE(review): ls() grabs the whole
# global environment, so this assumes the script is run in a fresh session.
(pars=dput(ls()))
names(pars)=pars
# Echo the named character vector; its deparsed form was pasted below to
# build parmarPars19 explicitly.
dput(pars)
# Named numeric vector of all Parmar & Mendes (2019) model parameters,
# assembled alphabetically from the constants defined above.
parmarPars19=c(BoneMar = BoneMar, Duodenu = Duodenu, fDiet = fDiet,
               FeOutsi = FeOutsi, hEPO = hEPO, hEPOHep = hEPOHep, hNTBI = hNTBI,
               KaNTBI = KaNTBI, kBMSple = kBMSple, kdEPO = kdEPO, kdHepci = kdHepci,
               kdTf = kdTf, kDuoLos = kDuoLos, KEPOHep = KEPOHep, kFe1Tf_ = kFe1Tf_,
               KiEPORB = KiEPORB, KiHepci = KiHepci, kInBM = kInBM, kInDuo = kInDuo,
               kInLive = kInLive, kInRBC = kInRBC, kInRest = kInRest,
               KmFeFPN = KmFeFPN, KmNTBI = KmNTBI, kNTBI_F = kNTBI_F,
               kRBCSpl = kRBCSpl, kRestLo = kRestLo, ksHepci = ksHepci,
               Liver = Liver, Plasma = Plasma, RBC = RBC, RestOfB = RestOfB,
               Spleen = Spleen, vDiet_b = vDiet_b, vDuoNTB = vDuoNTB,
               vEPO = vEPO, vLiverN = vLiverN, vNTBILi = vNTBILi, vRestNT = vRestNT,
               vSpleen = vSpleen, vTf = vTf)
# Superseded parameter set from an earlier model version, kept for reference.
# parmarPars=c(FeOutsi=0, Duodenu=3.84615e-05, RBC=0.00079, Spleen=6.73077e-05, Liver=0.0011619,
#              Plasma=0.0013, RestOfB=0.0196948, BoneMar=0.000214286,
#              # organ volumes in Liters (Table 1)  mouse= ~25gm = ~.023L
#              kNTBI_F=1.08432e+09, #kNTBI_Fe1Tf
#              kInDuo=0.0689984, kInLive=2.97791, kInRBC=1.08448, kInRest=6.16356,
#              Km=0.0159421, Ki=1e-09, kFe1Tf_=1.08432e+09, #kFe1Tf_Fe2Tf
#              VmaxDuo=0.200242, VmaxLiv=0.0261148, VmaxSpl=1.3422, VmaxRest=0.0109451,  # all 4 NTBI, in Moles/Day
#              kDuoLos=0.0270113, #kDuoLoss
#              vRBCSpl=0.0235286, kRestLo=0.0235332, kInBM=15.7691, kBMSple=0.061903,
#              vDiet=0.00377422,vDietH=0.00415624, vDietL=0, #Hi vs. low Fe diet
#              quantit=6.02214e+23, # Avogadr=6.02214e+23,
#              #HepcidinSynthesis  Hi vs. Low Fe diet (adaptation?)
#              vHepSyn=1.7393e-08, vHepSynH=2.30942e-08, vHepSynL=8.54927e-09,
#              k1=0.75616 #reaction 'HepcidinDecay': kinetic ,eter 'k1'
# )
# sum(parmarPars[1:8])  #23.2 gm mouse
# Persist the parameter vector as package data.
save(parmarPars19,file="parmarPars19.rda")
| /myelo/inst/doc/parmarPars19.R | no_license | radivot/myelo | R | false | false | 4,050 | r | # Jignesh H. Parmar and Pedro Mendes, Plos Comp Biol in 2019
# Fixed Model Entities:
#metabolite 'FeOutside': fixed
FeOutsi=0
#compartment 'Duodenum':fixed
Duodenu=3.84615e-05
#compartment 'RBC':fixed
RBC=0.00079
#compartment 'Spleen':fixed
Spleen=6.73077e-05
#compartment 'Liver':fixed
Liver=0.0011619
#compartment 'Plasma':fixed
Plasma=0.0013
#compartment 'RestOfBody':fixed
RestOfB=0.0196948
#compartment 'BoneMarrow':fixed
BoneMar=0.000214286
#global quantity 'kNTBI_Fe1Tf':fixed
kNTBI_F=1.004e+09
#global quantity 'kInDuo':fixed
kInDuo=0.0405971
#global quantity 'kInLiver':fixed
kInLive=2.11666
#global quantity 'kInRBC':fixed
kInRBC=5.03844e+11
#global quantity 'kInRest':fixed
kInRest=4.78121
#global quantity 'KmFeFPN':fixed
KmFeFPN=0.112511
#global quantity 'KiHepcidinFPN':fixed
KiHepci=6.3e-09
#global quantity 'kFe1Tf_Fe2Tf':fixed
kFe1Tf_=1.004e+09
#global quantity 'vDuoNTBI':fixed
vDuoNTB=0.200907
#global quantity 'vLiverNTBI':fixed
vLiverN=0.0444795
#global quantity 'vSpleenNTBI':fixed
vSpleen=2.06738
#global quantity 'vRestNTBI':fixed
vRestNT=0.0101453
#global quantity 'kDuoLoss':fixed
kDuoLos=6.80738e-05
#global quantity 'kRBCSpleen':fixed
kRBCSpl=0.03
#global quantity 'kInBM':fixed
kInBM=4.06878e+12
#global quantity 'kBMSpleen':fixed
kBMSple=0.103218
#global quantity 'vDiet_basal':fixed
vDiet_b=0.00346965
#global quantity 'KaNTBI':fixed
KaNTBI=0.000255016
#global quantity 'KmNTBI':fixed
KmNTBI=0.000679291
#global quantity 'vNTBILiver':fixed
vNTBILi=14.1511
#global quantity 'fDiet':fixed
fDiet=1
#global quantity 'ksHepcidin':fixed
ksHepci=0.000398766
#global quantity 'vEPO':fixed
vEPO=2.62675e-09
#global quantity 'kdEPO':fixed
kdEPO=4.8
#global quantity 'hEPO':fixed
hEPO=6.5
#global quantity 'KiEPORBC':fixed
KiEPORB=0.01
#global quantity 'KEPOHepcidin':fixed
KEPOHep=5e-12
#global quantity 'hEPOHepcidin':fixed
hEPOHep=4
#global quantity 'hNTBI':fixed
hNTBI=1
#global quantity 'kRestLoss':fixed
kRestLo=0.016862
#global quantity 'vTf':fixed
vTf=1.548e-05
#global quantity 'kdTf':fixed
kdTf=0.4
#global quantity 'kdHepcidin':fixed
kdHepci=0.75616
(pars=dput(ls()))
names(pars)=pars
dput(pars)
parmarPars19=c(BoneMar = BoneMar, Duodenu = Duodenu, fDiet = fDiet,
FeOutsi = FeOutsi, hEPO = hEPO, hEPOHep = hEPOHep, hNTBI = hNTBI,
KaNTBI = KaNTBI, kBMSple = kBMSple, kdEPO = kdEPO, kdHepci = kdHepci,
kdTf = kdTf, kDuoLos = kDuoLos, KEPOHep = KEPOHep, kFe1Tf_ = kFe1Tf_,
KiEPORB = KiEPORB, KiHepci = KiHepci, kInBM = kInBM, kInDuo = kInDuo,
kInLive = kInLive, kInRBC = kInRBC, kInRest = kInRest,
KmFeFPN = KmFeFPN, KmNTBI = KmNTBI, kNTBI_F = kNTBI_F,
kRBCSpl = kRBCSpl, kRestLo = kRestLo, ksHepci = ksHepci,
Liver = Liver, Plasma = Plasma, RBC = RBC, RestOfB = RestOfB,
Spleen = Spleen, vDiet_b = vDiet_b, vDuoNTB = vDuoNTB,
vEPO = vEPO, vLiverN = vLiverN, vNTBILi = vNTBILi, vRestNT = vRestNT,
vSpleen = vSpleen, vTf = vTf)
# parmarPars=c(FeOutsi=0, Duodenu=3.84615e-05, RBC=0.00079, Spleen=6.73077e-05, Liver=0.0011619,
# Plasma=0.0013, RestOfB=0.0196948, BoneMar=0.000214286,
# # organ volumes in Liters (Table 1) mouse= ~25gm = ~.023L
# kNTBI_F=1.08432e+09, #kNTBI_Fe1Tf
# kInDuo=0.0689984, kInLive=2.97791, kInRBC=1.08448, kInRest=6.16356,
# Km=0.0159421, Ki=1e-09, kFe1Tf_=1.08432e+09, #kFe1Tf_Fe2Tf
# VmaxDuo=0.200242, VmaxLiv=0.0261148, VmaxSpl=1.3422, VmaxRest=0.0109451, # all 4 NTBI, in Moles/Day
# kDuoLos=0.0270113, #kDuoLoss
# vRBCSpl=0.0235286, kRestLo=0.0235332, kInBM=15.7691, kBMSple=0.061903,
# vDiet=0.00377422,vDietH=0.00415624, vDietL=0, #Hi vs. low Fe diet
# quantit=6.02214e+23, # Avogadr=6.02214e+23,
# #HepcidinSynthesis Hi vs. Low Fe diet (adaptation?)
# vHepSyn=1.7393e-08, vHepSynH=2.30942e-08, vHepSynL=8.54927e-09,
# k1=0.75616 #reaction 'HepcidinDecay': kinetic ,eter 'k1'
# )
# sum(parmarPars[1:8]) #23.2 gm mouse
save(parmarPars19,file="parmarPars19.rda")
|
#Create a matrix object that can cache its inverse
# Build a special "matrix" object that can cache its own inverse.
# Returns a list of accessor functions closing over the matrix `Mtrx`
# and its lazily computed inverse.
makeCacheMatrix <- function(Mtrx = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix; any previously cached inverse is now stale,
  # so it is invalidated (<<- writes to the enclosing environment).
  set <- function(new_matrix) {
    Mtrx <<- new_matrix
    cached_inverse <<- NULL
  }
  # Retrieve the stored matrix.
  get <- function() {
    Mtrx
  }
  # Store a computed inverse in the cache.
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Retrieve the cached inverse (NULL if not yet computed).
  getInverse <- function() {
    cached_inverse
  }
  # Expose the four accessors.
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Returns the inverse of matrix.
#If the inverse has already been calculated then the cachesolve will retrieve and return the inverse from cache
# Return the inverse of the cached-matrix object `x` (as built by
# makeCacheMatrix). The inverse is computed at most once: later calls
# return the value memoised inside `x`.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  # Cache hit: report and return the stored inverse without recomputing.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: fetch the matrix, invert it, and store the result.
  fresh <- solve(x$get())
  x$setInverse(fresh)
  fresh
}
| /cachematrix.R | no_license | vinothinij/RProgrammingAssignment2 | R | false | false | 1,570 | r | #Create a matrix object that can cache its inverse
# Build a special "matrix" object that can cache its own inverse; returns a
# list of accessor closures over the matrix and its cached inverse.
makeCacheMatrix <- function(Mtrx = matrix()) {
  # Cached inverse; NULL until computed via setInverse().
  mtrxInverse <- NULL
  # Replace the stored matrix and invalidate the stale cached inverse
  # (<<- assigns in the enclosing environment, not a new local).
  set <- function(y) {
    Mtrx <<- y
    mtrxInverse <<- NULL
  }
  # Retrieve the stored matrix.
  get <- function() Mtrx
  # Store a computed inverse in the cache.
  setInverse <- function(inverse) mtrxInverse <<- inverse
  # Retrieve the cached inverse (NULL when not yet computed).
  getInverse <- function() mtrxInverse
  # Expose the four accessor functions.
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Returns the inverse of matrix.
#If the inverse has already been calculated then the cachesolve will retrieve and return the inverse from cache
# Return the inverse of a cached-matrix object created by makeCacheMatrix().
# The inverse is computed at most once; subsequent calls reuse the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Check whether an inverse is already cached inside x.
  invMtrx <- x$getInverse()
  # Cache hit: return the stored inverse without recomputing.
  if(!is.null(invMtrx)) {
    message("getting cached data")
    return(invMtrx)
  }
  # Cache miss: fetch the underlying matrix ...
  data <- x$get()
  # ... invert it (solve() with one argument returns the matrix inverse) ...
  invMtrx <- solve(data)
  # ... store it for future calls ...
  x$setInverse(invMtrx)
  # ... and return it.
  invMtrx
}
|
#PCV was launched by the Union Health Minister, Shri JP Nadda on May 13, 2017 at Mandi,
#Himachal Pradesh [1]. With this phased introduction, nearly 2.1 million children in
#Himachal Pradesh (all 12 districts), parts of Bihar (17 out of 38 districts)
#and Uttar Pradesh (6 out of 75 districts) will be vaccinated with PCV in the first year [2].
#This will be followed by introduction in Madhya Pradesh and Rajasthan next year, and eventually coverage will be expanded across the entire country in a phased manner, in the coming years. "
#In uttar Pradesh it is: Lakhimpur Kheri, Sitapur, Siddharth Nagar, Bahraich, Balrampur, Shrawasti;
#In Bihar: The 17 high-priority districts are Araria, Begusarai, Darbhanga, Kishanganj, Khagaria, Katihar,
#Muzaffarpur, Munger, Vaishali, Madhepura, Madhubani, Purnea, Samastipur, Saran, Sitamarhi, Sheohar and Supaul
#bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/Bihar series by district.csv')
#######
#Lorine
##ACUTE RESPIRATORY INFECTION REQUIRING ADMISSION??
##85% of DEATH OCCUR AT HOME--SHOULD BE IN HMIS; MANY
##IN PRIVATE SECTOR HOSPITALS
##CONTROLS: DIARRHEA deaths? ROTA VACCINE IS ROLLING OUT..
##ACUTE ENCAPHALITIS IN SOUTH UP AND BIHAR. THERE ARE SOME
##VACCINE PROGRAMS BUT STARTED 5 YEARS AGO...NOT REALLY
#IN WESTERN UP
#ASPHYXIA DEATHS WONT BE SAME HOSPITALS --
#WOULD BE IN WOMENS HOSPITAL...MANY ARE SEPARATE
#WOMENS/MENS HOSPTALS
#UP HMIS--DR VISANT (Sp?)
#######
library(lme4)
library(lubridate)
# District-month HMIS extract for Uttar Pradesh (through March 2019).
# NOTE(review): absolute local path — not portable; works only on the author's machine.
bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/hmis/uttarPradesh series to Mar 2019.csv')
str(bh1)
# Parse month/year out of fixed positions in DATE; positions 2-5 and 6-7 suggest
# DATE carries a leading character (e.g. a quote) before "YYYYMM" — TODO confirm format.
bh1$month<-as.numeric(substr(bh1$DATE,6,7))
bh1$year<-as.numeric(substr(bh1$DATE,2,5))
# Rename HMIS indicator columns to readable outcome names.
# NOTE(review): the X_* code-to-indicator mapping is taken on faith from the
# column names; verify against the HMIS data dictionary.
bh1$uri<-bh1$X_10_13
bh1$pneu_death<-bh1$X_16_3_1
bh1$diar_death<-bh1$X_16_3_2
bh1$measles_death<-bh1$X_16_3_4
bh1$asphyxia_death<-bh1$X_16_2_2
bh1$sepsis_death<-bh1$X_16_2_1
bh1$neonatal_death<-bh1$X_16_1
# First-of-month Date for each record, built from the parsed year/month.
bh1$monthdate<-as.Date(paste(bh1$year,bh1$month,'01', sep='-'))
# The six UP districts in the first phase of PCV introduction (per the header notes).
up.intro.districts<-c('Lakhimpur Kheri', 'Sitapur', 'Siddharth Nagar', 'Bahraich', 'Balrampur', 'Shrawasti')
# Flag districts that received PCV (1) vs. not (0).
bh1$pcv.status<-0
bh1$pcv.status[bh1$DISTRICT %in% up.intro.districts] <-1
unique(bh1$DISTRICT)
#test123
strat1<-factor(bh1$monthdate)
ds.sub<-bh1[,c('uri', 'diar_death', 'pneu_death','sepsis_death','asphyxia_death', 'measles_death', 'neonatal_death')]
bh2<-aggregate(x=ds.sub, by=list( strat1) , FUN='sum', na.rm=TRUE)
names(bh2)<-c('monthdate',names(ds.sub))
bh2$monthdate<-as.Date(bh2$monthdate)
bh2$neonatal_death[nrow(bh2)]<-NA
bh2$month<-as.factor(month(bh2$monthdate))
bh2$year<-as.factor(year(bh2$monthdate))
par(mfrow=c(3,2), mar=c(3,3,1,1))
plot(bh2$monthdate,bh2$pneu_death,main='Pneumonia deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$diar_death,main='Diarrhea deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$sepsis_death,main='Sepsis Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$asphyxia_death,main='Asphyxia Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$measles_death,main='Measles Deaths', type='l', bty='l')
plot(bh2$monthdate, bh2$neonatal_death, main='Neonatal Deaths',type='l', bty='l')
par(mfrow=c(1,1), mar=c(2,3,1,1))
plot(bh2$uri, type='l',bty='l', main='URI cases')
mod.uri<- glm( uri~ month +year, family='poisson', data=bh2)
summary(mod.uri)
mod.pneu.death<- glm( pneu_death~ month +year, family='poisson', data=bh2[!(bh2$year %in% c('2017','2018','2019')),])
summary(mod.pneu.death)
#heatmap of reporting of URI
#seems to be incomplete before April 2017
library(reshape2)
ds.sub<-bh1[,c('DISTRICT','monthdate','uri')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
#Pneumonia deaths--seems to be incomplete before April 2017
ds.sub<-bh1[,c('DISTRICT','monthdate','pneu_death')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
##Simple model#
#Consider Apr 2018-Sep 2018 as roll out perios and Oct 2018-Mar 2019 as eval period
bh3<-bh1[bh1$monthdate>=as.Date('2017-04-01'),]
bh3$post.pcv<-0
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-04-01')& bh3$monthdate<=as.Date('2018-09-01')] <- 1
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-10-01')& bh3$monthdate<=as.Date('2019-03-01')] <- 2
bh3$post.pcv<-as.factor(bh3$post.pcv)
bh3$date.factor<-as.factor(bh3$monthdate)
bh3$obs<-as.factor(1:nrow(bh3))
mod1<-glmer(pneu_death ~ (1|date.factor) + (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
mod1<-glmer(uri ~ (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
#Gamm with time smooth
library(mgcv)
bh3$date.cont<-bh3$year+ bh3$month/12-1/12
mod2<-gamm(uri ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1))
summary(mod2$lme)
#mod2<-gamm(pneu_death ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1), niterPQL=500)
#summary(mod2$lme)
#statewide
ds.c.state<-dcast(ds.m, monthdate~1, fun.aggregate = sum)
par(mar=c(3,2,1,1))
plot(ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),1],ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),2], type='l')
#Brian: analyze by region vs district
#Need a crosswalk file. Western/Central/Eastern in UP; district-> division
#District hospitals--probably kids coming from district; bigger hospitals, kids coming from other places
# Mortality data: is location based on residence or place of death?
#--does death registry include out of hospital deaths
#There is an official death registry
| /Old code/hmis explore.R | no_license | DanWeinberger/hmis | R | false | false | 6,182 | r | #PCV was launched by the Union Health Minister, Shri JP Nadda on May 13, 2017 at Mandi,
#Himachal Pradesh [1]. With this phased introduction, nearly 2.1 million children in
#Himachal Pradesh (all 12 districts), parts of Bihar (17 out of 38 districts)
#and Uttar Pradesh (6 out of 75 districts) will be vaccinated with PCV in the first year [2].
#This will be followed by introduction in Madhya Pradesh and Rajasthan next year, and eventually coverage will be expanded across the entire country in a phased manner, in the coming years. "
#In uttar Pradesh it is: Lakhimpur Kheri, Sitapur, Siddharth Nagar, Bahraich, Balrampur, Shrawasti;
#In Bihar: The 17 high-priority districts are Araria, Begusarai, Darbhanga, Kishanganj, Khagaria, Katihar,
#Muzaffarpur, Munger, Vaishali, Madhepura, Madhubani, Purnea, Samastipur, Saran, Sitamarhi, Sheohar and Supaul
#bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/Bihar series by district.csv')
#######
#Lorine
##ACUTE RESPIRATORY INFECTION REQUIRING ADMISSION??
##85% of DEATH OCCUR AT HOME--SHOULD BE IN HMIS; MANY
##IN PRIVATE SECTOR HOSPITALS
##CONTROLS: DIARRHEA deaths? ROTA VACCINE IS ROLLING OUT..
##ACUTE ENCAPHALITIS IN SOUTH UP AND BIHAR. THERE ARE SOME
##VACCINE PROGRAMS BUT STARTED 5 YEARS AGO...NOT REALLY
#IN WESTERN UP
#ASPHYXIA DEATHS WONT BE SAME HOSPITALS --
#WOULD BE IN WOMENS HOSPITAL...MANY ARE SEPARATE
#WOMENS/MENS HOSPTALS
#UP HMIS--DR VISANT (Sp?)
#######
library(lme4)
library(lubridate)
# Monthly district-level HMIS extract for Uttar Pradesh.
bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/hmis/uttarPradesh series to Mar 2019.csv')
str(bh1)
# Parse month/year out of the DATE string.
# NOTE(review): the substr positions assume a one-character prefix before
# the 4-digit year (e.g. "X2017-04") -- confirm against str(bh1).
bh1$month<-as.numeric(substr(bh1$DATE,6,7))
bh1$year<-as.numeric(substr(bh1$DATE,2,5))
# Rename the coded HMIS indicator columns to readable names.
bh1$uri<-bh1$X_10_13
bh1$pneu_death<-bh1$X_16_3_1
bh1$diar_death<-bh1$X_16_3_2
bh1$measles_death<-bh1$X_16_3_4
bh1$asphyxia_death<-bh1$X_16_2_2
bh1$sepsis_death<-bh1$X_16_2_1
bh1$neonatal_death<-bh1$X_16_1
# First-of-month Date for each record.
bh1$monthdate<-as.Date(paste(bh1$year,bh1$month,'01', sep='-'))
up.intro.districts<-c('Lakhimpur Kheri', 'Sitapur', 'Siddharth Nagar', 'Bahraich', 'Balrampur', 'Shrawasti')
bh1$pcv.status<-0
bh1$pcv.status[bh1$DISTRICT %in% up.intro.districts] <-1
unique(bh1$DISTRICT)
#test123
strat1<-factor(bh1$monthdate)
ds.sub<-bh1[,c('uri', 'diar_death', 'pneu_death','sepsis_death','asphyxia_death', 'measles_death', 'neonatal_death')]
bh2<-aggregate(x=ds.sub, by=list( strat1) , FUN='sum', na.rm=TRUE)
names(bh2)<-c('monthdate',names(ds.sub))
bh2$monthdate<-as.Date(bh2$monthdate)
bh2$neonatal_death[nrow(bh2)]<-NA
bh2$month<-as.factor(month(bh2$monthdate))
bh2$year<-as.factor(year(bh2$monthdate))
par(mfrow=c(3,2), mar=c(3,3,1,1))
plot(bh2$monthdate,bh2$pneu_death,main='Pneumonia deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$diar_death,main='Diarrhea deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$sepsis_death,main='Sepsis Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$asphyxia_death,main='Asphyxia Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$measles_death,main='Measles Deaths', type='l', bty='l')
plot(bh2$monthdate, bh2$neonatal_death, main='Neonatal Deaths',type='l', bty='l')
par(mfrow=c(1,1), mar=c(2,3,1,1))
plot(bh2$uri, type='l',bty='l', main='URI cases')
mod.uri<- glm( uri~ month +year, family='poisson', data=bh2)
summary(mod.uri)
mod.pneu.death<- glm( pneu_death~ month +year, family='poisson', data=bh2[!(bh2$year %in% c('2017','2018','2019')),])
summary(mod.pneu.death)
#heatmap of reporting of URI
#seems to be incomplete before April 2017
library(reshape2)
ds.sub<-bh1[,c('DISTRICT','monthdate','uri')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
#Pneumonia deaths--seems to be incomplete before April 2017
ds.sub<-bh1[,c('DISTRICT','monthdate','pneu_death')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
##Simple model#
#Consider Apr 2018-Sep 2018 as roll out perios and Oct 2018-Mar 2019 as eval period
bh3<-bh1[bh1$monthdate>=as.Date('2017-04-01'),]
bh3$post.pcv<-0
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-04-01')& bh3$monthdate<=as.Date('2018-09-01')] <- 1
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-10-01')& bh3$monthdate<=as.Date('2019-03-01')] <- 2
bh3$post.pcv<-as.factor(bh3$post.pcv)
bh3$date.factor<-as.factor(bh3$monthdate)
bh3$obs<-as.factor(1:nrow(bh3))
mod1<-glmer(pneu_death ~ (1|date.factor) + (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
mod1<-glmer(uri ~ (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
#Gamm with time smooth
library(mgcv)
bh3$date.cont<-bh3$year+ bh3$month/12-1/12
mod2<-gamm(uri ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1))
summary(mod2$lme)
#mod2<-gamm(pneu_death ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1), niterPQL=500)
#summary(mod2$lme)
#statewide
ds.c.state<-dcast(ds.m, monthdate~1, fun.aggregate = sum)
par(mar=c(3,2,1,1))
plot(ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),1],ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),2], type='l')
#Brian: analyze by region vs district
#Need a crosswalk file. Western/Central/Eastern in UP; district-> division
#District hospitals--probably kids coming from district; bigger hospitals, kids coming from other places
# Mortality data: is location based on residence or place of death?
#--does death registry include out of hospital deaths
#There is an official death registry
|
# Checks the static pieces of the cytomapper shiny UI: header, sidebar
# and body are built with the expected tags, classes and children.
test_that("User interface is correctly rendered", {
    # Snapshot-style expectations require the testthat 3rd edition.
    local_edition(3)
    cur_header <- .cytomapper_header()
    expect_equal(cur_header$name, "header")
    expect_equal(cur_header$attribs$class, "main-header")
    expect_null(cur_header$children[[1]])
    expect_snapshot_output(cur_header$children[[2]])
    expect_snapshot_output(cur_header$children[[3]])
    cur_sidebar <- .cytomapper_sidebar()
    expect_equal(cur_sidebar$name, "aside")
    expect_equal(cur_sidebar$attribs$id, "sidebarCollapsed")
    expect_equal(cur_sidebar$attribs$class, "main-sidebar")
    expect_equal(cur_sidebar$attribs$`data-collapsed`, "false")
    expect_null(cur_sidebar$children[[1]])
    expect_snapshot_output(cur_sidebar$children[[2]])
    skip_on_cran()
    cur_body <- .cytomapper_body()
    expect_equal(cur_body$name, "div")
    expect_equal(cur_body$attribs$class, "content-wrapper")
    # The tabset id is generated at build time, so read it off the tag
    # tree and splice it into the expected HTML string.
    cur_tab_id <- cur_body$children[[1]][[3]][[1]][[3]][[1]][[3]][[1]][[2]]$`data-tabsetid`
    expect_equal(as.character(cur_body$children[[1]]), paste0('<section class=\"content\">\n <div class=\"col-sm-12\">\n <div class=\"nav-tabs-custom\">\n <ul class=\"nav nav-tabs shiny-tab-input\" id=\"tabbox1\" data-tabsetid=\"', cur_tab_id, '\">\n <li class=\"active\">\n <a href=\"#tab-', cur_tab_id, '-1\" data-toggle=\"tab\" data-value=\"tab1\">Scatter Plots</a>\n </li>\n <li>\n <a href=\"#tab-', cur_tab_id, '-2\" data-toggle=\"tab\" data-value=\"tab2\">Images</a>\n </li>\n </ul>\n <div class=\"tab-content\" data-tabsetid=\"', cur_tab_id, '\">\n <div class=\"tab-pane active\" data-value=\"tab1\" id=\"tab-', cur_tab_id, '-1\">\n <div id=\"AdditionalPlots_tab1\" class=\"shiny-html-output\"></div>\n </div>\n <div class=\"tab-pane\" data-value=\"tab2\" id=\"tab-', cur_tab_id, '-2\">\n <div id=\"AdditionalPlots_tab2\" class=\"shiny-html-output\"></div>\n </div>\n </div>\n </div>\n</section>'))
})
# The sidebar plot controls should grow with the requested plot count;
# snapshot the generated HTML for 1, 2 and 3 plots.
test_that("Sidebar is correctly rendered", {
    data("pancreasSCE")
    local_edition(3)
    testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
                                     cell_id = "CellNb"),
               {
                   session$setInputs(plotCount = 1)
                   expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
                   session$setInputs(plotCount = 2)
                   expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
                   session$setInputs(plotCount = 3)
                   expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
               })
})
# Tab 1 (scatter plots) layout should track the requested plot count;
# snapshot the generated HTML for 1, 2 and 3 plots.
test_that("Plots in tab 1 are correctly rendered", {
    data("pancreasSCE")
    local_edition(3)
    testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
                                     cell_id = "CellNb"),
               {
                   session$setInputs(plotCount = 1)
                   expect_snapshot_output(.addPlots_tab1(input)(session)$html)
                   session$setInputs(plotCount = 2)
                   expect_snapshot_output(.addPlots_tab1(input)(session)$html)
                   session$setInputs(plotCount = 3)
                   expect_snapshot_output(.addPlots_tab1(input)(session)$html)
               })
})
# Tab 2 (images) rendering depends on which of mask/image are supplied:
# neither -> NULL, mask only and mask+image -> snapshot the HTML.
test_that("Plots in tab 2 are correctly rendered", {
    data("pancreasSCE")
    data("pancreasMasks")
    data("pancreasImages")
    local_edition(3)
    # No mask and no image: nothing to render.
    testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
                                     cell_id = "CellNb"),
               {
                   expect_null(.addPlots_tab2(input, object = pancreasSCE, mask = NULL, image = NULL))
               })
    # Mask only.
    testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
                                     cell_id = "CellNb", mask = pancreasMasks,
                                     image = NULL),
               {
                   expect_snapshot_output(.addPlots_tab2(input, object = pancreasSCE, mask = pancreasMasks, image = NULL)(session)$html)
               })
    # Mask and image.
    testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
                                     cell_id = "CellNb", mask = pancreasMasks,
                                     image = pancreasImages),
               {
                   expect_snapshot_output(.addPlots_tab2(input, object = pancreasSCE, mask = pancreasMasks, image = pancreasImages)(session)$html)
               })
})
| /tests/testthat/test_shiny_ui.R | no_license | SchulzDan/cytomapper | R | false | false | 4,584 | r |
test_that("User interface is correctly rendered", {
local_edition(3)
cur_header <- .cytomapper_header()
expect_equal(cur_header$name, "header")
expect_equal(cur_header$attribs$class, "main-header")
expect_null(cur_header$children[[1]])
expect_snapshot_output(cur_header$children[[2]])
expect_snapshot_output(cur_header$children[[3]])
cur_sidebar <- .cytomapper_sidebar()
expect_equal(cur_sidebar$name, "aside")
expect_equal(cur_sidebar$attribs$id, "sidebarCollapsed")
expect_equal(cur_sidebar$attribs$class, "main-sidebar")
expect_equal(cur_sidebar$attribs$`data-collapsed`, "false")
expect_null(cur_sidebar$children[[1]])
expect_snapshot_output(cur_sidebar$children[[2]])
skip_on_cran()
cur_body <- .cytomapper_body()
expect_equal(cur_body$name, "div")
expect_equal(cur_body$attribs$class, "content-wrapper")
cur_tab_id <- cur_body$children[[1]][[3]][[1]][[3]][[1]][[3]][[1]][[2]]$`data-tabsetid`
expect_equal(as.character(cur_body$children[[1]]), paste0('<section class=\"content\">\n <div class=\"col-sm-12\">\n <div class=\"nav-tabs-custom\">\n <ul class=\"nav nav-tabs shiny-tab-input\" id=\"tabbox1\" data-tabsetid=\"', cur_tab_id, '\">\n <li class=\"active\">\n <a href=\"#tab-', cur_tab_id, '-1\" data-toggle=\"tab\" data-value=\"tab1\">Scatter Plots</a>\n </li>\n <li>\n <a href=\"#tab-', cur_tab_id, '-2\" data-toggle=\"tab\" data-value=\"tab2\">Images</a>\n </li>\n </ul>\n <div class=\"tab-content\" data-tabsetid=\"', cur_tab_id, '\">\n <div class=\"tab-pane active\" data-value=\"tab1\" id=\"tab-', cur_tab_id, '-1\">\n <div id=\"AdditionalPlots_tab1\" class=\"shiny-html-output\"></div>\n </div>\n <div class=\"tab-pane\" data-value=\"tab2\" id=\"tab-', cur_tab_id, '-2\">\n <div id=\"AdditionalPlots_tab2\" class=\"shiny-html-output\"></div>\n </div>\n </div>\n </div>\n </div>\n</section>'))
})
test_that("Sidebar is correctly rendered", {
data("pancreasSCE")
local_edition(3)
testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
cell_id = "CellNb"),
{
session$setInputs(plotCount = 1)
expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
session$setInputs(plotCount = 2)
expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
session$setInputs(plotCount = 3)
expect_snapshot_output(.addPlots_sidebar(input)(session)$html)
})
})
test_that("Plots in tab 1 are correctly rendered", {
data("pancreasSCE")
local_edition(3)
testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
cell_id = "CellNb"),
{
session$setInputs(plotCount = 1)
expect_snapshot_output(.addPlots_tab1(input)(session)$html)
session$setInputs(plotCount = 2)
expect_snapshot_output(.addPlots_tab1(input)(session)$html)
session$setInputs(plotCount = 3)
expect_snapshot_output(.addPlots_tab1(input)(session)$html)
})
})
test_that("Plots in tab 2 are correctly rendered", {
data("pancreasSCE")
data("pancreasMasks")
data("pancreasImages")
local_edition(3)
testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
cell_id = "CellNb"),
{
expect_null(.addPlots_tab2(input, object = pancreasSCE, mask = NULL, image = NULL))
})
testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
cell_id = "CellNb", mask = pancreasMasks,
image = NULL),
{
expect_snapshot_output(.addPlots_tab2(input, object = pancreasSCE, mask = pancreasMasks, image = NULL)(session)$html)
})
testServer(app = cytomapperShiny(object = pancreasSCE, img_id = "ImageNb",
cell_id = "CellNb", mask = pancreasMasks,
image = pancreasImages),
{
expect_snapshot_output(.addPlots_tab2(input, object = pancreasSCE, mask = pancreasMasks, image = pancreasImages)(session)$html)
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyRange.R
\docType{package}
\name{shinyRange}
\alias{shinyRange}
\alias{shinyRange-package}
\title{shinyRange: Text-box-based numeric range input widget for Shiny.}
\description{
Text-box-based numeric range input widget for Shiny.
}
| /man/shinyRange.Rd | permissive | wkdavis/shinyRange | R | false | true | 315 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyRange.R
\docType{package}
\name{shinyRange}
\alias{shinyRange}
\alias{shinyRange-package}
\title{shinyRange: Text-box-based numeric range input widget for Shiny.}
\description{
Text-box-based numeric range input widget for Shiny.
}
|
# https://elbersb.github.io/segregation/index.html?s=03
# https://elbersb.github.io/segregation/articles/segregation.html
library(segregation)
# example dataset with fake data provided by the package
# mutual_total(): mutual-information (M) and Theil's H segregation of
# race across schools, weighted by the count column "n".
mutual_total(schools00, "race", "school", weight = "n")
#> stat est
#> 1: M 0.426
#> 2: H 0.419 | /Segregacao/segregation.R | permissive | DATAUNIRIO/desigualdade | R | false | false | 309 | r |
# https://elbersb.github.io/segregation/index.html?s=03
# https://elbersb.github.io/segregation/articles/segregation.html
library(segregation)
# example dataset with fake data provided by the package
mutual_total(schools00, "race", "school", weight = "n")
#> stat est
#> 1: M 0.426
#> 2: H 0.419 |
\name{cSFM object}
\alias{print.cSFM}
\alias{fitted.cSFM}
\alias{predict.cSFM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generic Method for 'cSFM' Objects
}
\description{
Print, extract fitted values and predict for cSFM object.
Methods of the generic function \code{\link{print}}, \code{\link{fitted}} and \code{\link{predict}} for objects inheriting from class cSFM.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{print}{cSFM}(x, ...)
\method{fitted}{cSFM}(object, quantile = TRUE,
quantile.level = c(0.5, 0.8, 0.9, 0.95, 0.99), ...)
\method{predict}{cSFM}(object, newdata, cp.valid, tp.valid = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{A cSFM object; returned by the function \code{\link{cSFM.est}}}
\item{object}{A cSFM object; returned by the function \code{\link{cSFM.est}}}
\item{quantile}{logical; if TRUE(default), estimates of quantiles are returned }
\item{quantile.level}{quantile vector; quantile levels to be estimated when \code{quantile = TRUE}}
\item{newdata}{the partially observed new data set (missing data allowed) to be predicted}
\item{cp.valid}{observed covariate vector for \code{newdata} with length \code{nrow(newdata)}}
\item{tp.valid}{observed timepoint vector for \code{newdata} with length \code{ncol(newdata)}; See "Details".}
\item{...}{other arguments passed to the generic functions}
%% ~~Describe \code{x} here~~
}
\details{
When using the function \code{predict}, each row of \code{newdata} corresponds to the covariate information in \code{cp.valid},
while each column corresponds to a time point in \code{tp.valid}. When \code{tp.valid} is \code{NULL}, the validation
data set is assumed to have the same time points as the training data set that was used to obtain \code{object}.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{cSFM.est}}, \code{\link{print}}, \code{\link{fitted}}, \code{\link{predict}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
| /man/generic.HAC.Rd | no_license | cran/cSFM | R | false | false | 2,100 | rd | \name{cSFM object}
\alias{print.cSFM}
\alias{fitted.cSFM}
\alias{predict.cSFM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generic Method for 'cSFM' Objects
}
\description{
Print, extract fitted values and predict for cSFM object.
Methods of the generic function \code{\link{print}}, \code{\link{fitted}} and \code{\link{predict}} for objects inheriting from class cSFM.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{print}{cSFM}(x, ...)
\method{fitted}{cSFM}(object, quantile = TRUE,
quantile.level = c(0.5, 0.8, 0.9, 0.95, 0.99), ...)
\method{predict}{cSFM}(object, newdata, cp.valid, tp.valid = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{A cSFM object; returned by the function \code{\link{cSFM.est}}}
\item{object}{A cSFM object; returned by the function \code{\link{cSFM.est}}}
\item{quantile}{logical; if TRUE(default), estimates of quantiles are returned }
\item{quantile.level}{quantile vector; quantile levels to be estimated when \code{quantile = TRUE}}
\item{newdata}{the partially observed new data set (missing data allowed) to be predicted}
\item{cp.valid}{observed covariate vector for \code{newdata} with length \code{nrow(newdata)}}
\item{tp.valid}{observed timepoint vector for \code{newdata} with length \code{ncol(newdata)}; See "Details".}
\item{...}{other arguments passed to the generic functions}
%% ~~Describe \code{x} here~~
}
\details{
When using the function \code{predict}, each row of \code{newdata} corresponds to the covariate information in \code{cp.valid},
while each column corresponds to a time point in \code{tp.valid}. When \code{tp.valid} is \code{NULL}, the validation
data set is assumed to have the same time points as the training data set that was used to obtain \code{object}.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{cSFM.est}}, \code{\link{print}}, \code{\link{fitted}}, \code{\link{predict}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FILEST_package.R
\docType{package}
\name{FILEST-package}
\alias{FILEST}
\alias{FILEST-package}
\title{Fine-Level Structure Simulator}
\description{
A population genetic simulator, which is able to generate synthetic datasets
for single-nucleotide polymorphisms (SNP) for multiple populations. The
genetic distances among populations can be set according to the Fixation
Index (Fst). This tool is able to simulate outlying individuals and missing
SNPs can be specified. For Genome-wide association study (GWAS), disease
status can be set in desired level according risk ratio.
}
\details{
The R package \pkg{FILEST} requires \pkg{KRIS} and \pkg{rARPACK}.
Here is the list of functions in the R package \pkg{FILEST}:
\itemize{
\item \code{\link{cbind_bigmatrix}}
\item \code{\link{create.template.setting}}
\item \code{\link{demo.filest}}
\item \code{\link{filest}}
\item \code{\link{rbind_bigmatrix}}
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://gitlab.com/kris.ccp/filest}
\item Report bugs at \url{https://gitlab.com/kris.ccp/filest/-/issues}
}
}
\author{
\strong{Maintainer}: Kridsadakorn Chaichoompu \email{kridsadakorn@biostatgen.org}
Authors:
\itemize{
\item Kristel Van Steen
\item Fentaw Abegaz
}
}
\keyword{internal}
| /man/FILEST-package.Rd | no_license | cran/FILEST | R | false | true | 1,327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FILEST_package.R
\docType{package}
\name{FILEST-package}
\alias{FILEST}
\alias{FILEST-package}
\title{Fine-Level Structure Simulator}
\description{
A population genetic simulator, which is able to generate synthetic datasets
for single-nucleotide polymorphisms (SNP) for multiple populations. The
genetic distances among populations can be set according to the Fixation
Index (Fst). This tool is able to simulate outlying individuals and missing
SNPs can be specified. For Genome-wide association study (GWAS), disease
status can be set in desired level according risk ratio.
}
\details{
The R package \pkg{FILEST} requires \pkg{KRIS} and \pkg{rARPACK}.
Here is the list of functions in the R package \pkg{FILEST}:
\itemize{
\item \code{\link{cbind_bigmatrix}}
\item \code{\link{create.template.setting}}
\item \code{\link{demo.filest}}
\item \code{\link{filest}}
\item \code{\link{rbind_bigmatrix}}
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://gitlab.com/kris.ccp/filest}
\item Report bugs at \url{https://gitlab.com/kris.ccp/filest/-/issues}
}
}
\author{
\strong{Maintainer}: Kridsadakorn Chaichoompu \email{kridsadakorn@biostatgen.org}
Authors:
\itemize{
\item Kristel Van Steen
\item Fentaw Abegaz
}
}
\keyword{internal}
|
# Figure script: posterior densities of the OCO-2 bias-correction
# coefficients under four MCMC runs (LG/LN x uncorrected/TCCON-corrected).
# Shared helpers are injected via environment-variable paths.
source(Sys.getenv('RESULTS_BASE_PARTIAL'))
source(Sys.getenv('RESULTS_DISPLAY_PARTIAL'))
source(Sys.getenv('RESULTS_TABLES_PARTIAL'))
library(argparse)
library(tidyr, warn.conflicts = FALSE)
library(wombat)
# CLI: paths to the four MCMC sample files plus the output figure path.
parser <- ArgumentParser()
parser$add_argument('--mcmc-samples-lg-online-corrected')
parser$add_argument('--mcmc-samples-ln-online-corrected')
parser$add_argument('--mcmc-samples-lg-offline-online-corrected')
parser$add_argument('--mcmc-samples-ln-offline-online-corrected')
parser$add_argument('--output')
args <- parser$parse_args()
# Load one posterior sample file, discard burn-in, and return the beta
# (bias-correction coefficient) draws in long format, tagged with the
# inversion-mode and retrieval-correction labels.
read_samples <- function(filename, mode_i, raw_i) {
  # Drop the first 1000 iterations as burn-in.
  draws <- window(readRDS(filename), start = 1001)
  beta_long <- pivot_longer(as.data.frame(draws$beta), everything())
  beta_long$mode <- mode_i
  beta_long$raw <- raw_i
  beta_long
}
# Stack the beta draws from all four runs, relabel the raw coefficient
# names, and rescale everything onto comparable units.
beta_samples_df <- bind_rows(
  read_samples(
    args$mcmc_samples_lg_online_corrected,
    'WOMBAT LG',
    'Uncorrected retrievals'
  ),
  read_samples(
    args$mcmc_samples_ln_online_corrected,
    'WOMBAT LN',
    'Uncorrected retrievals'
  ),
  read_samples(
    args$mcmc_samples_lg_offline_online_corrected,
    'WOMBAT LG',
    'TCCON-corrected retrievals'
  ),
  read_samples(
    args$mcmc_samples_ln_offline_online_corrected,
    'WOMBAT LN',
    'TCCON-corrected retrievals'
  )
) %>%
  mutate(
    # Map model term names onto the three OCO-2 bias predictors (dp,
    # co2_grad_del, logDWS) plus an intercept that is dropped below.
    parameter = factor(c(
      'is_oco2:oco2_operation_modeLG' = '(Intercept)',
      'is_oco2:oco2_operation_modeLG:oco2_dp' = 'dp',
      'is_oco2:oco2_operation_modeLG:oco2_co2_grad_del' = 'co2_grad_del',
      'is_oco2:oco2_operation_modeLG:oco2_log_dws' = 'logDWS',
      'is_oco2:oco2_operation_modeLN' = '(Intercept)',
      'is_oco2:oco2_operation_modeLN:oco2_dp' = 'dp',
      'is_oco2:oco2_operation_modeLN:oco2_co2_grad_del' = 'co2_grad_del',
      'is_oco2:oco2_operation_modeLN:oco2_log_dws' = 'logDWS'
    )[name], levels = c('dp', 'co2_grad_del', 'logDWS')),
    mode = factor(
      mode,
      levels = c(
        'WOMBAT LG',
        'WOMBAT LN',
        'TCCON-based offline correction'
      )
    ),
    # Sign flip so the coefficients follow the offline-correction
    # convention.
    value = -value
  ) %>%
  filter(parameter != '(Intercept)') %>%
  mutate(
    # Divide by per-mode, per-predictor constants.
    # NOTE(review): these look like the predictor standardisation factors
    # used when the runs were fit -- confirm against the fitting scripts.
    value = ifelse(
      mode == 'WOMBAT LG',
      value / c(
        'dp' = 3.057625,
        'co2_grad_del' = 21.348496,
        'logDWS' = 1.135891
      )[parameter],
      ifelse(
        mode == 'WOMBAT LN',
        value / c(
          'dp' = 2.758201,
          'co2_grad_del' = 19.829637,
          'logDWS' = 1.144667
        )[parameter],
        value
      )
    )
  )
# Panel 1: densities for the runs on uncorrected retrievals, with blue
# reference lines at the operational (offline) correction coefficients.
output1 <- ggplot() +
  geom_vline(
    data = tibble(
      parameter = factor(
        c('dp', 'co2_grad_del', 'logDWS'),
        levels = c('dp', 'co2_grad_del', 'logDWS')
      ),
      # Reference coefficient per predictor (one vline per facet).
      value = c(0.3, 0.028, 0.6)
    ),
    mapping = aes(xintercept = value),
    colour = 'blue',
    size = 1
  ) +
  geom_density(
    data = beta_samples_df %>% filter(raw == 'Uncorrected retrievals'),
    mapping = aes(value, colour = mode)
  ) +
  scale_colour_manual(
    values = c(
      'WOMBAT LG' = get_colour('wombat_lg'),
      'WOMBAT LN' = get_colour('wombat_ln'),
      'TCCON-based offline correction' = 'blue'
    ),
    # drop = FALSE keeps the offline-correction legend entry even though
    # it has no density layer.
    drop = FALSE,
    guide = guide_legend(override.aes = list(size = 1))
  ) +
  labs(x = 'Bias correction coefficient', y = 'Density', colour = NULL) +
  facet_wrap(~ parameter, scales = 'free') +
  scale_x_continuous(expand = expansion(mult = c(0.2, 0.2))) +
  ggtitle('Uncorrected retrievals') +
  theme(
    axis.text.x = element_text(angle = 30, hjust = 1),
    legend.position = 'bottom'
  )
# Panel 2: densities for the runs on TCCON-corrected retrievals; the
# reference line here is zero.
output2 <- ggplot() +
  geom_vline(
    xintercept = 0,
    colour = '#999999',
    linetype = 2,
    size = 1
  ) +
  geom_density(
    data = beta_samples_df %>% filter(raw == 'TCCON-corrected retrievals'),
    mapping = aes(value, colour = mode)
  ) +
  scale_colour_manual(
    values = c(
      'WOMBAT LG' = get_colour('wombat_lg'),
      'WOMBAT LN' = get_colour('wombat_ln'),
      'TCCON-based offline correction' = 'blue'
    ),
    drop = FALSE,
    guide = guide_legend(override.aes = list(size = 1))
  ) +
  labs(x = 'Bias correction coefficient', y = 'Density', colour = NULL) +
  facet_wrap(~ parameter, scales = 'free') +
  scale_x_continuous(expand = expansion(mult = c(0.2, 0.2))) +
  ggtitle('TCCON-corrected retrievals') +
  theme(
    axis.text.x = element_text(angle = 30, hjust = 1),
    legend.position = 'bottom'
  )
# Stack the two panels and share a single legend row at the bottom.
output <- gridExtra::arrangeGrob(
  output1 + theme(legend.position = 'none'),
  output2 + theme(legend.position = 'none'),
  get_legend(output1),
  heights = c(5.5, 5.5, 1)
)
ggsave(
  args$output,
  plot = output,
  width = DISPLAY_SETTINGS$full_width,
  height = 12,
  units = 'cm'
)
| /4_results/src/oco2-bias-correction.R | no_license | mbertolacci/wombat-paper | R | false | false | 4,612 | r | source(Sys.getenv('RESULTS_BASE_PARTIAL'))
source(Sys.getenv('RESULTS_DISPLAY_PARTIAL'))
source(Sys.getenv('RESULTS_TABLES_PARTIAL'))
library(argparse)
library(tidyr, warn.conflicts = FALSE)
library(wombat)
parser <- ArgumentParser()
parser$add_argument('--mcmc-samples-lg-online-corrected')
parser$add_argument('--mcmc-samples-ln-online-corrected')
parser$add_argument('--mcmc-samples-lg-offline-online-corrected')
parser$add_argument('--mcmc-samples-ln-offline-online-corrected')
parser$add_argument('--output')
args <- parser$parse_args()
read_samples <- function(filename, mode_i, raw_i) {
window(readRDS(filename), start = 1001)$beta %>%
as.data.frame() %>%
pivot_longer(everything()) %>%
mutate(
mode = mode_i,
raw = raw_i
)
}
beta_samples_df <- bind_rows(
read_samples(
args$mcmc_samples_lg_online_corrected,
'WOMBAT LG',
'Uncorrected retrievals'
),
read_samples(
args$mcmc_samples_ln_online_corrected,
'WOMBAT LN',
'Uncorrected retrievals'
),
read_samples(
args$mcmc_samples_lg_offline_online_corrected,
'WOMBAT LG',
'TCCON-corrected retrievals'
),
read_samples(
args$mcmc_samples_ln_offline_online_corrected,
'WOMBAT LN',
'TCCON-corrected retrievals'
)
) %>%
mutate(
parameter = factor(c(
'is_oco2:oco2_operation_modeLG' = '(Intercept)',
'is_oco2:oco2_operation_modeLG:oco2_dp' = 'dp',
'is_oco2:oco2_operation_modeLG:oco2_co2_grad_del' = 'co2_grad_del',
'is_oco2:oco2_operation_modeLG:oco2_log_dws' = 'logDWS',
'is_oco2:oco2_operation_modeLN' = '(Intercept)',
'is_oco2:oco2_operation_modeLN:oco2_dp' = 'dp',
'is_oco2:oco2_operation_modeLN:oco2_co2_grad_del' = 'co2_grad_del',
'is_oco2:oco2_operation_modeLN:oco2_log_dws' = 'logDWS'
)[name], levels = c('dp', 'co2_grad_del', 'logDWS')),
mode = factor(
mode,
levels = c(
'WOMBAT LG',
'WOMBAT LN',
'TCCON-based offline correction'
)
),
value = -value
) %>%
filter(parameter != '(Intercept)') %>%
mutate(
value = ifelse(
mode == 'WOMBAT LG',
value / c(
'dp' = 3.057625,
'co2_grad_del' = 21.348496,
'logDWS' = 1.135891
)[parameter],
ifelse(
mode == 'WOMBAT LN',
value / c(
'dp' = 2.758201,
'co2_grad_del' = 19.829637,
'logDWS' = 1.144667
)[parameter],
value
)
)
)
output1 <- ggplot() +
geom_vline(
data = tibble(
parameter = factor(
c('dp', 'co2_grad_del', 'logDWS'),
levels = c('dp', 'co2_grad_del', 'logDWS')
),
value = c(0.3, 0.028, 0.6)
),
mapping = aes(xintercept = value),
colour = 'blue',
size = 1
) +
geom_density(
data = beta_samples_df %>% filter(raw == 'Uncorrected retrievals'),
mapping = aes(value, colour = mode)
) +
scale_colour_manual(
values = c(
'WOMBAT LG' = get_colour('wombat_lg'),
'WOMBAT LN' = get_colour('wombat_ln'),
'TCCON-based offline correction' = 'blue'
),
drop = FALSE,
guide = guide_legend(override.aes = list(size = 1))
) +
labs(x = 'Bias correction coefficient', y = 'Density', colour = NULL) +
facet_wrap(~ parameter, scales = 'free') +
scale_x_continuous(expand = expansion(mult = c(0.2, 0.2))) +
ggtitle('Uncorrected retrievals') +
theme(
axis.text.x = element_text(angle = 30, hjust = 1),
legend.position = 'bottom'
)
output2 <- ggplot() +
geom_vline(
xintercept = 0,
colour = '#999999',
linetype = 2,
size = 1
) +
geom_density(
data = beta_samples_df %>% filter(raw == 'TCCON-corrected retrievals'),
mapping = aes(value, colour = mode)
) +
scale_colour_manual(
values = c(
'WOMBAT LG' = get_colour('wombat_lg'),
'WOMBAT LN' = get_colour('wombat_ln'),
'TCCON-based offline correction' = 'blue'
),
drop = FALSE,
guide = guide_legend(override.aes = list(size = 1))
) +
labs(x = 'Bias correction coefficient', y = 'Density', colour = NULL) +
facet_wrap(~ parameter, scales = 'free') +
scale_x_continuous(expand = expansion(mult = c(0.2, 0.2))) +
ggtitle('TCCON-corrected retrievals') +
theme(
axis.text.x = element_text(angle = 30, hjust = 1),
legend.position = 'bottom'
)
output <- gridExtra::arrangeGrob(
output1 + theme(legend.position = 'none'),
output2 + theme(legend.position = 'none'),
get_legend(output1),
heights = c(5.5, 5.5, 1)
)
ggsave(
args$output,
plot = output,
width = DISPLAY_SETTINGS$full_width,
height = 12,
units = 'cm'
)
|
# Plot maps and NDVI, EVI, NIRv relationships for 4 CA rangeland test sites
# NOTE(review): setwd() plus hard-coded C:/D: paths make this script
# non-portable -- consider here::here() or relative paths.
setwd("C:/Users/eordway/Desktop")
# Load libraries
library(raster); library(rgdal); library(sp)
library(reshape2); library(dplyr); library(tidyr); library(rgeos)
# NOTE(review): SDMTools was archived from CRAN; confirm it is still needed.
library(GISTools); library(sf); library(SDMTools)
library(ggplot2); library(rasterVis); library(viridis)
# Reversed inferno palette shared by all raster plots.
colr = inferno(100, direction=-1)
#----- Load covariates & TCC ----------------------------------------------------
## load CA shapefile
CA = readOGR(dsn="CA_state_boundary", layer="CA_State_TIGER2016"); plot(CA)
## load 4 test site shapefiles (Bakersfield, Lassen, Merced, Mt Hamilton)
bak = readOGR(dsn="Test_Sites", layer="Bakersfield"); plot(bak)
las = readOGR(dsn="Test_Sites", layer="Lassen"); plot(las)
mrc = readOGR(dsn="Test_Sites", layer="Merced"); plot(mrc)
mh = readOGR(dsn="Test_Sites", layer="Mt_Hamilton"); plot(mh)
## load GLCF Tree Canopy Cover product (2000)
## also 2005 & 2010
tcc <- raster("D:/DEM_Precip_Soil_TCC/TCC_CA_2000.tif"); plot(tcc)# 2000
## load & stack veg indices for all years
## (switch the commented path to build the NDVI or NIRv stack instead)
current.list <- list.files(path="Temp_Veg_Indices/EVI/",pattern =".tif$", full.names=TRUE)
#current.list <- list.files(path="Temp_Veg_Indices/NDVI/",pattern =".tif$", full.names=TRUE)
#current.list <- list.files(path="Temp_Veg_Indices/NIRv/",pattern =".tif$", full.names=TRUE)
veg.stack <- stack(current.list); dim(veg.stack) # should have 31 bands
plot(veg.stack, zlim = c(0,1), col=colr)# zlim = c(0,1),
## add TCC to stack
dat.stack <- stack(tcc,veg.stack); dim(dat.stack) # should have 32 bands
plot(dat.stack, col=colr)# zlim = c(0,1),
#-----------------------------------------------------------------------------
## crop stacked vegetation indices to test sites
# crop() clips to each site's bounding box; mask() then blanks cells
# outside the site polygon.
out_rastB <- crop(dat.stack, bak, snap="out"); plot(trim(out_rastB), zlim = c(0,1), col=colr)
out_B <- mask(out_rastB, bak); plot(out_B, zlim = c(0,1), col=colr)
out_rastL <- crop(dat.stack, las, snap="out"); plot(trim(out_rastL), zlim = c(0,1), col=colr)
out_L <- mask(out_rastL, las); plot(out_L, zlim = c(0,1), col=colr)
out_rastM <- crop(dat.stack, mrc, snap="out"); plot(trim(out_rastM), zlim = c(0,1), col=colr)
out_M <- mask(out_rastM, mrc); plot(out_M, zlim = c(0,1), col=colr)
out_rastMH <- crop(dat.stack, mh, snap="out"); plot(trim(out_rastMH), zlim = c(0,1), col=colr)
out_MH <- mask(out_rastMH, mh); plot(out_MH, zlim = c(0,1), col=colr)
# write rasters
# string_out must match the vegetation index whose stack was built above.
string_out <- "EVI_stack" # EVI_stack | NDVI_stack | NIRv_stack
writeRaster(out_B, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Bakersfield.tif',sep=""), overwrite=T)
writeRaster(out_L, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Lassen.tif',sep=""), overwrite=T)
writeRaster(out_M, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Merced.tif',sep=""), overwrite=T)
writeRaster(out_MH, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Mt_Hamilton.tif',sep=""), overwrite=T) | /rangeland-production/code/CA_Rangelands_create_test_site_stacks.R | no_license | eoway/soilc-ca-rangelands | R | false | false | 2,932 | r | # Plot maps and NDVI, EVI, NIRv relationships for 4 CA rangeland test sites
# Duplicate copy of the CA rangeland test-site stacking script.
# NOTE(review): machine-specific working directory.
setwd("C:/Users/eordway/Desktop")
# Load libraries
library(raster); library(rgdal); library(sp)
library(reshape2); library(dplyr); library(tidyr); library(rgeos)
library(GISTools); library(sf); library(SDMTools)
library(ggplot2); library(rasterVis); library(viridis)
# reversed inferno colour ramp shared by all raster plots
colr = inferno(100, direction=-1)
#----- Load covariates & TCC ----------------------------------------------------
## load CA shapefile
CA = readOGR(dsn="CA_state_boundary", layer="CA_State_TIGER2016"); plot(CA)
## load 5 test site shapfiles
bak = readOGR(dsn="Test_Sites", layer="Bakersfield"); plot(bak)
las = readOGR(dsn="Test_Sites", layer="Lassen"); plot(las)
mrc = readOGR(dsn="Test_Sites", layer="Merced"); plot(mrc)
mh = readOGR(dsn="Test_Sites", layer="Mt_Hamilton"); plot(mh)
## load GLCF Tree Canopy Cover product (2000)
## also 2005 & 2010
tcc <- raster("D:/DEM_Precip_Soil_TCC/TCC_CA_2000.tif"); plot(tcc)# 2000
## load & stack veg indices for all years
## pick one index directory (EVI/NDVI/NIRv) and keep string_out below in sync
current.list <- list.files(path="Temp_Veg_Indices/EVI/",pattern =".tif$", full.names=TRUE)
#current.list <- list.files(path="Temp_Veg_Indices/NDVI/",pattern =".tif$", full.names=TRUE)
#current.list <- list.files(path="Temp_Veg_Indices/NIRv/",pattern =".tif$", full.names=TRUE)
veg.stack <- stack(current.list); dim(veg.stack) # should have 31 bands
plot(veg.stack, zlim = c(0,1), col=colr)# zlim = c(0,1),
## add TCC to stack
dat.stack <- stack(tcc,veg.stack); dim(dat.stack) # should have 32 bands
plot(dat.stack, col=colr)# zlim = c(0,1),
#-----------------------------------------------------------------------------
## crop stacked vegetation indices to test sites (crop to bbox, then mask to polygon)
out_rastB <- crop(dat.stack, bak, snap="out"); plot(trim(out_rastB), zlim = c(0,1), col=colr)
out_B <- mask(out_rastB, bak); plot(out_B, zlim = c(0,1), col=colr)
out_rastL <- crop(dat.stack, las, snap="out"); plot(trim(out_rastL), zlim = c(0,1), col=colr)
out_L <- mask(out_rastL, las); plot(out_L, zlim = c(0,1), col=colr)
out_rastM <- crop(dat.stack, mrc, snap="out"); plot(trim(out_rastM), zlim = c(0,1), col=colr)
out_M <- mask(out_rastM, mrc); plot(out_M, zlim = c(0,1), col=colr)
out_rastMH <- crop(dat.stack, mh, snap="out"); plot(trim(out_rastMH), zlim = c(0,1), col=colr)
out_MH <- mask(out_rastMH, mh); plot(out_MH, zlim = c(0,1), col=colr)
# write rasters
# NOTE(review): "Ouput" in the path is likely a typo for "Output" -- verify.
string_out <- "EVI_stack" # EVI_stack | NDVI_stack | NIRv_stack
writeRaster(out_B, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Bakersfield.tif',sep=""), overwrite=T)
writeRaster(out_L, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Lassen.tif',sep=""), overwrite=T)
writeRaster(out_M, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Merced.tif',sep=""), overwrite=T)
writeRaster(out_MH, format="GTiff", paste('D:/Test_Site_Ouput/',string_out,'_Mt_Hamilton.tif',sep=""), overwrite=T) |
#' Converts list(list(fields)) into list(fields(list))
#'
#' Transposes a list of uniformly named lists: the result has one entry per
#' field name, each holding the list of that field's values across `dots`.
#' Field names are taken from the first element; every element is assumed
#' to carry the same fields.
#'
#' @param dots list of lists
#' @return named list mapping each field name to a list of per-element values
listcomb <- function(dots) {
  # Guard the empty case explicitly; `dots[[1]]` below would otherwise error.
  if (length(dots) == 0) return(list())
  nms <- names(dots[[1]])
  ans <- as.list(nms)
  names(ans) <- nms
  for (nm in nms) {
    # seq_along() instead of 1:length() -- the latter yields c(1, 0) on empty input
    ans[[nm]] <- lapply(seq_along(dots), function(i) dots[[i]][[nm]])
  }
  ans
}
#' Inverse square root of a matrix
#'
#' Computes m^(-1/2) via the eigendecomposition, zeroing out negative
#' eigenvalues (with a warning when any eigenvalue falls below -1e-5) so the
#' routine stays usable on nearly positive-semidefinite covariance matrices.
#'
#' @param m matrix (assumed symmetric / diagonalisable with real spectrum)
#' @return matrix of the same dimension as m
isqrtm <- function(m) {
  res <- eigen(m)
  d <- res$values
  if (min(d) < -1e-5) warning("Negative eigenvalues in isqrtm")
  d[d < 0] <- 0
  d[d > 0] <- 1/sqrt(d[d > 0])
  v <- res$vectors
  # Bug fix: bare diag(d) misreads a length-1 d as a matrix size, so 1x1
  # inputs failed; pinning nrow keeps the diagonal the intended shape.
  return (v %*% diag(d, nrow = length(d)) %*% t(v))
}
#' Calls mclapply and combines the end result using listcomb
#'
#' @import parallel
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
lclapply <- function(x, f, mc.cores = 0) {
  # mc.cores == 0 means "run serially"; anything else fans out via mclapply.
  pieces <- if (mc.cores == 0) {
    lapply(x, f)
  } else {
    mclapply(x, f, mc.cores = mc.cores)
  }
  listcomb(pieces)
}
#' Either uses lapply or mclapply
#'
#' @import parallel
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
mclapply0 <- function(x, f, mc.cores = 0) {
  # A core count of exactly zero selects the serial code path.
  run_serial <- mc.cores == 0
  if (run_serial) lapply(x, f) else mclapply(x, f, mc.cores = mc.cores)
}
| /R/testUtils.R | no_license | snarles/testUtils | R | false | false | 1,216 | r | #' Converts list(list(fields)) into list(fields(list))
#'
#' Transpose a list of uniformly named lists into a list of field-lists:
#' the result has one entry per field name (taken from the first element),
#' each holding that field's values across all elements of `dots`.
#'
#' @param dots list of lists
#' @return named list of per-field value lists
listcomb <- function(dots) {
  # Empty input short-circuits; `dots[[1]]` would otherwise raise an error.
  if (length(dots) == 0) return(list())
  nms <- names(dots[[1]])
  ans <- as.list(nms)
  names(ans) <- nms
  for (nm in nms) {
    # seq_along() replaces the unsafe 1:length() pattern (c(1, 0) when empty)
    ans[[nm]] <- lapply(seq_along(dots), function(i) dots[[i]][[nm]])
  }
  ans
}
#' Inverse square root of a matrix
#'
#' m^(-1/2) through the eigendecomposition; negative eigenvalues are clipped
#' to zero (warning when materially negative, below -1e-5) so nearly-PSD
#' covariance matrices are handled gracefully.
#'
#' @param m matrix (assumed symmetric / real spectrum)
#' @return matrix of the same dimension as m
isqrtm <- function(m) {
  res <- eigen(m)
  d <- res$values
  if (min(d) < -1e-5) warning("Negative eigenvalues in isqrtm")
  d[d < 0] <- 0
  d[d > 0] <- 1/sqrt(d[d > 0])
  v <- res$vectors
  # Bug fix: diag(d) with a scalar d builds a d-sized identity instead of a
  # 1x1 diagonal, so 1x1 inputs broke; nrow makes the intent explicit.
  return (v %*% diag(d, nrow = length(d)) %*% t(v))
}
#' Calls mclapply and combines the end result using listcomb
#'
#' @import parallel
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
lclapply <- function(x, f, mc.cores = 0) {
  # Zero cores requests the serial path; otherwise delegate to mclapply.
  mapped <- if (mc.cores == 0) lapply(x, f) else mclapply(x, f, mc.cores = mc.cores)
  listcomb(mapped)
}
#' Either uses lapply or mclapply
#'
#' @import parallel
#' @examples
#' lclapply(1:10, function(i) list(a = i, b = i^2), mc.cores = 1)
mclapply0 <- function(x, f, mc.cores = 0) {
  # mc.cores == 0 is the documented "no parallelism" sentinel.
  if (mc.cores == 0) {
    lapply(x, f)
  } else {
    mclapply(x, f, mc.cores = mc.cores)
  }
}
|
# Mapping/plotting dependencies (note: RColorBrewer is loaded twice below).
library(maps)
library(sp)
library(maptools)
library(mapdata)
library(scales)
library(mapproj)
library(RColorBrewer)
library(rgdal)
library(ggplot2)
library(beyonce)
library(rworldmap)
library(RColorBrewer)
#Percent of land spared, used, or unchanged relative to BAU scenario
prop<-read.csv("Percent_map_all.csv", header=TRUE)
head(prop)
# Join the country-level table onto rworldmap polygons by ISO3 code, then
# reproject to Mollweide (equal-area) for the world maps.
sPDF.prop.nt <- joinCountryData2Map(prop ,joinCode = "ISO3", nameJoinColumn = "ISO3", verbose=TRUE)
sPDF.prop <- spTransform(sPDF.prop.nt, CRS="+proj=moll +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84")
##CROP LANDUSE % MAPS------
numCats <- 6
colourPalette <- colorRampPalette(brewer.pal(numCats, "RdBu")) (6)
# symmetric bins around zero: negative = land used, positive = land spared
catmethod=c(-1,-0.50,-0.25,0,0.25,0.50,1) #feed
#quartz()
par(mfrow = c(2, 1))
par(mar = c(2, 1, 1, 1) + 0.1) # c(bottom, left, top, right)
mapParams<-mapCountryData(sPDF.prop,nameColumnToPlot='MIXED_crop', catMethod=catmethod, colourPalette=colourPalette,
                          missingCountryCol="dark gray",addLegend=F,oceanCol="white", mapTitle=" ")
do.call(addMapLegend, c(mapParams, legendLabels="all", legendWidth=2, digits=1, horizontal = T))
mapParamsMAR<-mapCountryData(sPDF.prop,nameColumnToPlot='MARINE_crop', catMethod=catmethod, colourPalette=colourPalette,
                         missingCountryCol="dark gray", addLegend=F,oceanCol="white", mapTitle=" ")
##GRAZING + CROP % MAP(S)-------
mapParams<-mapCountryData(sPDF.prop,nameColumnToPlot='MIXED_c.g', catMethod=catmethod, colourPalette=colourPalette,
                          missingCountryCol="dark gray",addLegend=F,oceanCol="white", mapTitle=" ")
do.call(addMapLegend, c(mapParams, legendLabels="all", legendWidth=2, digits=1, horizontal = T))
#MARINE SCENARIO NOT DEPICTED IN STUDY BECAUSE RESULT THE SAME
#mapParamsMAR<-mapCountryData(sPDF.prop,nameColumnToPlot='MARINE_c.g', catMethod=catmethod, colourPalette=colourPalette,
#                         missingCountryCol="dark gray", addLegend=F,oceanCol="white", mapTitle=" ")
#do.call(addMapLegend, c(mapParamsMAR, legendLabels="all", legendWidth=7, digits=1, horizontal = F))
##----------
#Number of countries pos (spared) or neg (used) land
num_con<-read.csv("PosNeg_plot_all.csv", header=TRUE)
head(num_con)
counts = num_con[,-1]
head(counts)
# column sums = number of regions falling in each scenario/direction bucket
totals <- as.data.frame(colSums(counts))
names(totals) = "no_regions"
# labels applied positionally -- assumes rows 1-4 are the crop-only columns
# and 5-8 the crop+grazing columns; TODO confirm against the CSV layout
totals$scenario = c("Used","Spared","Used","Spared","Used","Spared","Used","Spared")
totals$id <- c("Mixed","Mixed","Marine","Marine","Mixed","Mixed","Marine","Marine")
totals
crop_tot = as.data.frame(totals[c(1:4),])
both_tot = totals[c(5:8),]
head(crop_tot)
cols <- colorRampPalette((beyonce_palette(12)))
myPal <- cols(length(unique(crop_tot$id)))
#Change Order of Factors
crop_tot$id <-factor(crop_tot$id,levels=c("Mixed","Marine")) #change order
levels(crop_tot$id)
# Grouped bar chart: number of regions sparing vs using land, by scenario.
ggplot(crop_tot, aes(x=scenario, y = no_regions, fill=factor(id))) +
  geom_bar(stat = "identity",position=position_dodge()) +
  #coord_flip()+
  labs(x="", y = "No. Regions" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_y_continuous(limits = c(0, 215))+
  scale_fill_manual(values = myPal)
##---------------
#Total land plots
#Number of countries pos or neg
total_ha<-read.csv("Total_ha_plot_all.csv", header=TRUE)
head(total_ha)
#Change Order of Factors
levels(total_ha$Scenario) #Order of default levels
total_ha$Scenario <-factor(total_ha$Scenario,levels=c("Current","BAU","Mixed", "Marine")) #change order
head(total_ha$Scenario)
# every other row (2,4,6,8) -- presumably the cropland rows; verify vs the CSV
crop_tot = total_ha[c(2,4,6,8),]
crop_tot
cols <- colorRampPalette((beyonce_palette(66)))
myPal <- cols(length(unique(crop_tot$Scenario)))
pd <- position_dodge(0.1)
# Cropland totals per scenario with +/- 1 sd error bars.
ggplot(crop_tot, aes(x=Scenario, y = ha_total, fill=factor(Scenario))) +
  geom_bar(stat = "identity",position=position_dodge()) +
  geom_errorbar(aes(ymin=ha_total-ha_total_sd,
                    ymax=ha_total+ha_total_sd),
                width=.2, position=pd)+
  labs(x="", y = "Total Cropland Use (ha)" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_fill_manual(values = rev(myPal))
# Stacked totals across all rows, filled by the id column.
ggplot(total_ha, aes(x=Scenario, y = ha_total, fill=factor(id))) +
  geom_bar(stat = "identity") +
  labs(x="", y = "Total Land Use (ha)" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_fill_manual(values = myPal)
| /Outputs/Landuse_Maps&plots.R | no_license | brunj7/aquaculture-crop-feed | R | false | false | 5,473 | r | library(maps)
# Duplicate copy of the land-use mapping script (library(maps) loaded on the
# preceding line). Note RColorBrewer is attached twice.
library(sp)
library(maptools)
library(mapdata)
library(scales)
library(mapproj)
library(RColorBrewer)
library(rgdal)
library(ggplot2)
library(beyonce)
library(rworldmap)
library(RColorBrewer)
#Percent of land spared, used, or unchanged relative to BAU scenario
prop<-read.csv("Percent_map_all.csv", header=TRUE)
head(prop)
# Attach country table to rworldmap polygons via ISO3, reproject to Mollweide.
sPDF.prop.nt <- joinCountryData2Map(prop ,joinCode = "ISO3", nameJoinColumn = "ISO3", verbose=TRUE)
sPDF.prop <- spTransform(sPDF.prop.nt, CRS="+proj=moll +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84")
##CROP LANDUSE % MAPS------
numCats <- 6
colourPalette <- colorRampPalette(brewer.pal(numCats, "RdBu")) (6)
# symmetric category breaks around zero (negative = used, positive = spared)
catmethod=c(-1,-0.50,-0.25,0,0.25,0.50,1) #feed
#quartz()
par(mfrow = c(2, 1))
par(mar = c(2, 1, 1, 1) + 0.1) # c(bottom, left, top, right)
mapParams<-mapCountryData(sPDF.prop,nameColumnToPlot='MIXED_crop', catMethod=catmethod, colourPalette=colourPalette,
                          missingCountryCol="dark gray",addLegend=F,oceanCol="white", mapTitle=" ")
do.call(addMapLegend, c(mapParams, legendLabels="all", legendWidth=2, digits=1, horizontal = T))
mapParamsMAR<-mapCountryData(sPDF.prop,nameColumnToPlot='MARINE_crop', catMethod=catmethod, colourPalette=colourPalette,
                         missingCountryCol="dark gray", addLegend=F,oceanCol="white", mapTitle=" ")
##GRAZING + CROP % MAP(S)-------
mapParams<-mapCountryData(sPDF.prop,nameColumnToPlot='MIXED_c.g', catMethod=catmethod, colourPalette=colourPalette,
                          missingCountryCol="dark gray",addLegend=F,oceanCol="white", mapTitle=" ")
do.call(addMapLegend, c(mapParams, legendLabels="all", legendWidth=2, digits=1, horizontal = T))
#MARINE SCENARIO NOT DEPICTED IN STUDY BECAUSE RESULT THE SAME
#mapParamsMAR<-mapCountryData(sPDF.prop,nameColumnToPlot='MARINE_c.g', catMethod=catmethod, colourPalette=colourPalette,
#                         missingCountryCol="dark gray", addLegend=F,oceanCol="white", mapTitle=" ")
#do.call(addMapLegend, c(mapParamsMAR, legendLabels="all", legendWidth=7, digits=1, horizontal = F))
##----------
#Number of countries pos (spared) or neg (used) land
num_con<-read.csv("PosNeg_plot_all.csv", header=TRUE)
head(num_con)
counts = num_con[,-1]
head(counts)
# per-column region counts for each scenario/direction combination
totals <- as.data.frame(colSums(counts))
names(totals) = "no_regions"
# positional labelling -- assumes the column order matches; TODO confirm
totals$scenario = c("Used","Spared","Used","Spared","Used","Spared","Used","Spared")
totals$id <- c("Mixed","Mixed","Marine","Marine","Mixed","Mixed","Marine","Marine")
totals
crop_tot = as.data.frame(totals[c(1:4),])
both_tot = totals[c(5:8),]
head(crop_tot)
cols <- colorRampPalette((beyonce_palette(12)))
myPal <- cols(length(unique(crop_tot$id)))
#Change Order of Factors
crop_tot$id <-factor(crop_tot$id,levels=c("Mixed","Marine")) #change order
levels(crop_tot$id)
# Dodged bars: regions sparing vs using land per scenario.
ggplot(crop_tot, aes(x=scenario, y = no_regions, fill=factor(id))) +
  geom_bar(stat = "identity",position=position_dodge()) +
  #coord_flip()+
  labs(x="", y = "No. Regions" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_y_continuous(limits = c(0, 215))+
  scale_fill_manual(values = myPal)
##---------------
#Total land plots
#Number of countries pos or neg
total_ha<-read.csv("Total_ha_plot_all.csv", header=TRUE)
head(total_ha)
#Change Order of Factors
levels(total_ha$Scenario) #Order of default levels
total_ha$Scenario <-factor(total_ha$Scenario,levels=c("Current","BAU","Mixed", "Marine")) #change order
head(total_ha$Scenario)
# rows 2,4,6,8 selected -- presumably cropland rows; verify against the CSV
crop_tot = total_ha[c(2,4,6,8),]
crop_tot
cols <- colorRampPalette((beyonce_palette(66)))
myPal <- cols(length(unique(crop_tot$Scenario)))
pd <- position_dodge(0.1)
# Per-scenario cropland totals with standard-deviation error bars.
ggplot(crop_tot, aes(x=Scenario, y = ha_total, fill=factor(Scenario))) +
  geom_bar(stat = "identity",position=position_dodge()) +
  geom_errorbar(aes(ymin=ha_total-ha_total_sd,
                    ymax=ha_total+ha_total_sd),
                width=.2, position=pd)+
  labs(x="", y = "Total Cropland Use (ha)" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_fill_manual(values = rev(myPal))
# Stacked totals over every row, coloured by the id column.
ggplot(total_ha, aes(x=Scenario, y = ha_total, fill=factor(id))) +
  geom_bar(stat = "identity") +
  labs(x="", y = "Total Land Use (ha)" )+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                axis.text.x = element_text(size=13),axis.text.y = element_text(size=13),
                axis.title.x = element_text(size=15), axis.title.y = element_text(size=15),
                panel.background = element_blank(), axis.line = element_line(colour = "black"),
                panel.border = element_rect(colour = "white", fill=NA)) +
  scale_fill_manual(values = myPal)
|
 # Calling functions in R is similar to Python.
# args() shows a function's definition (its formal arguments)
print(args(sd))
# a parameter may be missing even without a default value (lazy evaluation)
# Q1
cube <- function(x, n) {
        x^3
}
# default values
# Q2
# n is never evaluated inside cube(), so cube(3) succeeds despite missing n
cube(3)
pow <- function(x = 4, n = 3) {
        x^n
}
pow()
# ... works like C varargs
# arguments after ... must be named explicitly!!!
# search the environment
search()
# the global environment (user's workspace) is the first element searched
# package:base is always the last.
print(args(lm))
print(environment(lm))
# masking demo: this user-defined lm shadows stats::lm on the search path
lm <- function(x) {x ^ 2}
print(args(lm))
print(environment(lm))
# show free variables
ls(environment(lm))
# library() attaches a package; it becomes the second place searched, the rest move down
library(grid)
search()
# documentation
library(datasets)
data(iris) | /ComputingForDataAnalysis/PA2/function.R | no_license | ktargows/CourseRA | R | false | false | 788 | r | # call function is similar with python
# args() displays a function's definition (formal argument list)
print(args(sd))
# a parameter may be missing even without a default value (lazy evaluation)
# Q1
cube <- function(x, n) {
        x^3
}
# default values
# Q2
# cube(3) works because n is never evaluated in the body (lazy evaluation)
cube(3)
pow <- function(x = 4, n = 3) {
        x^n
}
pow()
# ... works like C varargs
# arguments after ... must be named explicitly!!!
# search the environment
search()
# the global environment (user's workspace) is searched first
# package:base is always the last.
print(args(lm))
print(environment(lm))
# the user-defined lm below masks stats::lm on the search path
lm <- function(x) {x ^ 2}
print(args(lm))
print(environment(lm))
# show free variables
ls(environment(lm))
# library() attaches a package; it is then searched second, the rest move down
library(grid)
search()
# documentation
library(datasets)
data(iris) |
library("data.table")
library("ggplot2")
# Initialization
# Read the NEI PM2.5 emissions table and the source-classification table.
SCC <- as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
# Gather the subset of the NEI data which corresponds to vehicles
condition <- grepl("vehicle", SCC[, SCC.Level.Two], ignore.case=TRUE)
vehiclesSCC <- SCC[condition, SCC]
vehiclesNEI <- NEI[NEI[, SCC] %in% vehiclesSCC,]
# Subset the vehicles NEI data by each city's fip and add city name.
vehiclesBaltimoreNEI <- vehiclesNEI[fips == "24510",]
vehiclesBaltimoreNEI[, city := c("Baltimore City")]
vehiclesLANEI <- vehiclesNEI[fips == "06037",]
vehiclesLANEI[, city := c("Los Angeles")]
# Combine data.tables into one data.table
bothNEI <- rbind(vehiclesBaltimoreNEI,vehiclesLANEI)
# Output graph
png("plot6.png")
# Plot graph using package ggplot2
# Bug fix: a ggplot object is only auto-printed at the interactive top level;
# when this script is source()d the object was discarded and plot6.png came
# out blank. Wrapping in print() forces rendering onto the open png device.
print(
  ggplot(bothNEI, aes(x=factor(year), y=Emissions, fill=city)) +
    geom_bar(aes(fill=year),stat="identity") +
    facet_grid(scales="free", space="free", .~city) +
    labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
    labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
)
dev.off() | /plot6.R | no_license | Ra1nOWL/Exploratory_Data_Analysis_Project_2 | R | false | false | 1,226 | r | library("data.table")
library("ggplot2")
# Initialization
# Load the NEI PM2.5 summary plus the source classification lookup.
SCC <- as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
# Gather the subset of the NEI data which corresponds to vehicles
condition <- grepl("vehicle", SCC[, SCC.Level.Two], ignore.case=TRUE)
vehiclesSCC <- SCC[condition, SCC]
vehiclesNEI <- NEI[NEI[, SCC] %in% vehiclesSCC,]
# Subset the vehicles NEI data by each city's fip and add city name.
vehiclesBaltimoreNEI <- vehiclesNEI[fips == "24510",]
vehiclesBaltimoreNEI[, city := c("Baltimore City")]
vehiclesLANEI <- vehiclesNEI[fips == "06037",]
vehiclesLANEI[, city := c("Los Angeles")]
# Combine data.tables into one data.table
bothNEI <- rbind(vehiclesBaltimoreNEI,vehiclesLANEI)
# Output graph
png("plot6.png")
# Plot graph using package ggplot2
# Bug fix: without print() the ggplot object is never rendered when the
# script is source()d, leaving the png device empty (auto-printing happens
# only at the interactive prompt).
print(
  ggplot(bothNEI, aes(x=factor(year), y=Emissions, fill=city)) +
    geom_bar(aes(fill=year),stat="identity") +
    facet_grid(scales="free", space="free", .~city) +
    labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
    labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
)
dev.off() |
# One-time environment setup: install every package the dashboard needs.
# NOTE(review): unconditional installs re-download on every run; consider
# guarding each with a requireNamespace() check.
install.packages("shiny")
install.packages("mc2d")
install.packages("ggplot2")
install.packages("scales")
install.packages("dplyr")
install.packages("reshape2")
install.packages("countrycode")
install.packages("shinydashboard")
| /init.R | no_license | abhatia2014/Threat-Dashboard | R | false | false | 229 | r | install.packages("shiny")
# Remaining dashboard dependencies (shiny installed on the previous line).
# NOTE(review): these run unconditionally on every execution.
install.packages("mc2d")
install.packages("ggplot2")
install.packages("scales")
install.packages("dplyr")
install.packages("reshape2")
install.packages("countrycode")
install.packages("shinydashboard")
|
## LIBRARIES ======================================================================================
library("tidyverse")
library("lubridate")
library("splines")
library("caret")
library("recipes")
## DATA
# Stack the house-price train/test sets, tagging each row with its partition
# so they can be split apart again after shared preprocessing.
df_tr <- read_csv("./01_data/01_raw/train.csv") %>% mutate(partition = "train")
df_ts <- read_csv("./01_data/01_raw/test.csv") %>% mutate(partition = "test")
df_all <- bind_rows(df_tr, df_ts)
# printed for inspection only; the reordered result is not assigned
df_all %>% select(Id, SalePrice, partition, everything())
## MANUAL ENCODING ================================================================================
# When TRUE, the quality/condition factors below become ordered factors.
order_factors = FALSE
df_all <- df_all %>%
  # rename columns starting with a digit so they are syntactic names
  dplyr::rename(FrstFlrSF = `1stFlrSF`, ScndFlrSF = `2ndFlrSF`, ThrdSsnPorch = `3SsnPorch`) %>%
  dplyr::mutate(
    LotShape = factor(LotShape, levels = c("IR3", "IR2", "IR1", "Reg"), ordered = order_factors),
    Utilities = factor(Utilities, levels = c("ELO", "NoSeWa", "NoSewr", "AllPub"), ordered = order_factors),
    LandSlope = factor(LandSlope, levels = c("Sev", "Mod", "Gtl"), ordered = order_factors),
    OverallQual = factor(OverallQual, levels = c(1:10), ordered = order_factors),
    OverallCond = factor(OverallCond, levels = c(1:10), ordered = order_factors),
    ExterQual = factor(ExterQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    ExterCond = factor(ExterCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    BsmtQual = factor(BsmtQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    BsmtCond = factor(BsmtCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    BsmtExposure = factor(BsmtExposure, levels = c("No", "Mn", "Av", "Gd"), ordered = order_factors),
    BsmtFinType1 = factor(BsmtFinType1, levels = c("Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"), ordered = order_factors),
    BsmtFinType2 = factor(BsmtFinType2, levels = c("Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"), ordered = order_factors),
    HeatingQC = factor(HeatingQC, c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    KitchenQual = factor(KitchenQual, c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    Functional = factor(Functional, c("Sal", "Sev", "Maj2", "Maj1", "Mod", "Min2", "Min1", "Typ"), ordered = order_factors),
    # NOTE(review): Po/Fa are absent from this level set, so such values are
    # coerced to NA -- confirm that is intentional.
    FireplaceQu = factor(FireplaceQu, levels = c("TA", "Gd", "Ex"), ordered = order_factors),
    # Bug fix: level was misspelled "Uf"; the raw data value is "Unf", so every
    # unfinished garage was silently converted to NA by the old level list.
    GarageFinish = factor(GarageFinish, levels = c("Unf", "RFn", "Fin"), ordered = order_factors),
    GarageQual = factor(GarageQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    GarageCond = factor(GarageCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    PavedDrive = factor(PavedDrive, levels = c("N", "P", "Y"), ordered = order_factors),
    PoolQC = factor(PoolQC, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
    Fence = factor(Fence, levels = c("MnWw", "GdWo", "MnPrv", "GdPrv"), ordered = order_factors),
    # NOTE(review): the dangling empty `label = ` argument is kept verbatim;
    # it resolves to the default (label = FALSE) but reads like an unfinished
    # edit -- confirm whether month-name labels were intended.
    MoSold = month(MoSold, label = ),
    MSSubClass = factor(paste0("MS", MSSubClass), ordered = order_factors)
  ) %>%
  dplyr::mutate_if(is_character, .funs = ~ factor(.x))
## VISUAL ANALYSIS ================================================================================
# Partition a data frame's columns into numeric and factor name vectors.
#
# Bug fix: the body previously inspected the global `df_all` instead of the
# `df` argument, so the function silently ignored its input. Base vapply()
# replaces purrr::map_lgl so the helper has no package dependency.
#
# @param df data frame to inspect
# @return list with `num` (numeric column names) and `cat` (factor column names)
ret_col_type <- function(df) {
  num_vars <- colnames(df)[vapply(df, is.numeric, logical(1))]
  cat_vars <- colnames(df)[vapply(df, is.factor, logical(1))]
  return(list(num = num_vars, cat = cat_vars))
}
# Split predictors by type and sanity-check that no column falls through.
col_type <- ret_col_type(df_all)
num_vars <- setdiff(col_type$num, c("SalePrice", "partition"))
cat_vars <- setdiff(col_type$cat, c("SalePrice", "partition"))
other_vars <- setdiff(colnames(df_all), union(union(num_vars, cat_vars), c("SalePrice", "partition")))
if (length(other_vars) > 0) {
  # Bug fix: the message referenced the undefined `other_cols`, so the check
  # itself errored whenever it fired; `other_vars` is the computed vector.
  print(paste0(other_vars, " are neither numeric nor factors."))
}
pred_vars <- setdiff(colnames(df_all), c("SalePrice", "partition", "Id"))
# Exploratory loop (intentionally commented out): scatter + loess smoother
# for numeric predictors, boxplot for categorical ones, paced at 5 s/plot.
# for (pred_var in pred_vars) {
#   if (pred_var %in% num_vars) {
#     p <- ggplot(df_all, aes_string(x = pred_var, y = "SalePrice")) +
#       geom_point() +
#       #geom_smooth(method = 'lm') +
#       geom_smooth(method = 'loess')
#   } else if (pred_var %in% cat_vars) {
#     p <- ggplot(df_all, aes_string(x = pred_var, y = "SalePrice")) +
#       geom_boxplot()
#   }
#   print(p)
#   Sys.sleep(5)
# }
# Quick OverallQual vs SalePrice scatter for inspection.
df_all %>%
  ggplot(aes(x = OverallQual, y = SalePrice))+
  geom_point()
## MISSING VALUES =================================================================================
# Statistical mode of a vector: the most frequent value, with ties broken in
# favour of the value that appears first in v.
getmode <- function(v) {
  candidates <- unique(v)
  freqs <- tabulate(match(v, candidates))
  candidates[which.max(freqs)]
}
## REMOVE OUTLIER
# df_all <- df_all %>% filter(GrLivArea < 4000) #%>% select(Id, GrLivArea, SalePrice)
#df_all <- df_all %>% filter(!Id %in% c(524, 1299))
## MISSING VALUES
# Census of NA counts per column, sorted descending.
miss_vals <- map_dfr(df_all, ~ sum(is.na(.x))) %>%
  gather() %>%
  filter(value > 0) %>%
  arrange(desc(value))
miss_vals %>% print(n = 200)
# df_all[is.na(df_all$GarageYrBlt), c("GarageFinish", "GarageFinish", "GarageCond", "GarageQual", "GarageType",
#                                    "GarageCars", "GarageArea", "GarageYrBlt")]
## NA means "No":
# NOTE(review): "GarageFinish" is listed twice, and "GarageYrBlt" appears in
# BOTH na_no and na_missing below -- the second assignment wins; confirm which
# treatment is intended.
na_no <- c(
  "Alley", "PoolQC", "MiscFeature", "Fence", "FireplaceQu", "LotFrontage",
  ##
  "GarageFinish", "GarageFinish", "GarageCond", "GarageQual", "GarageType",
  "GarageCars", "GarageArea", "GarageYrBlt",
  ##
  "BsmtCond", "BsmtExposure", "BsmtQual", "BsmtFinType2", "BsmtFinType1",
  "BsmtFullBath", "BsmtHalfBath", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF"
)
## NA means "missing":
na_missing <- c(
  "MasVnrType", "MasVnrArea", "MSZoning", "Utilities", "Functional",
  "Exterior1st", "Exterior2nd", "BsmtFinSF1", "Electrical", "KitchenQual",
  "GarageYrBlt", "SaleType"
)
df_tr <- df_all %>% filter(partition == "train")
df_ts <- df_all %>% filter(partition == "test")
# Shared imputation rules, previously copy-pasted verbatim for df_tr and
# df_ts. For `na_no` columns an NA means the feature is absent ("No"/0);
# for `na_missing` columns it is genuinely missing and is filled with the
# mode (factors) or the median (numerics).
fill_missing <- function(df) {
  df %>%
    mutate_at(na_no, function(x) {
      if (is.factor(x)) {
        x <- fct_explicit_na(x, na_level = "No")
      } else {
        x <- replace_na(x, replace = 0)
      }
    }) %>%
    mutate_at(na_missing, function(x) {
      if (is.factor(x)) {
        x <- fct_explicit_na(x, na_level = names(which.max(table(x))))
      } else if (is.numeric(x)) {
        # Bug fix: median(x) is NA whenever x still contains NAs -- which is
        # exactly when this branch runs -- so the old replace_na() call was a
        # no-op; na.rm = TRUE makes the median imputation actually happen.
        x <- replace_na(x, replace = median(x, na.rm = TRUE))
      }
    })
}
df_tr <- fill_missing(df_tr)
df_ts <- fill_missing(df_ts)
df_all <- bind_rows(df_tr, df_ts)
# Re-run the NA census; after imputation this listing should be empty.
map_dfr(df_all, ~ sum(is.na(.x))) %>%
  gather() %>%
  filter(value > 0) %>%
  arrange(desc(value))
## SIMPLE FEATURES ================================================================================
# Quality/condition columns that use the five-grade Po..Ex scale; each gets a
# simplified *_Simple companion below.
five_level_simple <- c(
  "ExterQual", "ExterCond", "BsmtQual", "KitchenQual",
  "GarageQual", "BsmtCond", "GarageCond", "HeatingQC", "PoolQC"
)
# Collapse the five quality grades into three coarse buckets.
# NOTE(review): "avgerage" misspells "average", but it is the level name the
# downstream data carries, so it is reproduced unchanged here.
five_level_recode <- function(x) {
  fct_recode(
    x,
    bad = "Po",
    avgerage = "Fa",
    avgerage = "TA",
    good = "Gd",
    good = "Ex"
  )
}
# Basement finish-type columns that share the six-level Unf..GLQ scale.
six_level_simple <- c("BsmtFinType1", "BsmtFinType2")
# Map the six basement finish grades onto a coarse 1-3 quality scale.
six_level_recode <- function(x) {
  fct_recode(
    x,
    `1` = "Unf",
    `1` = "LwQ",
    `2` = "Rec",
    `2` = "BLQ",
    `3` = "ALQ",
    `3` = "GLQ"
  )
}
# Overall rating columns on the 1-10 scale, simplified to three bands below.
ten_level_simple <- c("OverallQual", "OverallCond")
# Band the ten-point overall ratings into low (1-3), mid (4-7), high (8-10).
ten_level_recode <- function(x) {
  fct_recode(
    x,
    `1` = "1", `1` = "2", `1` = "3",
    `2` = "4", `2` = "5", `2` = "6", `2` = "7",
    `3` = "8", `3` = "9", `3` = "10"
  )
}
# .funs = list(Simple = ...) appends a *_Simple variant next to each original
# column rather than overwriting it.
df_all <- df_all %>%
  dplyr::mutate_at(five_level_simple,
    .funs = list(Simple = five_level_recode)
  ) %>%
  dplyr::mutate_at(six_level_simple,
    .funs = list(Simple = six_level_recode)
  ) %>%
  dplyr::mutate_at(ten_level_simple,
    .funs = list(Simple = ten_level_recode)
  ) %>%
  dplyr::mutate_at("Functional",
    .funs = list(Simple = ~ fct_recode(.x,
      `1` = "Sal", `1` = "Sev", `1` = "Maj2",
      `2` = "Maj1", `2` = "Mod", `2` = "Min2",
      `3` = "Min1", `3` = "Typ"
    ))
  )
# Strip the first underscore from every column name, turning e.g.
# "ExterQual_Simple" into "ExterQualSimple" (original names have no "_").
colnames(df_all) <- str_replace(colnames(df_all), "_", "")
## CREATE NEW FEATURES ============================================================================
##
# Derived features: ages, quality x condition interaction "grades", quantity x
# quality "scores", aggregate square footage, and Yes/No indicator columns.
df_all <-
  df_all %>%
  mutate(
    # pmax clamps negative ages that can arise from remodel-after-sale records
    YearsSinceBuilt = pmax(YrSold - YearBuilt, 0),
    YearsSinceRemodel = pmax(YrSold - YearRemodAdd, 0),
    # as.numeric() on these factors yields the integer level codes
    ExterGrade = as.numeric(ExterCond) * as.numeric(ExterQual),
    ExterGradeSimple = as.numeric(ExterCondSimple) * as.numeric(ExterQualSimple),
    OverallGrade = as.numeric(OverallCond) * as.numeric(OverallQual),
    OverallGradeSimple = as.numeric(OverallCondSimple) * as.numeric(OverallQualSimple),
    GarageGrade = as.numeric(GarageCond) * as.numeric(GarageQual),
    GarageGradeSimple = as.numeric(GarageCondSimple) * as.numeric(GarageQualSimple),
    KitchenScore = KitchenAbvGr * as.numeric(KitchenQual),
    KitchenScoreSimple = KitchenAbvGr * as.numeric(KitchenQualSimple),
    FireplaceScore = Fireplaces * as.numeric(FireplaceQu),
    GarageScore = GarageArea * as.numeric(GarageQual),
    GarageScoreSimple = GarageArea * as.numeric(GarageQualSimple),
    PoolScore = PoolArea * as.numeric(PoolQC),
    PoolScoreSimple = PoolArea * as.numeric(PoolQCSimple),
    # FireplaceScoreSimple = Fireplaces*as.numeric(FireplaceQuSimple),
    # half baths weighted at 0.5
    TotalBath = BsmtFullBath + .5 * BsmtHalfBath + .5 * HalfBath + FullBath,
    AllSF = GrLivArea + TotalBsmtSF,
    AllFlrsSF = FrstFlrSF + ScndFlrSF,
    AllPorchSF = OpenPorchSF + EnclosedPorch + ThrdSsnPorch + ScreenPorch + WoodDeckSF,
    HasMasVnr = fct_recode(MasVnrType,
      Yes = "BrkCmn", Yes = "BrkFace", Yes = "CBlock",
      Yes = "Stone", No = "None"
    ),
    BoughtOffPlan = fct_recode(SaleCondition,
      Yes = "Partial",
      No = "Abnorml", No = "Alloca", No = "AdjLand", No = "Family",
      No = "Normal"
    ),
    HasPool = ifelse(PoolArea > 0, "Yes", "No"),
    HasScnFloor = ifelse(ScndFlrSF > 0, "Yes", "No"),
    HasGarage = ifelse(GarageArea > 0, "Yes", "No"),
    HasBsmt = ifelse(BsmtFinSF1 > 0, "Yes", "No"),
    HasFireplace = ifelse(Fireplaces > 0, "Yes", "No")
  ) %>%
  # the Has* indicators above are character; convert them (and any other
  # character columns) to factors
  dplyr::mutate_if(is_character, .funs = ~ factor(.x))
# predictor types
# Re-partition columns by type after feature engineering and verify coverage.
col_type <- ret_col_type(df_all)
num_pred_vars <- setdiff(col_type$num, c("SalePrice", "partition", "Id"))
cat_pred_vars <- setdiff(col_type$cat, c("SalePrice", "partition", "Id"))
other_vars <- setdiff(colnames(df_all), union(union(num_pred_vars, cat_pred_vars), c("SalePrice", "partition", "Id")))
if (length(other_vars) > 0) {
  # Bug fix: referenced the undefined `other_cols`, so this diagnostic
  # crashed whenever it was supposed to fire.
  print(paste0(other_vars, " are neither numeric nor factors."))
}
## TOP FEATURES BASED ON DIFFERENT CORRELATION COEFFICIENTS
cor_type <- "pearson" ## pearson, spearman, kendall
# correlations computed on labelled (training) rows only
cor_df <- df_all[!is.na(df_all$SalePrice), c("SalePrice", num_pred_vars)]
# first row of the correlation matrix = each predictor's correlation with SalePrice
cor_vals <- sort(cor(cor_df, method = cor_type)[1, ], decreasing = T)
# keep predictors whose |correlation| with SalePrice clears 0.55
top_features <- setdiff(
  names(cor_vals)[abs(cor_vals) > .55],
  "SalePrice"
)
## POLYNOMIAL TERMS
# df_all <- df_all %>%
# dplyr::mutate_at(top_features, .funs = list(
# poly2 = ~ .x**2,
# poly3 = ~ .x**3,
# poly4 = ~.x**4,
# sqrt = ~ .x**.5,
# log = ~log(.x)
# ))
## SPLINES
# spl <- map(
# df_all[top_features],
# function(x) {
# res <- bs(x, degree = 5)
# res
# }
# ) %>% reduce(cbind)
#
# spl <- data.frame(spl)
# colnames(spl) <- flatten_chr(map(
# top_features,
# ~ paste0(.x, paste0("_bs", c(1, 2, 3)))
# ))
# df_all <- bind_cols(df_all, spl)
write_csv2(df_all, "./01_data/02_processed/train_test_stacked.csv")
| /02_code/01_data_prep/data_prep.R | no_license | kaijennissen/advanced_regression | R | false | false | 11,532 | r | ## LIBRARIES ======================================================================================
library("tidyverse")
library("lubridate")
library("splines")
library("caret")
library("recipes")
## DATA
df_tr <- read_csv("./01_data/01_raw/train.csv") %>% mutate(partition = "train")
df_ts <- read_csv("./01_data/01_raw/test.csv") %>% mutate(partition = "test")
df_all <- bind_rows(df_tr, df_ts)
df_all %>% select(Id, SalePrice, partition, everything())
## MANUEL ENCODING ================================================================================
order_factors = FALSE
df_all <- df_all %>%
dplyr::rename(FrstFlrSF = `1stFlrSF`, ScndFlrSF = `2ndFlrSF`, ThrdSsnPorch = `3SsnPorch`) %>%
dplyr::mutate(
LotShape = factor(LotShape, levels = c("IR3", "IR2", "IR1", "Reg"), ordered = order_factors),
Utilities = factor(Utilities, levels = c("ELO", "NoSeWa", "NoSewr", "AllPub"), ordered = order_factors),
LandSlope = factor(LandSlope, levels = c("Sev", "Mod", "Gtl"), ordered = order_factors),
OverallQual = factor(OverallQual, levels = c(1:10), ordered = order_factors),
OverallCond = factor(OverallCond, levels = c(1:10), ordered = order_factors),
ExterQual = factor(ExterQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
ExterCond = factor(ExterCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
BsmtQual = factor(BsmtQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
BsmtCond = factor(BsmtCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
BsmtExposure = factor(BsmtExposure, levels = c("No", "Mn", "Av", "Gd"), ordered = order_factors),
BsmtFinType1 = factor(BsmtFinType1, levels = c("Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"), ordered = order_factors),
BsmtFinType2 = factor(BsmtFinType2, levels = c("Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"), ordered = order_factors),
HeatingQC = factor(HeatingQC, c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
KitchenQual = factor(KitchenQual, c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
Functional = factor(Functional, c("Sal", "Sev", "Maj2", "Maj1", "Mod", "Min2", "Min1", "Typ"), ordered = order_factors),
FireplaceQu = factor(FireplaceQu, levels = c("TA", "Gd", "Ex"), ordered = order_factors),
GarageFinish = factor(GarageFinish, levels = c("Uf", "RFn", "Fin"), ordered = order_factors),
GarageQual = factor(GarageQual, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
GarageCond = factor(GarageCond, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
PavedDrive = factor(PavedDrive, levels = c("N", "P", "Y"), ordered = order_factors),
PoolQC = factor(PoolQC, levels = c("Po", "Fa", "TA", "Gd", "Ex"), ordered = order_factors),
Fence = factor(Fence, levels = c("MnWw", "GdWo", "MnPrv", "GdPrv"), ordered = order_factors),
MoSold = month(MoSold, label = ),
MSSubClass = factor(paste0("MS", MSSubClass), ordered = order_factors)
) %>%
dplyr::mutate_if(is_character, .funs = ~ factor(.x))
## VISUAL ANALYSIS ================================================================================
#' Split a data frame's columns into numeric and factor variable names.
#'
#' Bug fix: the original body ignored its `df` argument and always inspected
#' the global `df_all`. It now uses the argument it is given; all existing
#' call sites pass `df_all`, so their results are unchanged. The purrr
#' `map_lgl` calls are replaced with base `vapply` (identical behavior,
#' one less hidden dependency).
#'
#' @param df A data frame.
#' @return A list with elements `num` (numeric column names) and `cat`
#'   (factor column names). Columns of any other type appear in neither.
ret_col_type <- function(df) {
  num_vars <- colnames(df)[vapply(df, is.numeric, logical(1))]
  cat_vars <- colnames(df)[vapply(df, is.factor, logical(1))]
  list(num = num_vars, cat = cat_vars)
}
col_type <- ret_col_type(df_all)
num_vars <- setdiff(col_type$num, c("SalePrice", "partition"))
cat_vars <- setdiff(col_type$cat, c("SalePrice", "partition"))
other_vars <- setdiff(colnames(df_all), union(union(num_vars, cat_vars), c("SalePrice", "partition")))
# Warn about columns that are neither numeric nor factor (and therefore
# excluded from both predictor sets).
# Bug fix: the message referenced the undefined object `other_cols`;
# the populated object is `other_vars`.
if (length(other_vars) > 0) {
  print(paste0(other_vars, " are neither numeric nor factors."))
}
pred_vars <- setdiff(colnames(df_all), c("SalePrice", "partition", "Id"))
# for (pred_var in pred_vars) {
# if (pred_var %in% num_vars) {
# p <- ggplot(df_all, aes_string(x = pred_var, y = "SalePrice")) +
# geom_point() +
# #geom_smooth(method = 'lm') +
# geom_smooth(method = 'loess')
# } else if (pred_var %in% cat_vars) {
# p <- ggplot(df_all, aes_string(x = pred_var, y = "SalePrice")) +
# geom_boxplot()
# }
# print(p)
# Sys.sleep(5)
# }
df_all %>%
ggplot(aes(x = OverallQual, y = SalePrice))+
geom_point()
## MISSING VALUES =================================================================================
# Return the statistical mode of a vector: the value that occurs most often.
# Ties are broken in favour of the value that appears first in `v`, because
# which.max() returns the first index at the maximum count.
getmode <- function(v) {
  candidates <- unique(v)
  counts <- tabulate(match(v, candidates))
  candidates[which.max(counts)]
}
## REMOVE OUTLIER
# df_all <- df_all %>% filter(GrLivArea < 4000) #%>% select(Id, GrLivArea, SalePrice)
#df_all <- df_all %>% filter(!Id %in% c(524, 1299))
## MISSING VALUES
miss_vals <- map_dfr(df_all, ~ sum(is.na(.x))) %>%
gather() %>%
filter(value > 0) %>%
arrange(desc(value))
miss_vals %>% print(n = 200)
# df_all[is.na(df_all$GarageYrBlt), c("GarageFinish", "GarageFinish", "GarageCond", "GarageQual", "GarageType",
# "GarageCars", "GarageArea", "GarageYrBlt")]
## NA means "No":
na_no <- c(
"Alley", "PoolQC", "MiscFeature", "Fence", "FireplaceQu", "LotFrontage",
##
"GarageFinish", "GarageFinish", "GarageCond", "GarageQual", "GarageType",
"GarageCars", "GarageArea", "GarageYrBlt",
##
"BsmtCond", "BsmtExposure", "BsmtQual", "BsmtFinType2", "BsmtFinType1",
"BsmtFullBath", "BsmtHalfBath", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF"
)
## NA means "missing":
na_missing <- c(
"MasVnrType", "MasVnrArea", "MSZoning", "Utilities", "Functional",
"Exterior1st", "Exterior2nd", "BsmtFinSF1", "Electrical", "KitchenQual",
"GarageYrBlt", "SaleType"
)
df_tr <- df_all %>% filter(partition == "train")
df_ts <- df_all %>% filter(partition == "test")
df_tr <- df_tr %>%
mutate_at(na_no, function(x) {
if (is.factor(x)) {
x <- fct_explicit_na(x, na_level = "No")
} else {
x <- replace_na(x, replace = 0)
}
}) %>%
mutate_at(na_missing, function(x) {
if (is.factor(x)) {
x <- fct_explicit_na(x, na_level = names(which.max(table(x))))
} else if (is.numeric(x)) {
# x <- replace_na(x, replace = median(x, na.rm = T))
x <- replace_na(x, replace = median(x))
}
})
df_ts <- df_ts %>%
mutate_at(na_no, function(x) {
if (is.factor(x)) {
x <- fct_explicit_na(x, na_level = "No")
} else {
x <- replace_na(x, replace = 0)
}
}) %>%
mutate_at(na_missing, function(x) {
if (is.factor(x)) {
x <- fct_explicit_na(x, na_level = names(which.max(table(x))))
} else if (is.numeric(x)) {
# x <- replace_na(x, replace = median(x, na.rm = T))
x <- replace_na(x, replace = median(x))
}
})
df_all <- bind_rows(df_tr, df_ts)
map_dfr(df_all, ~ sum(is.na(.x))) %>%
gather() %>%
filter(value > 0) %>%
arrange(desc(value))
## SIMPLE FEATURES ================================================================================
five_level_simple <- c(
"ExterQual", "ExterCond", "BsmtQual", "KitchenQual",
"GarageQual", "BsmtCond", "GarageCond", "HeatingQC", "PoolQC"
)
# Collapse a five-level quality factor (Po/Fa/TA/Gd/Ex) into three broad
# levels: bad / avgerage / good.
# NOTE(review): "avgerage" is a typo for "average", but the misspelled level
# name flows into the derived "*Simple" columns downstream, so renaming it
# would change the produced data; left as-is.
five_level_recode <- function(x) {
  fct_recode(x, bad = "Po", avgerage = "Fa", avgerage = "TA", good = "Gd", good = "Ex")
}
six_level_simple <- c("BsmtFinType1", "BsmtFinType2")
# Collapse the six basement-finish quality levels into a coarse 1-3 scale:
# unfinished/low quality -> "1", recreational/below-average -> "2",
# average-or-better living quarters -> "3".
six_level_recode <- function(x) {
  fct_recode(x,
    `1` = "Unf", `1` = "LwQ", `2` = "Rec",
    `2` = "BLQ",
    `3` = "ALQ", `3` = "GLQ"
  )
}
ten_level_simple <- c("OverallQual", "OverallCond")
# Collapse the 1-10 overall quality/condition scale into three bands:
# 1-3 -> "1" (low), 4-7 -> "2" (mid), 8-10 -> "3" (high).
ten_level_recode <- function(x) {
  fct_recode(x,
    `1` = "1", `1` = "2", `1` = "3", `2` = "4", `2` = "5",
    `2` = "6", `2` = "7", `3` = "8", `3` = "9", `3` = "10"
  )
}
df_all <- df_all %>%
dplyr::mutate_at(five_level_simple,
.funs = list(Simple = five_level_recode)
) %>%
dplyr::mutate_at(six_level_simple,
.funs = list(Simple = six_level_recode)
) %>%
dplyr::mutate_at(ten_level_simple,
.funs = list(Simple = ten_level_recode)
) %>%
dplyr::mutate_at("Functional",
.funs = list(Simple = ~ fct_recode(.x,
`1` = "Sal", `1` = "Sev", `1` = "Maj2",
`2` = "Maj1", `2` = "Mod", `2` = "Min2",
`3` = "Min1", `3` = "Typ"
))
)
colnames(df_all) <- str_replace(colnames(df_all), "_", "")
## CREATE NEW FEATURES ============================================================================
##
df_all <-
df_all %>%
mutate(
YearsSinceBuilt = pmax(YrSold - YearBuilt, 0),
YearsSinceRemodel = pmax(YrSold - YearRemodAdd, 0),
ExterGrade = as.numeric(ExterCond) * as.numeric(ExterQual),
ExterGradeSimple = as.numeric(ExterCondSimple) * as.numeric(ExterQualSimple),
OverallGrade = as.numeric(OverallCond) * as.numeric(OverallQual),
OverallGradeSimple = as.numeric(OverallCondSimple) * as.numeric(OverallQualSimple),
GarageGrade = as.numeric(GarageCond) * as.numeric(GarageQual),
GarageGradeSimple = as.numeric(GarageCondSimple) * as.numeric(GarageQualSimple),
KitchenScore = KitchenAbvGr * as.numeric(KitchenQual),
KitchenScoreSimple = KitchenAbvGr * as.numeric(KitchenQualSimple),
FireplaceScore = Fireplaces * as.numeric(FireplaceQu),
GarageScore = GarageArea * as.numeric(GarageQual),
GarageScoreSimple = GarageArea * as.numeric(GarageQualSimple),
PoolScore = PoolArea * as.numeric(PoolQC),
PoolScoreSimple = PoolArea * as.numeric(PoolQCSimple),
# FireplaceScoreSimple = Fireplaces*as.numeric(FireplaceQuSimple),
TotalBath = BsmtFullBath + .5 * BsmtHalfBath + .5 * HalfBath + FullBath,
AllSF = GrLivArea + TotalBsmtSF,
AllFlrsSF = FrstFlrSF + ScndFlrSF,
AllPorchSF = OpenPorchSF + EnclosedPorch + ThrdSsnPorch + ScreenPorch + WoodDeckSF,
HasMasVnr = fct_recode(MasVnrType,
Yes = "BrkCmn", Yes = "BrkFace", Yes = "CBlock",
Yes = "Stone", No = "None"
),
BoughtOffPlan = fct_recode(SaleCondition,
Yes = "Partial",
No = "Abnorml", No = "Alloca", No = "AdjLand", No = "Family",
No = "Normal"
),
HasPool = ifelse(PoolArea > 0, "Yes", "No"),
HasScnFloor = ifelse(ScndFlrSF > 0, "Yes", "No"),
HasGarage = ifelse(GarageArea > 0, "Yes", "No"),
HasBsmt = ifelse(BsmtFinSF1 > 0, "Yes", "No"),
HasFireplace = ifelse(Fireplaces > 0, "Yes", "No")
) %>%
dplyr::mutate_if(is_character, .funs = ~ factor(.x))
# predictor types
col_type <- ret_col_type(df_all)
num_pred_vars <- setdiff(col_type$num, c("SalePrice", "partition", "Id"))
cat_pred_vars <- setdiff(col_type$cat, c("SalePrice", "partition", "Id"))
other_vars <- setdiff(colnames(df_all), union(union(num_pred_vars, cat_pred_vars), c("SalePrice", "partition", "Id")))
# Warn about columns that are neither numeric nor factor (and therefore
# excluded from both predictor sets).
# Bug fix: the message referenced the undefined object `other_cols`;
# the populated object is `other_vars`.
if (length(other_vars) > 0) {
  print(paste0(other_vars, " are neither numeric nor factors."))
}
## TOP FEATURES BASED ON DIFFERENT CORRELATION COEFFICIENTS
cor_type <- "pearson" ## pearson, spearman, kendall
cor_df <- df_all[!is.na(df_all$SalePrice), c("SalePrice", num_pred_vars)]
cor_vals <- sort(cor(cor_df, method = cor_type)[1, ], decreasing = T)
top_features <- setdiff(
names(cor_vals)[abs(cor_vals) > .55],
"SalePrice"
)
## POLYNOMIAL TERMS
# df_all <- df_all %>%
# dplyr::mutate_at(top_features, .funs = list(
# poly2 = ~ .x**2,
# poly3 = ~ .x**3,
# poly4 = ~.x**4,
# sqrt = ~ .x**.5,
# log = ~log(.x)
# ))
## SPLINES
# spl <- map(
# df_all[top_features],
# function(x) {
# res <- bs(x, degree = 5)
# res
# }
# ) %>% reduce(cbind)
#
# spl <- data.frame(spl)
# colnames(spl) <- flatten_chr(map(
# top_features,
# ~ paste0(.x, paste0("_bs", c(1, 2, 3)))
# ))
# df_all <- bind_cols(df_all, spl)
write_csv2(df_all, "./01_data/02_processed/train_test_stacked.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_describe_region_settings}
\alias{backup_describe_region_settings}
\title{Returns the current service opt-in settings for the Region}
\usage{
backup_describe_region_settings()
}
\description{
Returns the current service opt-in settings for the Region. If service opt-in is enabled for a service, Backup tries to protect that service's resources in this Region, when the resource is included in an on-demand backup or scheduled backup plan. Otherwise, Backup does not try to protect that service's resources in this Region.
See \url{https://www.paws-r-sdk.com/docs/backup_describe_region_settings/} for full documentation.
}
\keyword{internal}
| /cran/paws.storage/man/backup_describe_region_settings.Rd | permissive | paws-r/paws | R | false | true | 754 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_describe_region_settings}
\alias{backup_describe_region_settings}
\title{Returns the current service opt-in settings for the Region}
\usage{
backup_describe_region_settings()
}
\description{
Returns the current service opt-in settings for the Region. If service opt-in is enabled for a service, Backup tries to protect that service's resources in this Region, when the resource is included in an on-demand backup or scheduled backup plan. Otherwise, Backup does not try to protect that service's resources in this Region.
See \url{https://www.paws-r-sdk.com/docs/backup_describe_region_settings/} for full documentation.
}
\keyword{internal}
|
## The 'makeCacheMatrix' function contains four functions that allow the user to
## set a matrix and make it invertible. The 'cacheSolve' function allows the
## user to get the inverse of the previous matrix and cache it, as well as
## retrieve the cache. This is useful and saves time when attempting to retrieve
## the inverse of a matrix repeatedly.
## This function is used to create a matrix that is invertible.
## Build a "cache-aware" matrix object: a list of four closures that share
## the matrix `x` and a cached inverse. `set` replaces the matrix and
## invalidates the cache; `get` returns the matrix; `setmatrix`/`getmatrix`
## store and read the cached inverse (NULL until cacheSolve populates it).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## This function inverts a matrix and caches it, or retrieves the cached matrix.
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
## If an inverse is already cached it is returned immediately, with a
## message; otherwise the inverse is computed via solve(), stored in the
## cache, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
| /cachematrix.R | no_license | cervantesedd/ProgrammingAssignment2 | R | false | false | 1,084 | r | ## The 'makeCacheMatrix' function contains four functions that allow the user to
## set a matrix and make it invertible. The 'cacheSolve' function allows the
## user to get the inverse of the previous matrix and cache it, as well as
## retrieve the cache. This is useful and saves time when attempting to retrieve
## the inverse of a matrix repeatedly.
## This function is used to create a matrix that is invertible.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve) m <<- solve
getmatrix <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
## This function inverts a matrix and caches it, or retrieves the cached matrix.
cacheSolve <- function(x, ...) {
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get()
m<-solve(matrix, ...)
x$setmatrix(m)
m
}
|
# Greet the name supplied on the command line, defaulting to "World".
who <- "World"
# All trailing command-line arguments (the words of the name, if any).
input <- commandArgs(trailingOnly = TRUE)
if (length(input) > 0) {
  who <- paste(input, collapse = " ")
}
message("Hello ", who, "!") | /ggplot_interview/hello_world.R | no_license | michaellevy/catalyst_classes | R | false | false | 141 | r | who = "World"
input = commandArgs(trailingOnly = TRUE)
if(length(input))
who = paste(input, collapse = " ")
message("Hello ", who, "!") |
# Plot5.R -- How have emissions from motor vehicle sources changed from
# 1999-2008 in Baltimore City (fips == "24510")?
# Downloads the NEI dataset if needed, totals motor-vehicle PM2.5 emissions
# per year, and writes the line plot to plot5.png.

# Create the project directory if it does not already exist.
dir_local <- "exp_data_analysis_project2"
if(!file.exists(dir_local)){
  dir.create(dir_local)
}

# Work inside the project directory.
setwd(dir_local)

# Download and extract the data archive once; skip if already present.
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
destination_file <- "dataset.zip"
if(!file.exists(destination_file)){
  download.file(data_url, destfile = destination_file)
  unzip(destination_file)
}

# Load the (large) RDS files only once per session.
if (!"nei_data" %in% ls()) {
  nei_data <- readRDS("summarySCC_PM25.rds")
}
if (!"scc_data" %in% ls()) {
  scc_data <- readRDS("Source_Classification_Code.rds")
}

# Keep only Baltimore City observations.
subset_data <- nei_data[nei_data$fips == "24510", ]

# Bug fix: open the PNG device *before* setting margins. In the original,
# par() was called before png(), so the margin setting applied to the
# previous device (if any) and never affected the new PNG device.
png(filename = "plot5.png", width = 480, height = 480, units = "px")
par("mar"=c(5.1, 4.5, 4.1, 2.1))

# Identify motor-vehicle sources by their SCC short names, keep the
# matching Baltimore records, and total their emissions per year.
motor <- grep("motor", scc_data$Short.Name, ignore.case = TRUE)
motor <- scc_data[motor, ]
motor <- subset_data[subset_data$SCC %in% motor$SCC, ]
motorEmissions <- aggregate(motor$Emissions, list(motor$year), FUN = "sum")
plot(motorEmissions, type = "l", xlab = "Year", main = "Total Emissions From Motor Vehicle Sources\n from 1999 to 2008 in Baltimore City", ylab = expression('Total PM'[2.5]*" Emission"))
dev.off()
| /Plot5.R | no_license | kumalok/ExData_Plotting2 | R | false | false | 1,266 | r | #Course Directory
dir_local <- "exp_data_analysis_project2"
if(!file.exists(dir_local)){
dir.create(dir_local)
}
#Set working directory
setwd(dir_local)
#download and extract data in working directory
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
destination_file <- "dataset.zip"
if(!file.exists(destination_file)){
download.file(data_url, destfile = destination_file)
unzip(destination_file)
}
if (!"nei_data" %in% ls()) {
nei_data <- readRDS("summarySCC_PM25.rds")
}
if (!"scc_data" %in% ls()) {
scc_data <- readRDS("Source_Classification_Code.rds")
}
#How have emissions from motor vehicle sources changed from 1999-2008 in Baltimore City?
subset_data <- nei_data[nei_data$fips == "24510", ]
par("mar"=c(5.1, 4.5, 4.1, 2.1))
png(filename = "plot5.png", width = 480, height = 480, units = "px")
motor <- grep("motor", scc_data$Short.Name, ignore.case = T)
motor <- scc_data[motor, ]
motor <- subset_data[subset_data$SCC %in% motor$SCC, ]
motorEmissions <- aggregate(motor$Emissions, list(motor$year), FUN = "sum")
plot(motorEmissions, type = "l", xlab = "Year", main = "Total Emissions From Motor Vehicle Sources\n from 1999 to 2008 in Baltimore City", ylab = expression('Total PM'[2.5]*" Emission"))
dev.off()
|
#' Adds a Layer with Observations to a Profile Plot
#'
#' Function \code{\link{show_observations}} adds a layer to a plot created with
#' \code{\link{plot.ceteris_paribus_explainer}} for selected observations.
#' Various parameters help to decide what should be plotted, profiles, aggregated profiles, points or rugs.
#'
#' @param x a ceteris paribus explainer produced with function \code{ceteris_paribus()}
#' @param ... other explainers that shall be plotted together
#' @param color a character. Either name of a color or name of a variable that should be used for coloring
#' @param size a numeric. Size of lines to be plotted
#' @param alpha a numeric between 0 and 1. Opacity of lines
#' @param variables if not \code{NULL} then only \code{variables} will be presented
#' @param variable_type a character. If "numerical" then only numerical variables will be plotted.
#' If "categorical" then only categorical variables will be plotted.
#'
#' @return a \code{ggplot2} layer
#'
#' @examples
#' library("DALEX")
#' library("randomForest")
#'
#' rf_model <- randomForest(survived == "yes" ~.,
#' data = titanic_imputed)
#'
#' explainer_rf <- explain(rf_model, data = titanic_imputed,
#' y = titanic_imputed$survived == "yes",
#' label = "RF", verbose = FALSE)
#'
#' selected_passangers <- select_sample(titanic_imputed, n = 100)
#' cp_rf <- ceteris_paribus(explainer_rf, selected_passangers)
#' cp_rf
#'
#' plot(cp_rf, variables = "age", color = "grey") +
#' show_observations(cp_rf, variables = "age", color = "black") +
#' show_rugs(cp_rf, variables = "age", color = "red")
#'
#'
#' @export
show_observations <- function(x, ...,
                              size = 2,
                              alpha = 1,
                              color = "#371ea3",
                              variable_type = "numerical",
                              variables = NULL) {
  check_variable_type(variable_type)
  # If there are more explainers, merge their "observations" attributes
  # into a single data frame.
  dfl <- c(list(x), list(...))
  all_observations <- lapply(dfl, function(tmp) {
    attr(tmp, "observations")
  })
  all_observations <- do.call(rbind, all_observations)
  all_observations$`_ids_` <- factor(rownames(all_observations))
  # Variables to use: every column whose name does not start with "_",
  # optionally restricted to the user-supplied `variables`.
  all_variables <- grep(colnames(all_observations), pattern = "^[^_]", value = TRUE)
  if (!is.null(variables)) {
    # Bug fix: the original built the error message from `all_variables`
    # *after* the intersection, so the message always listed nothing.
    # Capture the available set before intersecting.
    available_variables <- all_variables
    all_variables <- intersect(all_variables, variables)
    if (length(all_variables) == 0) {
      stop(paste0("variables do not overlap with ", paste(available_variables, collapse = ", ")))
    }
  }
  # Keep only numerical or only categorical variables per `variable_type`.
  is_numeric <- sapply(all_observations[, all_variables, drop = FALSE], is.numeric)
  if (variable_type == "numerical") {
    vnames <- all_variables[which(is_numeric)]
    if (length(vnames) == 0) stop("There are no numerical variables")
  } else {
    vnames <- all_variables[which(!is_numeric)]
    if (length(vnames) == 0) stop("There are no non-numerical variables")
  }
  # Build a long-format frame for plotting: one row per (observation,
  # variable) pair. `_color_` is populated only when `color` names a
  # variable (or one of the special "_label_"/"_vname_"/"_ids_" columns).
  is_color_points_a_variable <- color %in% c(all_variables, "_label_", "_vname_", "_ids_")
  tmp <- lapply(vnames, function(var) {
    data.frame(`_x_` = all_observations[, var],
               `_vname_` = var,
               `_yhat_` = all_observations$`_yhat_`,
               `_y_` = if (is.null(all_observations$`_y_`)) NA else all_observations$`_y_`,
               `_color_` = if (!is_color_points_a_variable) NA else {
                 if (color == "_vname_") var else all_observations[, color]
               },
               `_ids_` = all_observations$`_ids_`,
               `_label_` = all_observations$`_label_`)
  })
  all_observations_long <- do.call(rbind, tmp)
  colnames(all_observations_long) <- c("_x_", "_vname_", "_yhat_", "_y_", "_color_", "_ids_", "_label_")
  # When coloring by a real variable, restore its original column name so
  # the ggplot2 legend is labelled correctly.
  if (is_color_points_a_variable && !(color %in% colnames(all_observations_long))) {
    colnames(all_observations_long)[5] <- color
  }
  # Return the ggplot2 layer with the observation points.
  if (is_color_points_a_variable) {
    geom_point(data = all_observations_long, aes_string(color = paste0("`", color, "`")), size = size, alpha = alpha)
  } else {
    geom_point(data = all_observations_long, size = size, alpha = alpha, color = color)
  }
}
| /R/show_observations.R | no_license | Alex33261/ingredients | R | false | false | 4,300 | r | #' Adds a Layer with Observations to a Profile Plot
#'
#' Function \code{\link{show_observations}} adds a layer to a plot created with
#' \code{\link{plot.ceteris_paribus_explainer}} for selected observations.
#' Various parameters help to decide what should be plotted, profiles, aggregated profiles, points or rugs.
#'
#' @param x a ceteris paribus explainer produced with function \code{ceteris_paribus()}
#' @param ... other explainers that shall be plotted together
#' @param color a character. Either name of a color or name of a variable that should be used for coloring
#' @param size a numeric. Size of lines to be plotted
#' @param alpha a numeric between 0 and 1. Opacity of lines
#' @param variables if not \code{NULL} then only \code{variables} will be presented
#' @param variable_type a character. If "numerical" then only numerical variables will be plotted.
#' If "categorical" then only categorical variables will be plotted.
#'
#' @return a \code{ggplot2} layer
#'
#' @examples
#' library("DALEX")
#' library("randomForest")
#'
#' rf_model <- randomForest(survived == "yes" ~.,
#' data = titanic_imputed)
#'
#' explainer_rf <- explain(rf_model, data = titanic_imputed,
#' y = titanic_imputed$survived == "yes",
#' label = "RF", verbose = FALSE)
#'
#' selected_passangers <- select_sample(titanic_imputed, n = 100)
#' cp_rf <- ceteris_paribus(explainer_rf, selected_passangers)
#' cp_rf
#'
#' plot(cp_rf, variables = "age", color = "grey") +
#' show_observations(cp_rf, variables = "age", color = "black") +
#' show_rugs(cp_rf, variables = "age", color = "red")
#'
#'
#' @export
show_observations <- function(x, ...,
size = 2,
alpha = 1,
color = "#371ea3",
variable_type = "numerical",
variables = NULL) {
check_variable_type(variable_type)
# if there is more explainers, they should be merged into a single data frame
dfl <- c(list(x), list(...))
all_observations <- lapply(dfl, function(tmp) {
attr(tmp, "observations")
})
all_observations <- do.call(rbind, all_observations)
all_observations$`_ids_` <- factor(rownames(all_observations))
# variables to use
all_variables <- grep(colnames(all_observations), pattern = "^[^_]", value = TRUE)
if (!is.null(variables)) {
all_variables <- intersect(all_variables, variables)
if (length(all_variables) == 0) stop(paste0("variables do not overlap with ", paste(all_variables, collapse = ", ")))
}
# only numerical or only factors?
is_numeric <- sapply(all_observations[, all_variables, drop = FALSE], is.numeric)
if (variable_type == "numerical") {
vnames <- all_variables[which(is_numeric)]
if (length(vnames) == 0) stop("There are no numerical variables")
} else {
vnames <- all_variables[which(!is_numeric)]
if (length(vnames) == 0) stop("There are no non-numerical variables")
}
# prepare data for plotting points
is_color_points_a_variable <- color %in% c(all_variables, "_label_", "_vname_", "_ids_")
tmp <- lapply(vnames, function(var) {
data.frame(`_x_` = all_observations[,var],
`_vname_` = var,
`_yhat_` = all_observations$`_yhat_`,
`_y_` = if (is.null(all_observations$`_y_`)) NA else all_observations$`_y_`,
`_color_` = if (!is_color_points_a_variable) NA else {
if (color == "_vname_") var else all_observations[,color]
},
`_ids_` = all_observations$`_ids_`,
`_label_` = all_observations$`_label_`)
})
all_observations_long <- do.call(rbind, tmp)
colnames(all_observations_long) <- c("_x_", "_vname_", "_yhat_", "_y_", "_color_", "_ids_", "_label_")
if ((is_color_points_a_variable ) & !(color %in% colnames(all_observations_long)))
colnames(all_observations_long)[5] = color
# show observations
if (is_color_points_a_variable) {
res <- geom_point(data = all_observations_long, aes_string(color = paste0("`",color,"`")), size = size, alpha = alpha)
} else {
res <- geom_point(data = all_observations_long, size = size, alpha = alpha, color = color)
}
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/view_image.R
\name{display_set}
\alias{display_set}
\title{Display 2D mosaic output as a plot image}
\usage{
display_set(image_list, title = NULL)
}
\arguments{
\item{image_list}{List output from collect_bricks() or image_to_bricks(). Contains an element \code{Img_lego}.}
\item{title}{Optional title to include above plotted mosaic.}
}
\description{
Display 2D mosaic output as a plot image
}
| /man/display_set.Rd | permissive | rpodcast/brickr | R | false | true | 474 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/view_image.R
\name{display_set}
\alias{display_set}
\title{Display 2D mosaic output as a plot image}
\usage{
display_set(image_list, title = NULL)
}
\arguments{
\item{image_list}{List output from collect_bricks() or image_to_bricks(). Contains an element \code{Img_lego}.}
\item{title}{Optional title to include above plotted mosaic.}
}
\description{
Display 2D mosaic output as a plot image
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarise.R
\name{summarise_catval}
\alias{summarise_catval}
\title{Summarise a value in a column}
\usage{
summarise_catval(grouped_form, colname, val)
}
\arguments{
\item{grouped_form}{any grouped dataframe}
\item{colname}{name of the column to summarise}
\item{val}{the value in the column to summarise}
}
\value{
summary dataframe
}
\description{
Summarise the number and percentage of that value in a column
}
| /man/summarise_catval.Rd | no_license | EddieZhang540/INORMUS | R | false | true | 440 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarise.R
\name{summarise_catval}
\alias{summarise_catval}
\title{Summarise a value in a column}
\usage{
summarise_catval(grouped_form, colname, val)
}
\arguments{
\item{grouped_form}{any grouped dataframe}
\item{colname}{name of the column to summarise}
\item{val}{the value in the column to summarise}
}
\value{
summary dataframe
}
\description{
Summarise the number and percentage of that value in a column
}
|
############################################################################################
#' @title Produce a Summary Table of Data Product Health by Month
#' @author Robert Lee \email{rlee@battelleecology.org}\cr
#' @description For a specified data product ID, this function will produce a data frame of data
#' product availability and validity for their period of record at a site.
#'
#'
#' Because the full period of record for all sites are queried,
#' this function can take a long time to execute.
#' @inheritParams dp.survey
#'
#' @param dp.id Parameter of class character. The NEON data product code of the data product of
#' interest.
#' @param site Parameter of class character. The NEON site of interest.
#'
#' @param bgn.month Parameter of class character. The year-month (e.g. "2017-01") of the first month to get data for.
#' @param end.month Parameter of class character. The year-month (e.g. "2017-01") of the last month to get data for.
#' @param save.dir The directory for data files to be saved to.
#' @return A data frame of health statisitcs by month for a given site. Raw NEON data are also saved to the specified save.dir, if supplied.
#' @keywords process quality, data quality, gaps, commissioning, data product, health
#' @examples
#' # Summarize 2D wind perfomance at CPER:
#' CPER_wind=dp.survey(dp.id = "DP1.00001.001", site="CPER")
#' @export
# changelog and author contributions / copyrights
# Robert Lee (2017-11-21)
# original creation
#
##############################################################################################
health.data <- function(site, dp.id, bgn.month, end.month, save.dir) {
  # Default to a temporary directory when no save location is given.
  if (missing(save.dir)) {
    save.dir <- tempdir()
  }
  # Primary variable name for this data product (e.g. a "*Mean" field);
  # the stem without "mean" is used later to match finalQF columns.
  pri.var <- Noble::tis_pri_vars$data.field[which(Noble::tis_pri_vars$dp.id == dp.id)]
  var.name <- gsub(pattern = "mean", replacement = "", x = pri.var, ignore.case = TRUE)
  # Product availability matrix (months x sites), restricted to TIS sites,
  # then the dates where this site has data (marked "x").
  dp.avail <- neon.avail(dp.id = dp.id)
  dp.avail <- cbind(Month = dp.avail[, 1], dp.avail[, which(colnames(dp.avail) %in% Noble::tis_site_config$site.id)])
  temp.dates <- zoo::as.Date(dp.avail$Month[
    which(
      dp.avail[which(colnames(dp.avail) == site)] == "x"
    )
  ])
  # Resolve the reporting window (info.dates) and the subset of months that
  # actually have data available (run.dates, as "YYYY-MM" strings).
  if (missing(bgn.month) && missing(end.month)) {
    run.dates <- substr(temp.dates, start = 0, stop = 7)
    info.dates <- run.dates
  } else if (missing(end.month)) {
    end.month <- Sys.Date()
    info.dates <- seq.Date(from = as.Date(paste0(bgn.month, "-01")), to = end.month, by = "1 month")
    run.dates <- base::substr(temp.dates[temp.dates %in% info.dates], start = 0, stop = 7)
  } else if (missing(bgn.month)) {
    info.dates <- seq.Date(from = as.Date("2014-01-01"), to = end.month, by = "1 month")
    run.dates <- substr(temp.dates[temp.dates %in% info.dates], start = 0, stop = 7)
  } else {
    info.dates <- seq.Date(from = as.Date(paste0(bgn.month, "-01")), to = as.Date(paste0(end.month, "-01")), by = "1 month")
    run.dates <- substr(temp.dates[temp.dates %in% info.dates], start = 0, stop = 7)
  }
  # Result skeleton: months with no data keep 0% availability/validity.
  health.data <- data.frame(Month = substr(info.dates, 0, 7), Availability = rep(0, times = length(info.dates)), Validity = rep(0, times = length(info.dates)))
  message(paste0("Working on ", site, "..."))
  for (d in seq_along(run.dates)) {
    message(paste0("Downloading ", run.dates[d]))
    month.data <- try(Noble::pull.data(site = site, dp.id = dp.id, bgn.month = run.dates[d], end.month = run.dates[d], time.agr = 30, package = "basic", save.dir = save.dir))
    # Bug fix: a failed download yields a length-1 "try-error" object, which
    # the original code then indexed like a data frame. Skip failed months
    # so they retain their 0% defaults instead of crashing the loop.
    if (inherits(month.data, "try-error")) next
    if (!length(month.data) == 0) {
      # Columns for the primary variable only.
      priData <- data.frame(month.data[, grepl(pattern = pri.var, x = colnames(month.data), ignore.case = TRUE)])
      if (!length(priData) == 0) {
        # Percent of non-NA primary-variable readings over the month.
        pcntData <- round(sum(colSums(!is.na(priData))) / (length(priData[, 1]) * length(priData)) * 100, digits = 2)
        finalQFs <- data.frame(month.data[, grepl(pattern = "*finalQF*", x = colnames(month.data), ignore.case = TRUE)])
        # If extra QF columns matched, narrow them to this variable's stem.
        if (length(colnames(finalQFs)) > length(colnames(priData))) {
          finalQFs <- data.frame(finalQFs[, grepl(pattern = var.name, x = colnames(finalQFs), ignore.case = TRUE)])
        }
        # Percent valid = 100 minus (flagged records + missing QF values).
        pcntValid <- round(100 - (sum(colSums(finalQFs, na.rm = TRUE)) / (length(priData[, 1]) * length(priData)) * 100 +
                                    sum(colSums(is.na(finalQFs))) / (length(priData[, 1]) * length(finalQFs)) * 100), digits = 2)
        health.data$Availability[which(health.data$Month == run.dates[d])] <- pcntData
        health.data$Validity[which(health.data$Month == run.dates[d])] <- pcntValid
      }
    }
  }
  return(health.data)
}
| /R/health_data.R | no_license | rhlee12/Noble | R | false | false | 4,582 | r | ############################################################################################
#' @title Produce a Summary Table of Data Product Health by Month
#' @author Robert Lee \email{rlee@battelleecology.org}\cr
#' @description For a specified data product ID, this function will produce a data frame of data
#' product availability and validity for their period of record at a site.
#'
#'
#' Because the full period of record for all sites are queried,
#' this function can take a long time to execute.
#' @inheritParams dp.survey
#'
#' @param dp.id Parameter of class character. The NEON data product code of the data product of
#' interest.
#' @param site Parameter of class character. The NEON site of interest.
#'
#' @param bgn.month Parameter of class character. The year-month (e.g. "2017-01") of the first month to get data for.
#' @param end.month Parameter of class character. The year-month (e.g. "2017-01") of the last month to get data for.
#' @param save.dir The directory for data files to be saved to.
#' @return A data frame of health statistics by month for a given site. Raw NEON data are also saved to the specified save.dir, if supplied.
#' @keywords process quality, data quality, gaps, commissioning, data product, health
#' @examples
#' # Summarize 2D wind performance at CPER:
#' CPER_wind=health.data(dp.id = "DP1.00001.001", site="CPER")
#' @export
# changelog and author contributions / copyrights
# Robert Lee (2017-11-21)
# original creation
#
##############################################################################################
health.data= function(site, dp.id, bgn.month, end.month, save.dir){
  # Summarize a NEON data product's health at one site, by month.
  #
  # Args:
  #   site      - NEON site code (e.g. "CPER").
  #   dp.id     - NEON data product ID (e.g. "DP1.00001.001").
  #   bgn.month - optional first year-month ("YYYY-MM"); defaults to the
  #               start of the period of record.
  #   end.month - optional last year-month ("YYYY-MM"); defaults to today.
  #   save.dir  - directory downloaded data are written to; tempdir() if missing.
  #
  # Returns: data.frame(Month, Availability, Validity) where Availability is
  # the percent of non-missing primary-variable cells and Validity the percent
  # of cells passing the final quality flag. Months with no data stay at 0.
  if(missing(save.dir)){save.dir=tempdir()}
  # Primary measurement field for this product, and its bare variable name
  # (used below to match the matching quality-flag columns).
  pri.var = Noble::tis_pri_vars$data.field[which(Noble::tis_pri_vars$dp.id==dp.id)]
  var.name = gsub(pattern = "mean", replacement = "", x = pri.var, ignore.case = TRUE)
  # Availability grid: one row per month, one column per site, "x" = data exist.
  dp.avail = neon.avail(dp.id = dp.id)
  dp.avail = cbind(Month=dp.avail[,1], dp.avail[,which(colnames(dp.avail) %in% Noble::tis_site_config$site.id)])
  temp.dates = zoo::as.Date(dp.avail$Month[
    which(dp.avail[which(colnames(dp.avail)==site)]=="x")
  ])
  # Resolve the months to download (run.dates, "YYYY-MM" strings) and the
  # months to report (info.dates) from the optional bgn/end arguments.
  if(missing(bgn.month) && missing(end.month)){
    run.dates = substr(temp.dates, start = 1, stop = 7)
    info.dates = run.dates
  }else if(missing(end.month)){
    end.month = Sys.Date()
    info.dates = seq.Date(from=as.Date(paste0(bgn.month, "-01")), to=end.month, by="1 month")
    run.dates = substr(temp.dates[temp.dates %in% info.dates], start = 1, stop = 7)
  }else if(missing(bgn.month)){
    # 2014-01 predates the NEON period of record, so this spans everything
    # available up to end.month.
    info.dates = seq.Date(from=as.Date("2014-01-01"), to=end.month, by="1 month")
    run.dates = substr(temp.dates[temp.dates %in% info.dates], start = 1, stop = 7)
  }else{
    info.dates = seq.Date(from=as.Date(paste0(bgn.month, "-01")), to=as.Date(paste0(end.month, "-01")), by="1 month")
    run.dates = substr(temp.dates[temp.dates %in% info.dates], start = 1, stop = 7)
  }
  health.data = data.frame(Month=substr(info.dates, 1, 7),
                           Availability=rep(0, times=length(info.dates)),
                           Validity=rep(0, times=length(info.dates)))
  message(paste0("Working on ", site, "..."))
  if(length(run.dates)>0){
    for(d in 1:length(run.dates)){
      message(paste0("Downloading ", run.dates[d]))
      month.data <- try(Noble::pull.data(site = site, dp.id = dp.id, bgn.month = run.dates[d], end.month = run.dates[d], time.agr = 30, package = "basic", save.dir = save.dir))
      # Bug fix: try() returns a "try-error" object on failure, which has
      # length 1 and previously slipped past the length-only check, crashing
      # the colnames() calls below. Skip failed downloads explicitly.
      if(!inherits(month.data, "try-error") && length(month.data)!=0){
        priData = data.frame(month.data[,grepl(pattern = pri.var, x = colnames(month.data), ignore.case = TRUE)])
        if(length(priData)!=0){
          # Availability: percent of non-NA cells across primary-variable columns.
          pcntData = round(sum(colSums(!is.na(priData)))/(length(priData[,1])*length(priData))*100, digits = 2)
          finalQFs = data.frame(month.data[,grepl(pattern = "*finalQF*", x = colnames(month.data), ignore.case = TRUE)])
          if(length(colnames(finalQFs))>length(colnames(priData))){
            # More QF columns than data columns: keep only this variable's flags.
            finalQFs = data.frame(finalQFs[,grepl(pattern = var.name, x = colnames(finalQFs), ignore.case = TRUE)])
          }
          # Validity: 100% minus the share of flagged cells and the share of
          # missing flags. NOTE(review): the two terms use different
          # denominators (length(priData) vs length(finalQFs)); preserved
          # as-is -- confirm this is intended.
          pcntValid = round(100-(sum(colSums(finalQFs, na.rm = TRUE))/(length(priData[,1])*length(priData))*100+
                                   sum(colSums(is.na(finalQFs)))/(length(priData[,1])*length(finalQFs))*100), digits = 2)
          health.data$Availability[which(health.data$Month==run.dates[d])] = pcntData
          health.data$Validity[which(health.data$Month==run.dates[d])] = pcntValid
        }
      }
    }
  }
  return(health.data)
}
|
# Populate the plot-type menu from the variable types present in the data:
# when intro.numericnames() is empty only the mosaic plot is offered, when
# intro.categoricnames() is empty only the numeric charts, otherwise the
# full menu.
observe({
updateSelectizeInput(session, "plottype", choices = if (intro.numericnames()[1] == "") c("Mosaic Plot" = "mosaicplot") else if (intro.categoricnames()[1] == "") c("Histogram" = "histogram", "Normal Quantile Plot" = "quantileplot", "Scatterplot" = "scatterplot", "Line Chart" = "linechart") else c("Histogram" = "histogram", "Normal Quantile Plot" = "quantileplot", "Scatterplot" = "scatterplot", "Line Chart" = "linechart", "Boxplot" = "boxplot", "Bar Chart" = "barchart", "Pareto Chart" = "paretochart", "Mosaic Plot" = "mosaicplot"))
})
# Refresh the x/y variable selectors when the plot type changes. The bare
# input$plottype reference exists solely to register the reactive dependency.
observe({
input$plottype
updateSelectizeInput(session, "x", choices = x_choices(), selected = x_selected())
updateSelectizeInput(session, "y", choices = y_choices(), selected = y_selected())
})
# On "store": append the reactive plotting code, the current axis domains and
# binwidth, and the plot-specific code to the session's cumulative code_All.R
# script, then echo the selected variables.
observeEvent(input$store_graphical, {
cat(paste0("\n\n", paste(readLines(file.path(userdir, "code_graphical_reactive.R")), collapse = "\n")), file = file.path(userdir, "code_All.R"), append = TRUE)
## Fix this: the domain/binwidth inputs are interpolated verbatim, so empty
## or NA values presumably emit invalid R code -- TODO confirm and guard.
cat(paste0("\n\ninput_xdomain <- c(", input$xmin, ", ", input$xmax, ")\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("input_ydomain <- c(", input$ymin, ", ", input$ymax, ")\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("input_binwidth <- ", ifelse(is.na(input$binwidth), "NULL", input$binwidth), "\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
# Human-readable record of which variables the stored plot used (y omitted
# for single-variable plot types).
mystr <- paste0("X Variable: ", input$x, (if (input$plottype %in% c("histogram", "quantileplot")) "" else paste0("; Y Variable: ", input$y)))
cat("\n\n", file = file.path(userdir, paste0("code_", input$plottype, ".R")), append = TRUE)
cat(paste0("\n\n", paste(readLines(file.path(userdir, paste0("code_", input$plottype, ".R"))), collapse = "\n")), file = file.path(userdir, "code_All.R"), append = TRUE)
if (input$plottype != "mosaicplot") cat(paste0("p.", input$plottype), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("\ncat('", mystr, "')"), file = file.path(userdir, "code_All.R"), append = TRUE)
})
| /modules/graphical/observe.R | no_license | elombardi-cleve/intRo | R | false | false | 2,063 | r | observe({
updateSelectizeInput(session, "plottype", choices = if (intro.numericnames()[1] == "") c("Mosaic Plot" = "mosaicplot") else if (intro.categoricnames()[1] == "") c("Histogram" = "histogram", "Normal Quantile Plot" = "quantileplot", "Scatterplot" = "scatterplot", "Line Chart" = "linechart") else c("Histogram" = "histogram", "Normal Quantile Plot" = "quantileplot", "Scatterplot" = "scatterplot", "Line Chart" = "linechart", "Boxplot" = "boxplot", "Bar Chart" = "barchart", "Pareto Chart" = "paretochart", "Mosaic Plot" = "mosaicplot"))
})
observe({
input$plottype
updateSelectizeInput(session, "x", choices = x_choices(), selected = x_selected())
updateSelectizeInput(session, "y", choices = y_choices(), selected = y_selected())
})
observeEvent(input$store_graphical, {
cat(paste0("\n\n", paste(readLines(file.path(userdir, "code_graphical_reactive.R")), collapse = "\n")), file = file.path(userdir, "code_All.R"), append = TRUE)
## Fix this
cat(paste0("\n\ninput_xdomain <- c(", input$xmin, ", ", input$xmax, ")\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("input_ydomain <- c(", input$ymin, ", ", input$ymax, ")\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("input_binwidth <- ", ifelse(is.na(input$binwidth), "NULL", input$binwidth), "\n"), file = file.path(userdir, "code_All.R"), append = TRUE)
mystr <- paste0("X Variable: ", input$x, (if (input$plottype %in% c("histogram", "quantileplot")) "" else paste0("; Y Variable: ", input$y)))
cat("\n\n", file = file.path(userdir, paste0("code_", input$plottype, ".R")), append = TRUE)
cat(paste0("\n\n", paste(readLines(file.path(userdir, paste0("code_", input$plottype, ".R"))), collapse = "\n")), file = file.path(userdir, "code_All.R"), append = TRUE)
if (input$plottype != "mosaicplot") cat(paste0("p.", input$plottype), file = file.path(userdir, "code_All.R"), append = TRUE)
cat(paste0("\ncat('", mystr, "')"), file = file.path(userdir, "code_All.R"), append = TRUE)
})
|
# Elastic-net (alpha = 0.1) Gaussian regression on the ovary training set,
# tuned by 10-fold cross-validation; the fitted path is appended to a log file.
library(glmnet)

# Column 1 is the response; columns 4+ are the predictors.
# Fix: was `head=T` -- a partial match of `header` plus the reassignable T alias.
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/ovary.csv", header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
# Renamed from `glm` to avoid shadowing stats::glm.
cv.fit = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.1, family = "gaussian", standardize = TRUE)

# Append the fitted glmnet path summary to the results log.
sink('./ovary_025.txt', append = TRUE)
print(cv.fit$glmnet.fit)
sink()
| /Model/EN/Classifier/ovary/ovary_025.R | no_license | esbgkannan/QSMART | R | false | false | 346 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/ovary.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.1,family="gaussian",standardize=TRUE)
sink('./ovary_025.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EDK.R
\name{EDK}
\alias{EDK}
\title{EDK
to be documented}
\usage{
EDK(d, h, k = 2)
}
\arguments{
\item{d}{to be documented}
\item{h}{to be documented}
\item{k}{to be documented}
}
\value{
to be documented
}
\description{
EDK
to be documented
}
\keyword{internal}
| /man/EDK.Rd | no_license | shepherdmeng/mgwrsar | R | false | true | 343 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EDK.R
\name{EDK}
\alias{EDK}
\title{EDK
to be documented}
\usage{
EDK(d, h, k = 2)
}
\arguments{
\item{d}{to be documented}
\item{h}{to be documented}
\item{k}{to be documented}
}
\value{
to be documented
}
\description{
EDK
to be documented
}
\keyword{internal}
|
# Marginal structural model analysis of a longitudinal treatment: compares
# IPW, overlap, stabilized, and PPTA weighting schemes with bootstrap
# standard errors. The loaded .R file defines `new.data`.
#loading data
load("~/simulated_app_data.R")
currentdata = new.data
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
#loading required packages
require(survey)
require(MCMCpack)
# Inverse logit: maps real-valued x to a probability in (0, 1). Rewritten as
# 1/(1+exp(-x)) so large positive x returns 1 instead of the NaN produced by
# exp(x)/(1+exp(x)) when exp(x) overflows to Inf; values agree elsewhere.
expit = function(x){1/(1+exp(-x))}
# Parameter specifications: K = posterior draws per time point (PPTA),
# D = number of treatment time points, n = number of subjects,
# W = bootstrap replicates (forced to 1 when bootstrap is off).
#parameter specifications
bootstrap=TRUE
K=1000
D = dim(currentdata$treatment)[2]
n = dim(currentdata$fixed.X)[1]
W = 100
# initializing data structures
result = list()
# Per-subject x per-time-point IPW weights, and cumulative (product over
# time) weights per bootstrap replicate.
#ipw variables
save.weights = matrix(NA,nrow=n,ncol=D)
save.boot.weights = matrix(1,nrow=n,ncol=W)
#overlap variables
save.overlap = matrix(NA,nrow=n,ncol=D)
save.boot.overlap = matrix(1,nrow=n,ncol=W)
# PPTA bookkeeping: membership indicators and crossing probabilities per
# subject x posterior draw (x time point / bootstrap replicate).
#ppta variables
save.membership = array(NA,dim = c(n,K,D))
ppta.boot.save.membership = array(1,dim=c(n,K,W))
cross.probs = array(NA,dim = c(n,K,D))
ppta.size = matrix(NA,nrow=D,ncol=K)
ppta.treated.size = matrix(NA,nrow=D,ncol=K)
ppta.control.size = matrix(NA,nrow=D,ncol=K)
#stabilized variables
save.stable = matrix(NA,nrow=n,ncol=D)
save.boot.stable = matrix(1,nrow=n,ncol=W)
#specifying input data
treatment = data.frame(currentdata$treatment)
fixedcovar = data.frame(currentdata$fixed.X)
timecovar = currentdata$time.X
# Prefix column names so formula terms are unambiguous across sources.
#naming input data
names(fixedcovar) = paste("fixed.",names(fixedcovar),sep="")
names(timecovar) = paste("time.",names(timecovar),sep="")
treatment.considered = data.frame(treatment[,1])
names(treatment.considered) = "treat.1"
time.covars.considered = data.frame(timecovar[[1]])
# One column of subject indices (sampled with replacement) per bootstrap
# replicate; reused for every weighting scheme so replicates are comparable.
#sampling bootstrap draws
if(bootstrap){
boot.indicies = matrix(sample(1:n,n*W,replace=TRUE),nrow=n,ncol=W)
} else{
W = 1
}
###################################################################
# iterate through time points
# At each time point d: fit the propensity-score model, accumulate the
# IPW/overlap/stabilized weights (and their bootstrap versions), draw
# posterior propensity scores for PPTA membership, then roll the covariate
# window forward one time point.
for(d in 1:D){
data.matrix = cbind(treatment.considered,fixedcovar,time.covars.considered)
#create datasets with bootstrapped indicies
if(bootstrap){
ppta.boot.data = apply(boot.indicies,2,function(x){data.matrix[x,]})
}
# form  : propensity model -- current treatment on all other covariates.
# form2 : stabilized-weight numerator -- treatment history only
#         (intercept-only at the first time point).
#specify formulas for propensity score model and model for stabilized weights
if(d==1){
no.treat = names(data.matrix)[names(data.matrix)!="treat.1"]
form = paste("treat.1 ~",paste(no.treat,collapse="+"))
form2 = "treat.1~1"
} else {
current.treat = paste("treat.",d,sep="")
no.treat = names(data.matrix)[names(data.matrix)!=current.treat]
form = paste(current.treat,"~", paste(no.treat,collapse="+"))
form2 = paste(current.treat,"~", paste(no.treat[grep("treat*",no.treat)],collapse="+"))
}
####################################################
# ALL WEIGHTING SCHEMES
PS.fit = glm(form,data=data.matrix,family="binomial")
sum.ps.fit = summary(PS.fit)
ipw.PS.est = predict(PS.fit,type="response")
stable.fit = glm(form2,data=data.matrix,family="binomial")
stable.est = predict(stable.fit,type="response")
###calculate ipw/overlap/stable weights
save.weights[,d] = treatment[,d]/ipw.PS.est + (1-treatment[,d])/(1-ipw.PS.est)
save.overlap[,d] = treatment[,d]*(1- ipw.PS.est) + (1-treatment[,d])*ipw.PS.est
save.stable[,d] = treatment[,d]*(stable.est/ipw.PS.est) + (1-treatment[,d])*((1-stable.est)/(1-ipw.PS.est))
# Refit the models on each bootstrap replicate and multiply the replicate's
# cumulative weight by this time point's contribution.
### bootstrapped MLE
if(bootstrap){
for (w in 1:W){
boot.treatment = treatment[boot.indicies[,w],]
PS.fit = glm(form,data=ppta.boot.data[[w]],family="binomial")
sum.ps.fit = summary(PS.fit)
PS.est = predict(PS.fit,type="response")
stable.est = predict(glm(form2,data=ppta.boot.data[[w]],family="binomial"),type="response")
save.boot.weights[,w] = save.boot.weights[,w]*(boot.treatment[,d]/PS.est + (1-boot.treatment[,d])/(1-PS.est))
save.boot.stable[,w] = save.boot.stable[,w]*(boot.treatment[,d]*(stable.est/PS.est) + (1-boot.treatment[,d])*((1-stable.est)/(1-PS.est)))
save.boot.overlap[,w] = save.boot.overlap[,w]*(boot.treatment[,d]*(1- PS.est) + (1-boot.treatment[,d])*PS.est)
}
}
###############################################
# PPTA
#draws from PS distribution
posteriors = as.matrix(MCMClogit(form,data=data.matrix,burnin=K/2,mcmc=K*20,thin=20))
# NOTE(review): paste() defaults to sep=" ", so this filename contains
# spaces, e.g. "~/final_app_ 1 _posteriors.R"; paste0() was likely intended.
save(posteriors,file=paste("~/final_app_",d,"_posteriors.R"))
covars = as.matrix(cbind(rep(1,n),data.matrix[,-1]))
ppta.PS.est = expit(covars%*%t(posteriors)) #PS estimated from draws of alpha
# bootstrapping for PPTA: posterior PS draws for each bootstrap replicate
if(bootstrap){
ppta.boot.posteriors = lapply(ppta.boot.data,function(x){as.matrix(MCMClogit(form,data=x,burnin=K/10,mcmc=K*10,thin=10))})
ppta.boot.covars = lapply(ppta.boot.data,function(x){as.matrix(cbind(rep(1,n),x[,-1]))})
ppta.boot.PS = mapply(function(x,y){as.matrix(expit(x%*%t(y)))},x = ppta.boot.covars,y=ppta.boot.posteriors,SIMPLIFY = FALSE)
ppta.boot.PS = simplify2array(ppta.boot.PS)
}
# Crossing probability: P(opposite treatment) under draw k; bootstrap
# membership is a Bernoulli draw accumulated across time points.
#calculate in/out group
for(k in 1:K){
cross.prob = treatment[,d]*(1-ppta.PS.est[,k]) + (1-treatment[,d])*ppta.PS.est[,k]
cross.probs[,k,d] = cross.prob
if(bootstrap){
ppta.boot.treatment = apply(boot.indicies,2,function(x){treatment[x,d]})
ppta.boot.cross.prob = apply(matrix(1:W,ncol=W),2,function(x){ppta.boot.treatment[,x]*(1-ppta.boot.PS[,k,x])+(1-ppta.boot.treatment[,x])*ppta.boot.PS[,k,x]})
ppta.boot.save.membership[,k,] =ppta.boot.save.membership[,k,]*apply(ppta.boot.cross.prob,2,function(x){rbinom(n,1,x)})
}
}
# Slide the covariate window forward: next iteration conditions on the
# next treatment/time-varying covariates plus the current ones.
#updating covariates if not last time point
if(d<D){
treatment.considered = data.frame(treatment[,(d+1)],treatment[,d])
time.covars.considered = data.frame(timecovar[[d+1]],timecovar[[d]])
names(treatment.considered) = paste("treat.",seq(from=(d+1),to=(d)),sep="")
names(time.covars.considered) = paste(rep(names(data.frame(timecovar[[d+1]])),2),rep(seq(from=(d+1),to=(d)),each=2),sep=".")
}
}
# Persist the fitted weights and the bootstrap index draws for reuse.
save(save.weights,file = "~/save.weights.R")
save(boot.indicies,file="~/boot.indicies.R")
save(save.boot.weights,file="~/save.boot.weights.R")
###############################################
# effect estimation
# Outcome analysis data: cumulative treatment per subject, Poisson outcome,
# exposure offset, plus the per-replicate bootstrapped copies.
#creating necessary data structures
outcome = currentdata$outcome
offset = currentdata$offset
cum.treat = apply(treatment,1,sum,na.rm=TRUE)
treat.out = data.frame(cum.treat,outcome,offset)
treat.out$outcome.rate = outcome/offset
treat.out.list = apply(boot.indicies,2,function(x,data){data[x,]},data=treat.out)
# function for calculating bootstrapped SE
# Fit a weighted Poisson model of the outcome on cumulative treatment (with
# a log-exposure offset) for bootstrap replicate `x`, and return the
# treatment coefficient. Returns NA when the fitted model degenerates to an
# intercept-only coefficient table.
coef.fxn = function(x, weights, data) {
  rep.weights <- weights[, x]
  rep.data <- data[[x]]
  rep.fit <- glm(outcome ~ cum.treat + offset(log(offset)),
                 data = rep.data, weights = rep.weights, family = "poisson")
  coef.table <- summary(rep.fit)$coef
  if (dim(coef.table)[1] > 1) coef.table[2, 1] else NA
}
# For each weighting scheme: collapse the per-time-point weights to a single
# cumulative product per subject, fit the weighted Poisson outcome model,
# and summarize the bootstrap distribution of the treatment coefficient.
#####IPW
print("IPW")
outcome.weights = apply(save.weights,1,prod,na.rm=TRUE)
ipw.weights = outcome.weights
ipw.fit = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=outcome.weights)
ipw.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.weights,data=treat.out.list)
ipw.preddelta = summary(ipw.fit)$coef[2,1]
ipw.predse = summary(ipw.fit)$coef[2,2]
ipw.bootse = sd(ipw.boots,na.rm=TRUE)
ipw.lower = quantile(ipw.boots,prob=0.025)
ipw.upper = quantile(ipw.boots,prob=0.975)
##################
#overlap weighted regression
print("overlap")
outcome.weights = apply(save.overlap,1,prod,na.rm=TRUE)
overlap.weights = outcome.weights
overlap.fit = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=overlap.weights)
overlap.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.overlap,data=treat.out.list)
overlap.preddelta = summary(overlap.fit)$coef[2,1]
overlap.predse = summary(overlap.fit)$coef[2,2]
overlap.bootse = sd(overlap.boots,na.rm=TRUE)
overlap.lower = quantile(overlap.boots,prob=0.025)
overlap.upper = quantile(overlap.boots,prob=0.975)
###############################
#stablized weighting
print("stabilized")
outcome.weights = apply(save.stable,1,prod,na.rm=TRUE)
stabilized.weights = outcome.weights
stable.fit2 = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=outcome.weights)
stable.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.stable,data=treat.out.list)
stabilized.preddelta = summary(stable.fit2)$coef[2,1]
stabilized.predse = summary(stable.fit2)$coef[2,2]
stabilized.bootse = sd(stable.boots,na.rm=TRUE)
stabilized.lower = quantile(stable.boots,prob=0.025)
stabilized.upper = quantile(stable.boots,prob=0.975)
####################
#ppta
# PPTA: for each posterior draw k, draw a pseudo-trial membership from the
# cumulative crossing probabilities and fit the unweighted outcome model on
# the in-group; draws with too few members (<= 5) are recorded as failures.
print("PPTA")
mean.boots = NA
ppta.fail = rep(0,K)
ppta.fit = rep(NA,K)
ppta.boot.fit = matrix(NA,nrow = W,ncol = K)
final.cross.probs = apply(cross.probs,c(1,2),prod)
final.membership = apply(final.cross.probs,2,function(x){rbinom(n,1,prob=x)})
ppta.final.size = apply(final.membership,2,sum)
for(k in 1:K){
is.in = final.membership[,k]
if (sum(is.in)>5){
treat.out.in = treat.out[is.in==1,]
ppta.fit[k] = coef(glm(outcome~cum.treat+offset(log(offset)),data = treat.out.in,family="poisson"))[2]
} else {
ppta.fit[k] = NA
ppta.fail[k] = 1}
}
# Fit the Poisson outcome model on the PPTA in-group of one bootstrap
# replicate and return the cumulative-treatment coefficient. `x` is a 0/1
# membership vector; NA is returned when the in-group is too small (<= 10).
ppta.fxn = function(x, boot.data) {
  if (sum(x) <= 10) {
    return(NA)
  }
  in.group <- boot.data[x == 1, ]
  group.fit <- glm(outcome ~ cum.treat + offset(log(offset)),
                   data = in.group, family = "poisson")
  coef(group.fit)[2]
}
# Repeat the PPTA fits on every bootstrap replicate, using the replicate's
# accumulated membership draws.
for(w in 1:W){
#estimate ATE
boot.data = treat.out[boot.indicies[,w],]
boot.is.in = matrix(ppta.boot.save.membership[,,w],nrow=n)
ppta.boot.fit[w,] = apply(boot.is.in,2,ppta.fxn,boot.data = boot.data)
}
# Summaries: failure rate across draws, marginal membership probability per
# subject, and share of subjects ever in the pseudo-trial.
ppta.fail.perc = mean(ppta.fail)
ppta.marginal = apply(final.membership,1,mean,na.rm=TRUE)
ppta.once = mean(apply(final.membership,1,max,na.rm=TRUE),na.rm=TRUE)
ppta.preddelta = NA
ppta.predse = NA
ppta.boot.predse = NA
mean.boots = NA
# Point estimate: mean over posterior draws; SE and 95% interval from the
# bootstrap distribution of the per-replicate means.
if(any(!is.na(ppta.fit))){
ppta.preddelta = mean(ppta.fit,na.rm=TRUE)
ppta.predse = sd(apply(ppta.boot.fit,1,mean,na.rm=TRUE),na.rm=TRUE)
}
if(any(!is.na(ppta.boot.fit))){
ppta.boot.predse = sd(ppta.boot.fit,na.rm=TRUE)
mean.boots = apply(ppta.boot.fit,1,mean,na.rm=TRUE)
ppta.lower = quantile(mean.boots,0.025)
ppta.upper = quantile(mean.boots,0.975)}
###########################
# Bundle per-scheme results (estimate, model SE, bootstrap SE, weights /
# PPTA diagnostics) and persist them.
#save results
result =
list(list(ipw.preddelta,ipw.predse,ipw.bootse,ipw.weights),list(ppta.preddelta,ppta.predse,
ppta.marginal,ppta.once,ppta.size,
ppta.treated.size,ppta.control.size,ppta.fail.perc),list(overlap.preddelta,
overlap.predse,overlap.bootse,overlap.weights),list(stabilized.preddelta,stabilized.predse,stabilized.bootse,stabilized.weights))
save(result,file="~/final_app_result.R")
| /runIPW_boot_app.R | no_license | shirleyxliao/ppta-2019 | R | false | false | 10,509 | r | #loading data
load("~/simulated_app_data.R")
currentdata = new.data
#loading required packages
require(survey)
require(MCMCpack)
# Inverse logit: maps real-valued x to a probability in (0, 1). Rewritten as
# 1/(1+exp(-x)) so large positive x returns 1 instead of the NaN produced by
# exp(x)/(1+exp(x)) when exp(x) overflows to Inf; values agree elsewhere.
expit = function(x){1/(1+exp(-x))}
#parameter specifications
bootstrap=TRUE
K=1000
D = dim(currentdata$treatment)[2]
n = dim(currentdata$fixed.X)[1]
W = 100
# initializing data structures
result = list()
#ipw variables
save.weights = matrix(NA,nrow=n,ncol=D)
save.boot.weights = matrix(1,nrow=n,ncol=W)
#overlap variables
save.overlap = matrix(NA,nrow=n,ncol=D)
save.boot.overlap = matrix(1,nrow=n,ncol=W)
#ppta variables
save.membership = array(NA,dim = c(n,K,D))
ppta.boot.save.membership = array(1,dim=c(n,K,W))
cross.probs = array(NA,dim = c(n,K,D))
ppta.size = matrix(NA,nrow=D,ncol=K)
ppta.treated.size = matrix(NA,nrow=D,ncol=K)
ppta.control.size = matrix(NA,nrow=D,ncol=K)
#stabilized variables
save.stable = matrix(NA,nrow=n,ncol=D)
save.boot.stable = matrix(1,nrow=n,ncol=W)
#specifying input data
treatment = data.frame(currentdata$treatment)
fixedcovar = data.frame(currentdata$fixed.X)
timecovar = currentdata$time.X
#naming input data
names(fixedcovar) = paste("fixed.",names(fixedcovar),sep="")
names(timecovar) = paste("time.",names(timecovar),sep="")
treatment.considered = data.frame(treatment[,1])
names(treatment.considered) = "treat.1"
time.covars.considered = data.frame(timecovar[[1]])
#sampling bootstrap draws
if(bootstrap){
boot.indicies = matrix(sample(1:n,n*W,replace=TRUE),nrow=n,ncol=W)
} else{
W = 1
}
###################################################################
# iterate through time points
for(d in 1:D){
data.matrix = cbind(treatment.considered,fixedcovar,time.covars.considered)
#create datasets with bootstrapped indicies
if(bootstrap){
ppta.boot.data = apply(boot.indicies,2,function(x){data.matrix[x,]})
}
#specify formulas for propensity score model and model for stabilized weights
if(d==1){
no.treat = names(data.matrix)[names(data.matrix)!="treat.1"]
form = paste("treat.1 ~",paste(no.treat,collapse="+"))
form2 = "treat.1~1"
} else {
current.treat = paste("treat.",d,sep="")
no.treat = names(data.matrix)[names(data.matrix)!=current.treat]
form = paste(current.treat,"~", paste(no.treat,collapse="+"))
form2 = paste(current.treat,"~", paste(no.treat[grep("treat*",no.treat)],collapse="+"))
}
####################################################
# ALL WEIGHTING SCHEMES
PS.fit = glm(form,data=data.matrix,family="binomial")
sum.ps.fit = summary(PS.fit)
ipw.PS.est = predict(PS.fit,type="response")
stable.fit = glm(form2,data=data.matrix,family="binomial")
stable.est = predict(stable.fit,type="response")
###calculate ipw/overlap/stable weights
save.weights[,d] = treatment[,d]/ipw.PS.est + (1-treatment[,d])/(1-ipw.PS.est)
save.overlap[,d] = treatment[,d]*(1- ipw.PS.est) + (1-treatment[,d])*ipw.PS.est
save.stable[,d] = treatment[,d]*(stable.est/ipw.PS.est) + (1-treatment[,d])*((1-stable.est)/(1-ipw.PS.est))
### bootstrapped MLE
if(bootstrap){
for (w in 1:W){
boot.treatment = treatment[boot.indicies[,w],]
PS.fit = glm(form,data=ppta.boot.data[[w]],family="binomial")
sum.ps.fit = summary(PS.fit)
PS.est = predict(PS.fit,type="response")
stable.est = predict(glm(form2,data=ppta.boot.data[[w]],family="binomial"),type="response")
save.boot.weights[,w] = save.boot.weights[,w]*(boot.treatment[,d]/PS.est + (1-boot.treatment[,d])/(1-PS.est))
save.boot.stable[,w] = save.boot.stable[,w]*(boot.treatment[,d]*(stable.est/PS.est) + (1-boot.treatment[,d])*((1-stable.est)/(1-PS.est)))
save.boot.overlap[,w] = save.boot.overlap[,w]*(boot.treatment[,d]*(1- PS.est) + (1-boot.treatment[,d])*PS.est)
}
}
###############################################
# PPTA
#draws from PS distribution
posteriors = as.matrix(MCMClogit(form,data=data.matrix,burnin=K/2,mcmc=K*20,thin=20))
save(posteriors,file=paste("~/final_app_",d,"_posteriors.R"))
covars = as.matrix(cbind(rep(1,n),data.matrix[,-1]))
ppta.PS.est = expit(covars%*%t(posteriors)) #PS estimated from draws of alpha
# boostrapping for PPTA
if(bootstrap){
ppta.boot.posteriors = lapply(ppta.boot.data,function(x){as.matrix(MCMClogit(form,data=x,burnin=K/10,mcmc=K*10,thin=10))})
ppta.boot.covars = lapply(ppta.boot.data,function(x){as.matrix(cbind(rep(1,n),x[,-1]))})
ppta.boot.PS = mapply(function(x,y){as.matrix(expit(x%*%t(y)))},x = ppta.boot.covars,y=ppta.boot.posteriors,SIMPLIFY = FALSE)
ppta.boot.PS = simplify2array(ppta.boot.PS)
}
#calculate in/out group
for(k in 1:K){
cross.prob = treatment[,d]*(1-ppta.PS.est[,k]) + (1-treatment[,d])*ppta.PS.est[,k]
cross.probs[,k,d] = cross.prob
if(bootstrap){
ppta.boot.treatment = apply(boot.indicies,2,function(x){treatment[x,d]})
ppta.boot.cross.prob = apply(matrix(1:W,ncol=W),2,function(x){ppta.boot.treatment[,x]*(1-ppta.boot.PS[,k,x])+(1-ppta.boot.treatment[,x])*ppta.boot.PS[,k,x]})
ppta.boot.save.membership[,k,] =ppta.boot.save.membership[,k,]*apply(ppta.boot.cross.prob,2,function(x){rbinom(n,1,x)})
}
}
#updating covariates if not last time point
if(d<D){
treatment.considered = data.frame(treatment[,(d+1)],treatment[,d])
time.covars.considered = data.frame(timecovar[[d+1]],timecovar[[d]])
names(treatment.considered) = paste("treat.",seq(from=(d+1),to=(d)),sep="")
names(time.covars.considered) = paste(rep(names(data.frame(timecovar[[d+1]])),2),rep(seq(from=(d+1),to=(d)),each=2),sep=".")
}
}
save(save.weights,file = "~/save.weights.R")
save(boot.indicies,file="~/boot.indicies.R")
save(save.boot.weights,file="~/save.boot.weights.R")
###############################################
# effect estimation
#creating necessary data structures
outcome = currentdata$outcome
offset = currentdata$offset
cum.treat = apply(treatment,1,sum,na.rm=TRUE)
treat.out = data.frame(cum.treat,outcome,offset)
treat.out$outcome.rate = outcome/offset
treat.out.list = apply(boot.indicies,2,function(x,data){data[x,]},data=treat.out)
# function for calculating bootstrapped SE
# Fit a weighted Poisson model of the outcome on cumulative treatment (with
# a log-exposure offset) for bootstrap replicate `x`, and return the
# treatment coefficient. Returns NA when the fitted model degenerates to an
# intercept-only coefficient table.
coef.fxn = function(x, weights, data) {
  rep.weights <- weights[, x]
  rep.data <- data[[x]]
  rep.fit <- glm(outcome ~ cum.treat + offset(log(offset)),
                 data = rep.data, weights = rep.weights, family = "poisson")
  coef.table <- summary(rep.fit)$coef
  if (dim(coef.table)[1] > 1) coef.table[2, 1] else NA
}
#####IPW
print("IPW")
outcome.weights = apply(save.weights,1,prod,na.rm=TRUE)
ipw.weights = outcome.weights
ipw.fit = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=outcome.weights)
ipw.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.weights,data=treat.out.list)
ipw.preddelta = summary(ipw.fit)$coef[2,1]
ipw.predse = summary(ipw.fit)$coef[2,2]
ipw.bootse = sd(ipw.boots,na.rm=TRUE)
ipw.lower = quantile(ipw.boots,prob=0.025)
ipw.upper = quantile(ipw.boots,prob=0.975)
##################
#overlap weighted regression
print("overlap")
outcome.weights = apply(save.overlap,1,prod,na.rm=TRUE)
overlap.weights = outcome.weights
overlap.fit = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=overlap.weights)
overlap.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.overlap,data=treat.out.list)
overlap.preddelta = summary(overlap.fit)$coef[2,1]
overlap.predse = summary(overlap.fit)$coef[2,2]
overlap.bootse = sd(overlap.boots,na.rm=TRUE)
overlap.lower = quantile(overlap.boots,prob=0.025)
overlap.upper = quantile(overlap.boots,prob=0.975)
###############################
#stablized weighting
print("stabilized")
outcome.weights = apply(save.stable,1,prod,na.rm=TRUE)
stabilized.weights = outcome.weights
stable.fit2 = glm(outcome~cum.treat+offset(log(offset)),data=treat.out,family="poisson",weights=outcome.weights)
stable.boots = apply(matrix(1:W,nrow=1),2,coef.fxn,weights=save.boot.stable,data=treat.out.list)
stabilized.preddelta = summary(stable.fit2)$coef[2,1]
stabilized.predse = summary(stable.fit2)$coef[2,2]
stabilized.bootse = sd(stable.boots,na.rm=TRUE)
stabilized.lower = quantile(stable.boots,prob=0.025)
stabilized.upper = quantile(stable.boots,prob=0.975)
####################
#ppta
print("PPTA")
mean.boots = NA
ppta.fail = rep(0,K)
ppta.fit = rep(NA,K)
ppta.boot.fit = matrix(NA,nrow = W,ncol = K)
final.cross.probs = apply(cross.probs,c(1,2),prod)
final.membership = apply(final.cross.probs,2,function(x){rbinom(n,1,prob=x)})
ppta.final.size = apply(final.membership,2,sum)
for(k in 1:K){
is.in = final.membership[,k]
if (sum(is.in)>5){
treat.out.in = treat.out[is.in==1,]
ppta.fit[k] = coef(glm(outcome~cum.treat+offset(log(offset)),data = treat.out.in,family="poisson"))[2]
} else {
ppta.fit[k] = NA
ppta.fail[k] = 1}
}
# Fit the Poisson outcome model on the PPTA in-group of one bootstrap
# replicate and return the cumulative-treatment coefficient. `x` is a 0/1
# membership vector; NA is returned when the in-group is too small (<= 10).
ppta.fxn = function(x, boot.data) {
  if (sum(x) <= 10) {
    return(NA)
  }
  in.group <- boot.data[x == 1, ]
  group.fit <- glm(outcome ~ cum.treat + offset(log(offset)),
                   data = in.group, family = "poisson")
  coef(group.fit)[2]
}
for(w in 1:W){
#estimate ATE
boot.data = treat.out[boot.indicies[,w],]
boot.is.in = matrix(ppta.boot.save.membership[,,w],nrow=n)
ppta.boot.fit[w,] = apply(boot.is.in,2,ppta.fxn,boot.data = boot.data)
}
ppta.fail.perc = mean(ppta.fail)
ppta.marginal = apply(final.membership,1,mean,na.rm=TRUE)
ppta.once = mean(apply(final.membership,1,max,na.rm=TRUE),na.rm=TRUE)
ppta.preddelta = NA
ppta.predse = NA
ppta.boot.predse = NA
mean.boots = NA
if(any(!is.na(ppta.fit))){
ppta.preddelta = mean(ppta.fit,na.rm=TRUE)
ppta.predse = sd(apply(ppta.boot.fit,1,mean,na.rm=TRUE),na.rm=TRUE)
}
if(any(!is.na(ppta.boot.fit))){
ppta.boot.predse = sd(ppta.boot.fit,na.rm=TRUE)
mean.boots = apply(ppta.boot.fit,1,mean,na.rm=TRUE)
ppta.lower = quantile(mean.boots,0.025)
ppta.upper = quantile(mean.boots,0.975)}
###########################
#save results
result =
list(list(ipw.preddelta,ipw.predse,ipw.bootse,ipw.weights),list(ppta.preddelta,ppta.predse,
ppta.marginal,ppta.once,ppta.size,
ppta.treated.size,ppta.control.size,ppta.fail.perc),list(overlap.preddelta,
overlap.predse,overlap.bootse,overlap.weights),list(stabilized.preddelta,stabilized.predse,stabilized.bootse,stabilized.weights))
save(result,file="~/final_app_result.R")
|
############################
### function: plotMixture()
############################
plotMixture=function(fit,dist="overall",curve="survival",
    xlab=NULL,ylab=NULL,main=NULL,col=NULL,lty=NULL,lwd=1,axes=T){
  ## Plot the requested curve of a fitted mixture log-AFT model.
  ## The fit is first augmented with its evaluated curves, then handed to
  ## the generic NPMLE plotting routine (defined elsewhere in the package).
  augmented <- c(fit, curvesLogAFT(fit))
  plotNPMLEsurv(est = augmented, dist = dist, curve = curve, type = "l",
                xlab = xlab, ylab = ylab, main = main,
                col = col, lty = lty, lwd = lwd, axes = axes)
}
############################
### function: curvesLogAFT()
############################
## Evaluate survival/density/hazard curves of a fitted mixture log-AFT
## model for every observed covariate group.
## Returns list(surv = per-group curve data.frames, PD, PC) where
## PD = rounded mixture probability p1 and PC = 1 - PD.
## Relies on dw.fun() / Sw.fun() defined elsewhere in the package.
curvesLogAFT=function(fit){
n.group=length(fit$groups.obs$labels)
## Group labels are comma-separated covariate values; rebuild a design
## matrix with a leading intercept column.
buffer=apply(as.matrix(fit$groups.obs$labels),1,strsplit,split=",")
matrix.cov=matrix(1,length(buffer),1+length(buffer[[1]][[1]]))
for(k in 1:length(buffer)) matrix.cov[k,]=c(1,as.numeric(buffer[[k]][[1]]))
### "p1", "xbeta", "xgamma", "xalpha", "xq"
## p1 = per-group mixture probability; defaults to 1 when the model has no
## logistic (beta) component.
p1=as.matrix(rep(1,dim(matrix.cov)[1]))
if (!is.null(fit$covariates$beta)){
xbeta=as.matrix(matrix.cov[,fit$covariates$beta])%*%as.vector(fit$par$beta)
p1=exp(xbeta)/(1+exp(xbeta))
if(!is.null(fit$mixturetype)){
## Groups flagged mixturetype == 1 are forced to p1 = 1.
index=which(fit$mixturetype==1)
if(length(index)>0) p1[index,]=1
}
}
## Location (gamma), log-scale (alpha) and shape (q) linear predictors.
xgamma=as.matrix(matrix.cov[,fit$covariates$gamma])%*%as.vector(fit$par$gamma)
xalpha=as.matrix(matrix.cov[,fit$covariates$alpha])%*%as.vector(fit$par$alpha)
if(is.null(fit$fix.par$q)){
xq=as.matrix(matrix.cov[,fit$covariates$q])%*%as.vector(fit$par$q)
}else{
xq=matrix(fit$fix.par$q,n.group)
}
### "surv"
## Evaluate the curves on a fine grid over the observed time range.
time=seq(fit$minmax.time[1],fit$minmax.time[2],by=0.01)
y=log(time-fit$time.origin)
surv=list()
for (i in 1:n.group){
## Standardised log-time residual for group i.
w=(y-xgamma[i])/exp(xalpha[i])
q=rep(xq[i],length(w))
surv1=as.data.frame(matrix(NA,length(time),7))
names(surv1)=c("time","survival","survivalD","density","densityD","hazard","hazardD")
surv1[,"time"]=time
## The "*D" columns are the component-distribution curves.
surv1[,"densityD"]=dw.fun(w,q)/(time*exp(xalpha[i]))
surv1[,"survivalD"]=Sw.fun(w,q)
surv1[,"hazardD"]=surv1[,"densityD"]/surv1[,"survivalD"]
## Backfill NA hazards (e.g. 0/0) with the last finite value before them.
index=which(is.na(surv1[,"hazardD"]))
if(length(index)>0){
if(min(index)>1) surv1[index,"hazardD"]=surv1[min(index)-1,"hazardD"]
}
## Mixture (overall) curves: the 1 - p1 fraction contributes survival 1.
surv1[,"density"]=p1[i]*surv1[,"densityD"]
surv1[,"survival"]=p1[i]*surv1[,"survivalD"]+(1-p1[i])
surv1[,"hazard"]=surv1[,"density"]/surv1[,"survival"]
surv[[i]]=surv1
}
names(surv)=fit$groups.obs$labels
### "PD", "PC"
PD=round(as.vector(p1),3)
PC=1-PD
return(list(surv=surv,PD=PD,PC=PC))
}
| /R/plotMixture.R | no_license | cran/MixtureRegLTIC | R | false | false | 2,487 | r | ############################
### function: plotMixture()
############################
plotMixture=function(fit,dist="overall",curve="survival",
    xlab=NULL,ylab=NULL,main=NULL,col=NULL,lty=NULL,lwd=1,axes=T){
  ## Render one curve type of a fitted mixture log-AFT model.
  ## curvesLogAFT() supplies the evaluated curves; plotNPMLEsurv()
  ## (defined elsewhere in the package) does the actual drawing.
  est.full <- c(fit, curvesLogAFT(fit))
  plotNPMLEsurv(
    est = est.full, dist = dist, curve = curve, type = "l",
    xlab = xlab, ylab = ylab, main = main,
    col = col, lty = lty, lwd = lwd, axes = axes
  )
}
############################
### function: curvesLogAFT()
############################
## Evaluate survival/density/hazard curves of a fitted mixture log-AFT
## model for every observed covariate group; returns list(surv, PD, PC)
## with PD = rounded mixture probability p1 and PC = 1 - PD.
## Depends on dw.fun() / Sw.fun() defined elsewhere in the package.
curvesLogAFT=function(fit){
n.group=length(fit$groups.obs$labels)
## Rebuild the group design matrix (intercept + comma-separated values).
buffer=apply(as.matrix(fit$groups.obs$labels),1,strsplit,split=",")
matrix.cov=matrix(1,length(buffer),1+length(buffer[[1]][[1]]))
for(k in 1:length(buffer)) matrix.cov[k,]=c(1,as.numeric(buffer[[k]][[1]]))
### "p1", "xbeta", "xgamma", "xalpha", "xq"
## p1 = mixture probability per group; 1 when no logistic component.
p1=as.matrix(rep(1,dim(matrix.cov)[1]))
if (!is.null(fit$covariates$beta)){
xbeta=as.matrix(matrix.cov[,fit$covariates$beta])%*%as.vector(fit$par$beta)
p1=exp(xbeta)/(1+exp(xbeta))
if(!is.null(fit$mixturetype)){
## Groups flagged mixturetype == 1 are forced to p1 = 1.
index=which(fit$mixturetype==1)
if(length(index)>0) p1[index,]=1
}
}
## Location, log-scale and shape linear predictors per group.
xgamma=as.matrix(matrix.cov[,fit$covariates$gamma])%*%as.vector(fit$par$gamma)
xalpha=as.matrix(matrix.cov[,fit$covariates$alpha])%*%as.vector(fit$par$alpha)
if(is.null(fit$fix.par$q)){
xq=as.matrix(matrix.cov[,fit$covariates$q])%*%as.vector(fit$par$q)
}else{
xq=matrix(fit$fix.par$q,n.group)
}
### "surv"
## Fine time grid over the observed range.
time=seq(fit$minmax.time[1],fit$minmax.time[2],by=0.01)
y=log(time-fit$time.origin)
surv=list()
for (i in 1:n.group){
w=(y-xgamma[i])/exp(xalpha[i])
q=rep(xq[i],length(w))
surv1=as.data.frame(matrix(NA,length(time),7))
names(surv1)=c("time","survival","survivalD","density","densityD","hazard","hazardD")
surv1[,"time"]=time
## "*D" columns: component-distribution curves.
surv1[,"densityD"]=dw.fun(w,q)/(time*exp(xalpha[i]))
surv1[,"survivalD"]=Sw.fun(w,q)
surv1[,"hazardD"]=surv1[,"densityD"]/surv1[,"survivalD"]
## Backfill NA hazards with the last finite value preceding the gap.
index=which(is.na(surv1[,"hazardD"]))
if(length(index)>0){
if(min(index)>1) surv1[index,"hazardD"]=surv1[min(index)-1,"hazardD"]
}
## Overall mixture curves; the 1 - p1 fraction has survival 1.
surv1[,"density"]=p1[i]*surv1[,"densityD"]
surv1[,"survival"]=p1[i]*surv1[,"survivalD"]+(1-p1[i])
surv1[,"hazard"]=surv1[,"density"]/surv1[,"survival"]
surv[[i]]=surv1
}
names(surv)=fit$groups.obs$labels
### "PD", "PC"
PD=round(as.vector(p1),3)
PC=1-PD
return(list(surv=surv,PD=PD,PC=PC))
}
|
## Fit an elastic-net model (alpha = 0.35) on ReliefF-selected NSCLC
## features and append the fitted regularization path to a log file.
library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/NSCLC.csv",head=T,sep=",")
## Column 1 is the response; columns 4+ are the predictor features.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
## 10-fold CV, Gaussian family; standardize=FALSE -- presumably the
## features are already scaled upstream (confirm).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.35,family="gaussian",standardize=FALSE)
## Redirect console output into the model log; sink() restores stdout.
sink('./Model/EN/ReliefF/NSCLC/NSCLC_046.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/NSCLC/NSCLC_046.R | no_license | leon1003/QSMART | R | false | false | 350 | r | library(glmnet)
## Elastic-net fit (alpha = 0.35) on ReliefF-selected NSCLC features;
## the regularization path is appended to a per-model log file.
mydata = read.table("./TrainingSet/ReliefF/NSCLC.csv",head=T,sep=",")
## Column 1 is the response; columns 4+ are the predictor features.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
## 10-fold CV, Gaussian family, no internal standardization.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.35,family="gaussian",standardize=FALSE)
## Redirect console output into the model log; sink() restores stdout.
sink('./Model/EN/ReliefF/NSCLC/NSCLC_046.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## S4 container for the RENAMECOL command of an EASY ECF script:
## strEqcCommand = raw command string, colInRename = existing column name,
## colOutRename = replacement column name.
setClass("RENAMECOL",
representation = representation(
strEqcCommand = "character",
colInRename = "character",
colOutRename = "character"
),
prototype = prototype(
strEqcCommand = "",
colInRename = "",
colOutRename = ""
)
#contains = c("EcfReader")
)
setGeneric("setRENAMECOL", function(object) standardGeneric("setRENAMECOL"))
## Parse the raw ECF command via EqcReader (defined elsewhere) and copy
## any recognised slot values (colInRename/colOutRename) into the object.
setMethod("setRENAMECOL", signature = (object = "RENAMECOL"), function(object) {
aEqcSlotNamesIn = c("colInRename", "colOutRename")
objEqcReader <- EqcReader(object@strEqcCommand,aEqcSlotNamesIn)
if(length(objEqcReader@lsEqcSlotsOut) > 0) {
for(i in 1:length(objEqcReader@lsEqcSlotsOut)) {
tmpSlot <- names(objEqcReader@lsEqcSlotsOut)[i]
tmpSlotVal <- objEqcReader@lsEqcSlotsOut[[i]]
#if(all(!is.na(tmpSlotVal))) slot(object, tmpSlot) <- tmpSlotVal
## Assign only slots for which the parser returned a non-NA value.
if(any(!is.na(tmpSlotVal))) slot(object, tmpSlot) <- tmpSlotVal
}
}
return(object)
})
#############################################################################################################################
validRENAMECOL <- function(objRNC) {
  ## Validity check for a parsed RENAMECOL command: both the source and
  ## the target column name must have been supplied. Returns TRUE, or
  ## stops with an EASY ERROR message otherwise.
  if (objRNC@colInRename == "") {
    stop(paste(" EASY ERROR:RENAMECOL\n No colInRename defined. Please set colInRename.", sep=""))
  }
  if (objRNC@colOutRename == "") {
    stop(paste(" EASY ERROR:RENAMECOL\n No colOutRename defined. Please set colOutRename.", sep=""))
  }
  return(TRUE)
}
#############################################################################################################################
## Apply the RENAMECOL command: rename column colInRename to colOutRename
## in the GWADATA object (only if the target name is not already present)
## and maintain the "numColRenamed" counter in the REPORT object.
## Returns list(objGWA, objREPORT).
RENAMECOL.run <- function(objRNC, objGWA, objREPORT) {
colInRename <- objRNC@colInRename
colOutRename <- objRNC@colOutRename
## Locate source/target names in the current GWA header.
iMatchIn = match(objRNC@colInRename,objGWA@aHeader)
iMatchOut = match(objRNC@colOutRename,objGWA@aHeader)
isAnyRenamed = FALSE
if(!is.na(iMatchIn) & is.na(iMatchOut)) {
### Column must be renamed, colOut is not in table!
#names(objGWA@tblGWA)[iMatchIn] <- objGWA@aHeader[iMatchIn] <- objRNC@colOutRename
objGWA <- GWADATA.renamecol(objGWA, colInRename, colOutRename)
isAnyRenamed = TRUE
# } else if(!is.na(iMatchIn) & !is.na(iMatchOut)) {
# ### Column colInRename AND colOutRename are already in table
# names(objGWA@tblGWA)[iMatchOut] <- objGWA@aHeader[iMatchOut] <- paste(objGWA@aHeader[iMatchOut],".old",sep="")
# names(objGWA@tblGWA)[iMatchIn] <- objGWA@aHeader[iMatchIn] <- objRNC@colOutRename
# isAnyRenamed = TRUE
} else {
## Renaming impossible: report the precise reason.
if(!is.na(iMatchOut))
stop(paste("EASY ERROR:RENAMECOL\n New column name \n ",colOutRename,"\n does already exist!!!\n", sep=""))
if(is.na(iMatchIn))
stop(paste("EASY ERROR:RENAMECOL\n Column \n ",colInRename,"\n does not exist!!!\n", sep=""))
}
## Initialise the report counter on first use (or when it reads "NA").
if(!("numColRenamed" %in% names(objREPORT@tblReport))) objREPORT <- REPORT.addval(objREPORT, "numColRenamed", 0)
else if(REPORT.getval(objREPORT, "numColRenamed") == "NA") objREPORT <- REPORT.addval(objREPORT, "numColRenamed", 0)
if(isAnyRenamed) {
## Update Report numColRenamed = numColRenamed + 1
strNumColRenamed = REPORT.getval(objREPORT, "numColRenamed")
numColRenamedNew = as.numeric(strNumColRenamed) + 1
objREPORT <- REPORT.setval(objREPORT, "numColRenamed", numColRenamedNew)
}
return(list(objGWA,objREPORT))
}
RENAMECOL <- function(strEqcCommand){
  ## Constructor wrapper: build the S4 object from the raw ECF command,
  ## populate its slots via setRENAMECOL(), and abort early when invalid.
  obj <- new("RENAMECOL", strEqcCommand = strEqcCommand)
  obj <- setRENAMECOL(obj)
  validRENAMECOL(obj)  # stops with an EASY ERROR when a slot is missing
  return(obj)
}
# setValidity("RENAMECOL", function(object){
# print("RENAMECOL-CHECK")
# print(TRUE)
# return(TRUE)
# })
| /EasyQC/R/clsRENAMECOL3.r | no_license | barbarathorslund/warfarin | R | false | false | 3,862 | r | setClass("RENAMECOL",
representation = representation(
strEqcCommand = "character",
colInRename = "character",
colOutRename = "character"
),
prototype = prototype(
strEqcCommand = "",
colInRename = "",
colOutRename = ""
)
#contains = c("EcfReader")
)
setGeneric("setRENAMECOL", function(object) standardGeneric("setRENAMECOL"))
## Parse the raw ECF command via EqcReader (defined elsewhere) and copy
## any recognised slot values (colInRename/colOutRename) into the object.
setMethod("setRENAMECOL", signature = (object = "RENAMECOL"), function(object) {
aEqcSlotNamesIn = c("colInRename", "colOutRename")
objEqcReader <- EqcReader(object@strEqcCommand,aEqcSlotNamesIn)
if(length(objEqcReader@lsEqcSlotsOut) > 0) {
for(i in 1:length(objEqcReader@lsEqcSlotsOut)) {
tmpSlot <- names(objEqcReader@lsEqcSlotsOut)[i]
tmpSlotVal <- objEqcReader@lsEqcSlotsOut[[i]]
#if(all(!is.na(tmpSlotVal))) slot(object, tmpSlot) <- tmpSlotVal
## Assign only slots for which the parser returned a non-NA value.
if(any(!is.na(tmpSlotVal))) slot(object, tmpSlot) <- tmpSlotVal
}
}
return(object)
})
#############################################################################################################################
validRENAMECOL <- function(objRNC) {
  ## Check the parsed RENAMECOL command: the source column name and the
  ## target column name are both mandatory. TRUE on success, error if not.
  if (objRNC@colInRename == "") {
    stop(paste(" EASY ERROR:RENAMECOL\n No colInRename defined. Please set colInRename.", sep=""))
  }
  if (objRNC@colOutRename == "") {
    stop(paste(" EASY ERROR:RENAMECOL\n No colOutRename defined. Please set colOutRename.", sep=""))
  }
  return(TRUE)
}
#############################################################################################################################
## Apply the RENAMECOL command: rename colInRename to colOutRename in the
## GWADATA object (only if the target name is free) and keep the
## "numColRenamed" counter in the REPORT object up to date.
## Returns list(objGWA, objREPORT).
RENAMECOL.run <- function(objRNC, objGWA, objREPORT) {
colInRename <- objRNC@colInRename
colOutRename <- objRNC@colOutRename
## Locate source/target names in the current GWA header.
iMatchIn = match(objRNC@colInRename,objGWA@aHeader)
iMatchOut = match(objRNC@colOutRename,objGWA@aHeader)
isAnyRenamed = FALSE
if(!is.na(iMatchIn) & is.na(iMatchOut)) {
### Column must be renamed, colOut is not in table!
#names(objGWA@tblGWA)[iMatchIn] <- objGWA@aHeader[iMatchIn] <- objRNC@colOutRename
objGWA <- GWADATA.renamecol(objGWA, colInRename, colOutRename)
isAnyRenamed = TRUE
# } else if(!is.na(iMatchIn) & !is.na(iMatchOut)) {
# ### Column colInRename AND colOutRename are already in table
# names(objGWA@tblGWA)[iMatchOut] <- objGWA@aHeader[iMatchOut] <- paste(objGWA@aHeader[iMatchOut],".old",sep="")
# names(objGWA@tblGWA)[iMatchIn] <- objGWA@aHeader[iMatchIn] <- objRNC@colOutRename
# isAnyRenamed = TRUE
} else {
## Renaming impossible: report the precise reason.
if(!is.na(iMatchOut))
stop(paste("EASY ERROR:RENAMECOL\n New column name \n ",colOutRename,"\n does already exist!!!\n", sep=""))
if(is.na(iMatchIn))
stop(paste("EASY ERROR:RENAMECOL\n Column \n ",colInRename,"\n does not exist!!!\n", sep=""))
}
## Initialise the report counter on first use (or when it reads "NA").
if(!("numColRenamed" %in% names(objREPORT@tblReport))) objREPORT <- REPORT.addval(objREPORT, "numColRenamed", 0)
else if(REPORT.getval(objREPORT, "numColRenamed") == "NA") objREPORT <- REPORT.addval(objREPORT, "numColRenamed", 0)
if(isAnyRenamed) {
## Update Report numColRenamed = numColRenamed + 1
strNumColRenamed = REPORT.getval(objREPORT, "numColRenamed")
numColRenamedNew = as.numeric(strNumColRenamed) + 1
objREPORT <- REPORT.setval(objREPORT, "numColRenamed", numColRenamedNew)
}
return(list(objGWA,objREPORT))
}
RENAMECOL <- function(strEqcCommand){
  ## Constructor wrapper around the S4 class: parse the command into a new
  ## object with setRENAMECOL() and validate it before returning.
  parsed <- setRENAMECOL(new("RENAMECOL", strEqcCommand = strEqcCommand))
  validRENAMECOL(parsed)  # errors out if a mandatory slot is missing
  return(parsed)
}
# setValidity("RENAMECOL", function(object){
# print("RENAMECOL-CHECK")
# print(TRUE)
# return(TRUE)
# })
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation}
\alias{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation}
\title{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation Object}
\usage{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation(
explanationType = NULL,
explanation = NULL
)
}
\arguments{
\item{explanationType}{Explanation type}
\item{explanation}{Explanation attribution response details}
}
\value{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation object
}
\description{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Explanation result of the prediction produced by the Model.
}
\concept{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation functions}
| /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1EvaluatedAnnotationExplanation.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 884 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation}
\alias{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation}
\title{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation Object}
\usage{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation(
explanationType = NULL,
explanation = NULL
)
}
\arguments{
\item{explanationType}{Explanation type}
\item{explanation}{Explanation attribution response details}
}
\value{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation object
}
\description{
GoogleCloudAiplatformV1EvaluatedAnnotationExplanation Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Explanation result of the prediction produced by the Model.
}
\concept{GoogleCloudAiplatformV1EvaluatedAnnotationExplanation functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EmptyRemoteFileResponse.r
\docType{data}
\name{EmptyRemoteFileResponse}
\alias{EmptyRemoteFileResponse}
\title{EmptyRemoteFileResponse Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
EmptyRemoteFileResponse
}
\description{
EmptyRemoteFileResponse Class
}
\section{Fields}{
\describe{
\item{\code{message}}{success or failure}
\item{\code{result}}{empty result}
\item{\code{status}}{success or failure}
}}
\keyword{datasets}
| /man/EmptyRemoteFileResponse.Rd | permissive | agaveplatform/r-sdk | R | false | true | 539 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EmptyRemoteFileResponse.r
\docType{data}
\name{EmptyRemoteFileResponse}
\alias{EmptyRemoteFileResponse}
\title{EmptyRemoteFileResponse Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
EmptyRemoteFileResponse
}
\description{
EmptyRemoteFileResponse Class
}
\section{Fields}{
\describe{
\item{\code{message}}{success or failure}
\item{\code{result}}{empty result}
\item{\code{status}}{success or failure}
}}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KeggPathwayView.R
\docType{methods}
\name{arrangePathview}
\alias{arrangePathview}
\title{Kegg pathway view and arrange grobs on page}
\usage{
arrangePathview(
genelist,
pathways = c(),
top = 4,
ncol = 2,
title = NULL,
sub = NULL,
organism = "hsa",
output = ".",
path.archive = ".",
kegg.native = TRUE,
verbose = TRUE
)
}
\arguments{
\item{genelist}{a data frame with columns of ENTREZID, Control and Treatment. The columns
of Control and Treatment represent gene score in Control and Treatment sample.}
\item{pathways}{character vector, the KEGG pathway ID(s), usually 5 digit, may also
include the 3 letter KEGG species code.}
\item{top}{integer, specifying how many top enriched pathways to be visualized.}
\item{ncol}{integer, specifying how many column of figures to be arranged in each page.}
\item{title}{optional string, or grob.}
\item{sub}{optional string, or grob.}
\item{organism}{character, either the kegg code, scientific name or the common name of
the target species. This applies to both pathway and gene.data or cpd.data. When KEGG
ortholog pathway is considered, species="ko". Default species="hsa", it is equivalent
to use either "Homo sapiens" (scientific name) or "human" (common name).}
\item{output}{Path to save plot to.}
\item{path.archive}{character, the directory of KEGG pathway data file (.xml) and image file
(.png). Users may supply their own data files in the same format and naming convention
of KEGG's (species code + pathway id, e.g. hsa04110.xml, hsa04110.png etc) in this
directory. Default path.archive="." (current working directory).}
\item{kegg.native}{logical, whether to render pathway graph as native KEGG graph (.png)
or using graphviz layout engine (.pdf). Default kegg.native=TRUE.}
\item{verbose}{Boolean}
}
\value{
plot on the current device
}
\description{
Kegg pathway view and arrange grobs on page.
}
\examples{
file3 = file.path(system.file("extdata", package = "MAGeCKFlute"),
"testdata/mle.gene_summary.txt")
dd = ReadBeta(file3)
colnames(dd)[2:3] = c("Control", "Treatment")
# arrangePathview(dd, c("hsa00534"), title=NULL, sub=NULL, organism="hsa")
}
\author{
Wubing Zhang
}
| /man/arrangePathview.Rd | no_license | sysyangb/MAGeCKFlute | R | false | true | 2,243 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KeggPathwayView.R
\docType{methods}
\name{arrangePathview}
\alias{arrangePathview}
\title{Kegg pathway view and arrange grobs on page}
\usage{
arrangePathview(
genelist,
pathways = c(),
top = 4,
ncol = 2,
title = NULL,
sub = NULL,
organism = "hsa",
output = ".",
path.archive = ".",
kegg.native = TRUE,
verbose = TRUE
)
}
\arguments{
\item{genelist}{a data frame with columns of ENTREZID, Control and Treatment. The columns
of Control and Treatment represent gene score in Control and Treatment sample.}
\item{pathways}{character vector, the KEGG pathway ID(s), usually 5 digit, may also
include the 3 letter KEGG species code.}
\item{top}{integer, specifying how many top enriched pathways to be visualized.}
\item{ncol}{integer, specifying how many column of figures to be arranged in each page.}
\item{title}{optional string, or grob.}
\item{sub}{optional string, or grob.}
\item{organism}{character, either the kegg code, scientific name or the common name of
the target species. This applies to both pathway and gene.data or cpd.data. When KEGG
ortholog pathway is considered, species="ko". Default species="hsa", it is equivalent
to use either "Homo sapiens" (scientific name) or "human" (common name).}
\item{output}{Path to save plot to.}
\item{path.archive}{character, the directory of KEGG pathway data file (.xml) and image file
(.png). Users may supply their own data files in the same format and naming convention
of KEGG's (species code + pathway id, e.g. hsa04110.xml, hsa04110.png etc) in this
directory. Default path.archive="." (current working directory).}
\item{kegg.native}{logical, whether to render pathway graph as native KEGG graph (.png)
or using graphviz layout engine (.pdf). Default kegg.native=TRUE.}
\item{verbose}{Boolean}
}
\value{
plot on the current device
}
\description{
Kegg pathway view and arrange grobs on page.
}
\examples{
file3 = file.path(system.file("extdata", package = "MAGeCKFlute"),
"testdata/mle.gene_summary.txt")
dd = ReadBeta(file3)
colnames(dd)[2:3] = c("Control", "Treatment")
# arrangePathview(dd, c("hsa00534"), title=NULL, sub=NULL, organism="hsa")
}
\author{
Wubing Zhang
}
|
## cw(): capitalize words in `s`.
## Default: upper-case the first letter of every word, leave the rest as-is.
## strict = TRUE additionally lower-cases the remainder of each word.
## onlyfirst = TRUE upper-cases only the first character of the whole
## string and lower-cases everything after it.
cw <- function(s, strict = FALSE, onlyfirst = FALSE) {
  if (onlyfirst) {
    return(sapply(s, function(x) {
      first_ch <- toupper(substring(x, 1, 1))
      rest_lc  <- tolower(substring(x, 2))
      paste(first_ch, rest_lc, sep = "", collapse = " ")
    }, USE.NAMES = FALSE))
  }
  capitalize_words <- function(words) {
    rest <- substring(words, 2)
    if (strict) rest <- tolower(rest)
    paste(toupper(substring(words, 1, 1)), rest, sep = "", collapse = " ")
  }
  # Split on single spaces, capitalize each word, re-join with spaces.
  sapply(strsplit(s, split = " "), capitalize_words,
         USE.NAMES = !is.null(names(s)))
}
## strextract(): first regex match of `pattern` per element of `str`;
## elements with no match are dropped (regmatches semantics).
strextract <- function(str, pattern) {
  hits <- regexpr(pattern, str)
  regmatches(str, hits)
}
## str_trim_(): strip leading and trailing whitespace (vectorised).
str_trim_ <- function(str) {
  sub("\\s+$", "", sub("^\\s+", "", str))
}
## nozero(): drop zero-length strings, keeping element order.
nozero <- function(x) {
  keep <- nzchar(x)
  x[keep]
}
## checker(): argument guard -- `x` must be a double vector whose length
## is one of `len`. `type` labels the argument in the error message;
## p0c() (defined elsewhere in the package) formats the allowed lengths.
checker <- function(x, type, len) {
  len_ok <- length(x) %in% len
  if (!len_ok) {
    stop(sprintf("%s input should be of length %s", type, p0c(len)), call. = FALSE)
  }
  if (!is.double(x)) {
    stop(sprintf("%s input should be of type double (a number)", type), call. = FALSE)
  }
}
## fmtcheck(): validate that `x` (a decimal-format width) is a single
## non-NA number within the supported range [0, 20].
## Fix: the range error message previously read "fmt must be 0 and 20".
fmtcheck <- function(x) {
  if (!is.double(x) || is.na(x)) stop("fmt must be an integer value", call. = FALSE)
  if (x < 0 || x > 20) stop("fmt must be between 0 and 20", call. = FALSE)
}
# decfmt <- function(pts, fmt) {
# rapply(pts, format, nsmall = fmt, trim = TRUE, how = "list")
# }
## centroid(): return the user-supplied `center` when given; otherwise
## approximate the centroid of a GeoJSON-like object by averaging the
## coordinate values found under geometry$coordinates (or coordinates).
centroid <- function(x, center){
if (!is.null(center)) {
stopifnot(is.numeric(center))
return(center)
} else {
if ("geometry" %in% names(x)) {
obj <- x$geometry$coordinates
# tryasnum <- tryCatch(as.numeric(obj), warning = function(w) w)
## A non-list coordinates entry is already a point: return it as-is.
if (!is(obj, "list")) {
obj
} else {
# sapply(obj, function(z) sapply(z, function(b) b[2]))
## rapply walks arbitrarily nested coordinate lists.
lngs <- rapply(obj, function(x) x[1])
# sapply(obj, function(z) sapply(z, function(b) b[1]))
lats <- rapply(obj, function(x) x[2])
c(mean(as.numeric(lngs)), mean(as.numeric(lats)))
}
} else {
## NOTE(review): element order here (b[2] first) differs from the
## branch above -- confirm the intended lat/lng layout for bare
## `coordinates` objects against the callers.
c(
mean(as.numeric(sapply(x$coordinates, function(z) sapply(z, function(b) b[2])))),
mean(as.numeric(sapply(x$coordinates, function(z) sapply(z, function(b) b[1]))))
)
}
}
}
## check_str(): assert every element of `x` parses as WKT via lint()
## (defined elsewhere in the package). All offending entries are reported
## at once; on success the (single) first element is returned.
check_str <- function(x) {
checklog <- vapply(x, lint, logical(1))
if (!all(checklog)) {
notwkt <- x[!checklog]
notwkt_cs <- vapply(notwkt, class, "")
## Show the class name instead of the value for non-character offenders.
notwkt[notwkt_cs != "character"] <- notwkt_cs[notwkt_cs != "character"]
notwkt <- paste0(notwkt, collapse = "\n")
stop("The following strings are not WKT:\n", notwkt, call. = FALSE)
}
stopifnot(length(x[[1]]) == 1)
x[[1]]
}
## chek_for_pkg() [sic]: guard for an optional (suggested) dependency.
## Returns invisible(TRUE) when package `x` is available, otherwise stops
## with an installation hint.
chek_for_pkg <- function(x) {
  if (requireNamespace(x, quietly = TRUE)) {
    return(invisible(TRUE))
  }
  stop("Please install ", x, call. = FALSE)
}
| /wellknown/R/zzz.R | no_license | ingted/R-Examples | R | false | false | 2,696 | r | # capitalize words
## cw(): capitalize words. Default upper-cases the first letter of each
## word; strict = TRUE also lower-cases the remainder of each word;
## onlyfirst = TRUE upper-cases only the first character of the whole
## string and lower-cases the rest.
cw <- function(s, strict = FALSE, onlyfirst = FALSE) {
cap <- function(s) paste(toupper(substring(s,1,1)),
{s <- substring(s,2); if(strict) tolower(s) else s}, sep = "", collapse = " " )
if(!onlyfirst){
sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
} else {
sapply(s, function(x)
paste(toupper(substring(x,1,1)),
tolower(substring(x,2)),
sep="", collapse=" "), USE.NAMES=F)
}
}
# extract pattern from a string
## First regex match per element; non-matching elements are dropped.
strextract <- function(str, pattern) regmatches(str, regexpr(pattern, str))
# trim space from beginning and end of strings
str_trim_ <- function(str) gsub("^\\s+|\\s+$", "", str)
# remove zero length strings
nozero <- function(x) {
x[nzchar(x)]
}
## checker(): guard that `x` is a double vector with an allowed length;
## p0c() (defined elsewhere) formats the allowed lengths for the message.
checker <- function(x, type, len) {
if (!length(x) %in% len)
stop(sprintf("%s input should be of length %s", type, p0c(len)), call. = FALSE)
if (!is.double(x))
stop(sprintf("%s input should be of type double (a number)", type), call. = FALSE)
}
## fmtcheck(): validate that `x` (a decimal-format width) is a single
## non-NA number within the supported range [0, 20].
## Fix: the range error message previously read "fmt must be 0 and 20".
fmtcheck <- function(x) {
  if (!is.double(x) || is.na(x)) stop("fmt must be an integer value", call. = FALSE)
  if (x < 0 || x > 20) stop("fmt must be between 0 and 20", call. = FALSE)
}
# decfmt <- function(pts, fmt) {
# rapply(pts, format, nsmall = fmt, trim = TRUE, how = "list")
# }
## centroid(): return `center` when supplied; otherwise approximate the
## centroid of a GeoJSON-like object by averaging its coordinate values.
centroid <- function(x, center){
if (!is.null(center)) {
stopifnot(is.numeric(center))
return(center)
} else {
if ("geometry" %in% names(x)) {
obj <- x$geometry$coordinates
# tryasnum <- tryCatch(as.numeric(obj), warning = function(w) w)
## A non-list coordinates entry is already a point: return it as-is.
if (!is(obj, "list")) {
obj
} else {
# sapply(obj, function(z) sapply(z, function(b) b[2]))
lngs <- rapply(obj, function(x) x[1])
# sapply(obj, function(z) sapply(z, function(b) b[1]))
lats <- rapply(obj, function(x) x[2])
c(mean(as.numeric(lngs)), mean(as.numeric(lats)))
}
} else {
## NOTE(review): element order differs from the branch above (b[2]
## first) -- confirm the intended lat/lng layout for bare
## `coordinates` input.
c(
mean(as.numeric(sapply(x$coordinates, function(z) sapply(z, function(b) b[2])))),
mean(as.numeric(sapply(x$coordinates, function(z) sapply(z, function(b) b[1]))))
)
}
}
}
## check_str(): assert every element parses as WKT via lint() (defined
## elsewhere); reports all offending entries, then returns the first one.
check_str <- function(x) {
checklog <- vapply(x, lint, logical(1))
if (!all(checklog)) {
notwkt <- x[!checklog]
notwkt_cs <- vapply(notwkt, class, "")
notwkt[notwkt_cs != "character"] <- notwkt_cs[notwkt_cs != "character"]
notwkt <- paste0(notwkt, collapse = "\n")
stop("The following strings are not WKT:\n", notwkt, call. = FALSE)
}
stopifnot(length(x[[1]]) == 1)
x[[1]]
}
## chek_for_pkg() [sic]: stop with an install hint when an optional
## package is missing; returns invisible(TRUE) otherwise.
chek_for_pkg <- function(x) {
if (!requireNamespace(x, quietly = TRUE)) {
stop("Please install ", x, call. = FALSE)
} else {
invisible(TRUE)
}
}
|
#' Simplex Projection
#'
#' \code{simplex} returns model statistics computed from given multiple time series
#' using simplex projection. This function performs simplex projections for all
#' possible combinations of \code{E}, \code{tau} and \code{tp}.
#'
#' @inheritParams uic
#' @param lib_var
#' the name or column index of a library (and target) variable.
#' The specified variable is used as a response variable and its time-delay variables are
#' used as explanatory variables.
#' @param alpha
#' the significant level to determine the embedding dimension of reference model
#' (i.e., E0). If \code{alpha = NULL}, E0 is set to E - 1. If \code{0 < alpha < 1}
#' E0 depends on the model results with lower embedding dimensions.
#'
#' @details
#' Transfer entropy is computed as follows:
#' \deqn{
#' \sum_{t} log p(X_{t+tp} | X_{t}, X_{t-\tau}, \ldots, X_{t-(E -1)\tau}, Z_{t}) -
#' log p(X_{t+tp} | X_{t}, X_{t-\tau}, \ldots, X_{t-(E0-1)\tau}, Z_{t})
#' }
#' where \eqn{X} and \eqn{Z} are library and condition variables, respectively.
#'
#' @return
#' A data.frame where each row represents model statistics computed from a parameter set.
#' \tabular{ll}{
#' \code{E} \tab \code{:} embedding dimension \cr
#' \code{E0} \tab \code{:} embedding dimension of reference model \cr
#' \code{tau} \tab \code{:} time-lag \cr
#' \code{tp} \tab \code{:} time prediction horizon \cr
#' \code{nn} \tab \code{:} number of nearest neighbors \cr
#' \code{n_lib} \tab \code{:} number of time indices used for attractor reconstruction \cr
#' \code{n_pred} \tab \code{:} number of time indices used for model predictions \cr
#' \code{rmse} \tab \code{:} root mean squared error \cr
#' \code{te} \tab \code{:} transfer entropy \cr
#' \code{ete} \tab \code{:} effective transfer entropy \cr
#' \code{pval} \tab \code{:} p-value to test alternative hypothesis, te > 0 \cr
#' \code{n_surr} \tab \code{:} number of surrogate data \cr
#' }
#'
#' @seealso \link{xmap}, \link{uic}
#'
#' @examples
#' # simulate logistic map
#' tl <- 400 # time length
#' x <- y <- rep(NA, tl)
#' x[1] <- 0.4
#' y[1] <- 0.2
#' for (t in 1:(tl - 1)) { # causality : x -> y
#' x[t+1] = x[t] * (3.8 - 3.8 * x[t] - 0.0 * y[t])
#' y[t+1] = y[t] * (3.5 - 3.5 * y[t] - 0.1 * x[t])
#' }
#' block <- data.frame(t=1:tl, x=x, y=y)
#'
#' # simplex projection
#' out0 <- simplex(block, lib_var="x", cond_var="y", E=0:8, tau=1, tp=1)
#' out1 <- simplex(block, lib_var="y", cond_var="x", E=0:8, tau=1, tp=1)
#' par(mfrow=c(2, 1))
#' with(out0, plot(E, te, type="b", pch=c(1,16)[1+(pval<0.05)]))
#' with(out1, plot(E, te, type="b", pch=c(1,16)[1+(pval<0.05)]))
#'
## Simplex projection over all combinations of E, tau and tp; a thin R
## wrapper that normalises the inputs and dispatches to the compiled
## backend `_rUIC_npmodel_R`. Interface unchanged; internal cleanups:
## scalar `&&` instead of elementwise `&`, and `if`/`else` instead of a
## scalar `ifelse()`.
simplex = function (
    block, lib = c(1, NROW(block)), pred = lib, group = NULL,
    lib_var = 1, cond_var = NULL,
    norm = 2, E = 1, tau = 1, tp = 0, nn = "e+1", num_surr = 1000, alpha = NULL,
    exclusion_radius = NULL, epsilon = NULL,
    is_naive = FALSE, knn_method = c("KD","BF"))
{
    if (norm < 1) stop("norm must be >= 1.")
    lib  <- rbind(lib)   # coerce a bare (start, end) vector to a one-row matrix
    pred <- rbind(pred)
    ## One group for all rows unless a grouping column is named/indexed.
    if (length(group) == 0) Group <- rep(1, nrow(block))
    if (length(group) != 0) Group <- as.numeric(as.factor(block[,group[1]]))
    ## Normalise the parameter grids.
    E   <- sort(unique(pmax(0, E)))   # embedding dimensions, non-negative
    tau <- unique(pmax(1, tau))       # time lags, at least 1
    tp  <- unique(tp)                 # prediction horizons (may be negative)
    ## p = 0 encodes the infinity (maximum) norm for the backend.
    p <- if (is.finite(norm)) norm else 0
    num_surr <- pmax(0, num_surr)
    KNN <- switch(match.arg(knn_method), "KD"=0, "BF"=1)
    ## nn = "e+1" (default) is encoded as 0; any negative request becomes -1.
    ## `&&` short-circuits, so tolower() is never applied to numeric input.
    if (!is.numeric(nn) && tolower(nn) == "e+1") nn <- 0
    if (nn < 0) nn <- -1
    if (is.null(alpha)) alpha <- 1                       # alpha >= 1 disables the E0 search
    if (is.null(exclusion_radius)) exclusion_radius <- 0
    if (is.null(epsilon)) epsilon <- -1                  # -1 = no epsilon restriction
    X <- as.matrix(block[ lib_var])   # library (and target) variable
    Z <- as.matrix(block[cond_var])   # optional conditioning variables
    if (alpha >= 1 || num_surr == 0) {
        ## Reference dimension fixed at E0 = E - 1: single vectorised call.
        out <- .Call(`_rUIC_npmodel_R`,
            X, X, Z, Group, lib, pred, E, E-1, tau, tp, nn, p, num_surr,
            exclusion_radius, epsilon, is_naive, 0, KNN)
    }
    else {
        ## Data-driven E0: for each (tp, tau), scan E upward and promote E0
        ## to every dimension whose surrogate test is significant.
        out <- NULL
        for (tpi in tp) for (taui in tau) {
            E0 <- 0
            for (Ei in E) {
                outi <- .Call(`_rUIC_npmodel_R`,
                    X, X, Z, Group, lib, pred, Ei, E0, taui, tpi, nn, p,
                    num_surr, exclusion_radius, epsilon, is_naive, 0, KNN)
                if (outi$pval < alpha) E0 <- Ei
                out <- rbind(out, outi)
            }
        }
    }
    return(out)
}
# End | /R/simplex.R | no_license | yutakaos/rUIC | R | false | false | 4,385 | r | #' Simplex Projection
#'
#' \code{simplex} returns model statistics computed from given multiple time series
#' using simplex projection. This function performs simplex projections for all
#' possible combinations of \code{E}, \code{tau} and \code{tp}.
#'
#' @inheritParams uic
#' @param lib_var
#' the name or column index of a library (and target) variable.
#' The specified variable is used as a response variable and its time-delay variables are
#' used as explanatory variables.
#' @param alpha
#' the significant level to determine the embedding dimension of reference model
#' (i.e., E0). If \code{alpha = NULL}, E0 is set to E - 1. If \code{0 < alpha < 1}
#' E0 depends on the model results with lower embedding dimensions.
#'
#' @details
#' Transfer entropy is computed as follows:
#' \deqn{
#' \sum_{t} log p(X_{t+tp} | X_{t}, X_{t-\tau}, \ldots, X_{t-(E -1)\tau}, Z_{t}) -
#' log p(X_{t+tp} | X_{t}, X_{t-\tau}, \ldots, X_{t-(E0-1)\tau}, Z_{t})
#' }
#' where \eqn{X} and \eqn{Z} are library and condition variables, respectively.
#'
#' @return
#' A data.frame where each row represents model statistics computed from a parameter set.
#' \tabular{ll}{
#' \code{E} \tab \code{:} embedding dimension \cr
#' \code{E0} \tab \code{:} embedding dimension of reference model \cr
#' \code{tau} \tab \code{:} time-lag \cr
#' \code{tp} \tab \code{:} time prediction horizon \cr
#' \code{nn} \tab \code{:} number of nearest neighbors \cr
#' \code{n_lib} \tab \code{:} number of time indices used for attractor reconstruction \cr
#' \code{n_pred} \tab \code{:} number of time indices used for model predictions \cr
#' \code{rmse} \tab \code{:} root mean squared error \cr
#' \code{te} \tab \code{:} transfer entropy \cr
#' \code{ete} \tab \code{:} effective transfer entropy \cr
#' \code{pval} \tab \code{:} p-value to test alternative hypothesis, te > 0 \cr
#' \code{n_surr} \tab \code{:} number of surrogate data \cr
#' }
#'
#' @seealso \link{xmap}, \link{uic}
#'
#' @examples
#' # simulate logistic map
#' tl <- 400 # time length
#' x <- y <- rep(NA, tl)
#' x[1] <- 0.4
#' y[1] <- 0.2
#' for (t in 1:(tl - 1)) { # causality : x -> y
#' x[t+1] = x[t] * (3.8 - 3.8 * x[t] - 0.0 * y[t])
#' y[t+1] = y[t] * (3.5 - 3.5 * y[t] - 0.1 * x[t])
#' }
#' block <- data.frame(t=1:tl, x=x, y=y)
#'
#' # simplex projection
#' out0 <- simplex(block, lib_var="x", cond_var="y", E=0:8, tau=1, tp=1)
#' out1 <- simplex(block, lib_var="y", cond_var="x", E=0:8, tau=1, tp=1)
#' par(mfrow=c(2, 1))
#' with(out0, plot(E, te, type="b", pch=c(1,16)[1+(pval<0.05)]))
#' with(out1, plot(E, te, type="b", pch=c(1,16)[1+(pval<0.05)]))
#'
simplex <- function (
block, lib = c(1, NROW(block)), pred = lib, group = NULL,
lib_var = 1, cond_var = NULL,
norm = 2, E = 1, tau = 1, tp = 0, nn = "e+1", num_surr = 1000, alpha = NULL,
exclusion_radius = NULL, epsilon = NULL,
is_naive = FALSE, knn_method = c("KD","BF"))
{
## Simplex projection with optional condition variables; thin R wrapper
## around the compiled rUIC model routine. See the roxygen header above
## for the meaning of each argument and the returned data.frame columns.
if (norm < 1) stop("norm must be >= 1.")
## Coerce (possibly vector) lib/pred index ranges into row matrices.
lib <- rbind(lib)
pred <- rbind(pred)
## group: optional column (name or index) defining independent segments;
## absent => every row belongs to one group.
if (length(group) == 0) {
Group <- rep(1, nrow(block))
} else {
Group <- as.numeric(as.factor(block[, group[1]]))
}
E <- sort(unique(pmax(0, E))) # embedding dimensions, non-negative
tau <- unique(pmax(1, tau)) # time-lags, at least 1
tp <- unique(tp) # prediction horizons (may be negative)
p <- if (is.finite(norm)) norm else 0 # 0 encodes the max (Chebyshev) norm
num_surr <- pmax(0, num_surr)
KNN <- switch(match.arg(knn_method), "KD" = 0, "BF" = 1)
## nn = "e+1" is encoded as 0 (use E+1 neighbors); any negative value as -1.
## && short-circuits, so tolower() is only reached for non-numeric nn.
if (!is.numeric(nn) && tolower(nn) == "e+1") nn <- 0
if (nn < 0) nn <- -1
## NULL sentinels map to the compiled routine's "disabled" codes.
if (is.null(alpha)) alpha <- 1
if (is.null(exclusion_radius)) exclusion_radius <- 0
if (is.null(epsilon)) epsilon <- -1
X <- as.matrix(block[ lib_var]) # library variable(s)
Z <- as.matrix(block[cond_var]) # condition variable(s), may be 0 columns
if (alpha >= 1 || num_surr == 0) {
## Reference embedding fixed at E0 = E - 1: a single vectorized call.
out <- .Call(`_rUIC_npmodel_R`,
X, X, Z, Group, lib, pred, E, E-1, tau, tp, nn, p, num_surr,
exclusion_radius, epsilon, is_naive, 0, KNN)
} else {
## Data-driven E0: for each (tp, tau), E0 advances to the last embedding
## dimension whose surrogate test was significant (pval < alpha).
out <- NULL
for (tpi in tp) for (taui in tau) {
E0 <- 0
for (Ei in E) {
outi <- .Call(`_rUIC_npmodel_R`,
X, X, Z, Group, lib, pred, Ei, E0, taui, tpi, nn, p,
num_surr, exclusion_radius, epsilon, is_naive, 0, KNN)
if (outi$pval < alpha) E0 <- Ei
out <- rbind(out, outi)
}
}
}
return(out)
}
# End |
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 1.11830391696877e-156, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615767180-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 1.11830391696877e-156, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
#' @keywords internal
deconvolve_inputs <- function(inputs, sep, header, out, control, njobs) {
## Helper function to deconvolve all inputs prior to running GIMME
## inputs  : character vector of per-subject file paths; each file holds a
##           time x variables table readable via read.table(sep, header).
## sep     : field separator for read.table()/write.table().
## header  : logical, whether the input files carry a header row.
## out     : existing output directory; deconvolved copies are written into
##           the "deconvolved_inputs" subdirectory created beneath it.
## control : list of settings. Requires a positive numeric TR (seconds);
##           optional method ("bush" default, or "wu") plus method-specific
##           tuning values, which are defaulted below when absent.
## njobs   : number of parallel workers (default: all detected threads).
## Returns : character vector with the paths of the deconvolved files.
#require(parallel) #should be moved to import in NAMESPACE for R package
#require(foreach)
#require(doSNOW)
#require(doParallel)
if (missing(njobs)) {
njobs <- parallel::detectCores(logical=TRUE) #on hyperthreading CPUs, use all threads (typically 2/core)
message("In deconvolve_inputs, njobs not specified. Defaulting to number of cores: ", njobs, ".")
}
if (njobs > 1) {
## Spin up a SOCK cluster for the %dopar% loop below; torn down on exit.
## NOTE(review): setDefaultClusterOptions/makeSOCKcluster/registerDoSNOW come
## from the snow/doSNOW packages and are assumed attached by the caller.
setDefaultClusterOptions(master="localhost")
clusterobj <- makeSOCKcluster(njobs)
registerDoSNOW(clusterobj)
#clusterobj <- makeCluster(njobs)
#registerDoParallel(clusterobj)
on.exit(try(stopCluster(clusterobj)))
}
stopifnot(is.numeric(control$TR) && control$TR > 0) #user must specify TR of data for deconvolution to be well specified
## Fill in method defaults when the caller did not set them in control.
if (!exists('method', where=control)) {
message("Defaulting to Bush and Cisler 2013 deconvolution")
control$method <- "bush"
}
if (control$method == "bush") {
if (!exists('nev_lr', where=control)) {
message("nev_lr (learning rate) not specified for Bush deconvolution. Default to .01")
control$nev_lr <- .01 #neural events learning rate
}
if (!exists('epsilon', where=control)) {
message("epsilon not specified for Bush deconvolution. Default to .005")
control$epsilon <- .005 #convergence criterion
}
if (!exists('kernel', where=control)) {
message("HRF kernel not specified for Bush deconvolution. Default to spm double gamma")
control$kernel <- spm_hrf(control$TR)$hrf #default to canonical SPM difference of gammas with specified TR
}
}
if (control$method == "wu") {
if (!exists('threshold', where=control)) {
message("Activation threshold not specified for Wu deconvolution. Default to 1.0 SD")
control$threshold <- 1.0 #SD
}
if (!exists('max_lag', where=control)) {
message("Maximum event-to-neural onset lag not specified for Wu deconvolution. Default to 10 seconds.")
control$max_lag = ceiling(10/control$TR) #10 seconds maximum lag from neural event to HRF onset
}
}
##now proceed on deconvolution
stopifnot(file.exists(out))
deconv.dir <- file.path(out, "deconvolved_inputs")
dir.create(deconv.dir, showWarnings=FALSE)
#deconvolve inputs in parallel (over subjects/inputs)
#for (f in inputs) {
#using doSNOW, need to explicitly export functions and subfunctions for deconvolution to each worker
exportFuncs <- c("deconvolve_nlreg", "sigmoid", "dsigmoid", "generate_feature", "wgr_deconv_canonhrf_par",
"wgr_adjust_onset", "wgr_trigger_onset", "CanonicalBasisSet", "Fit_Canonical_HRF", "get_parameters2",
"spm_hrf", "spm_get_bf", "spm_gamma_bf", "spm_orth")
f = NULL #silences R CMD check's "no visible binding" note for the foreach iterator
files <- foreach(f=inputs, .combine=c, .export=exportFuncs, .inorder=TRUE) %dopar% {
stopifnot(file.exists(f))
d <- as.matrix(read.table(f, header=header, sep=sep))
if (control$method == "wu") {
d_deconv <- wgr_deconv_canonhrf_par(d, thr=control$threshold, event_lag_max=control$max_lag, TR=control$TR)$data_deconv #already supports multi-variable (column) deconvolution
} else if (control$method == "bush") {
#parApply? Already parallel above, so would need to rework this or do the %:% nested parallel
#deconvolve_nlreg handles one variable at a time, hence the column-wise apply
d_deconv <- apply(d, 2, deconvolve_nlreg, kernel=control$kernel, nev_lr=control$nev_lr, epsilon=control$epsilon)
}
outfile <- file.path(deconv.dir, paste0(tools::file_path_sans_ext(basename(f)), "_deconvolve.txt"))
write.table(d_deconv, file=outfile, sep=sep, quote=FALSE, row.names=FALSE)
return(outfile)
}
return(files)
}
## R versions of deconvolution algorithms for testing in GIMME
## Ported from MATLAB by Michael Hallquist, September 2015
## R port of Bush and Cisler 2013, Magnetic Resonance Imaging
## Adapted from the original provided by Keith Bush
## Author: Keith Bush, PhD
## Institution: University of Arkansas at Little Rock
## Date: Aug. 9, 2013
deconvolve_nlreg <- function(BOLDobs, kernel, nev_lr = .01, epsilon = .005) {
## Deconvolve a BOLD time series into latent neural events (Bush 2011).
##
## Gradient descent on a sigmoid-encoded activation vector: each iteration
## convolves the candidate encoding with the HRF kernel, converts the
## result to percent signal change, compares it to the observed BOLD, and
## backpropagates the error through the convolution.
##
## BOLDobs - observed BOLD timeseries
## kernel  - assumed HRF kernel of the BOLD signal
## nev_lr  - learning rate for the assignment of neural events
## epsilon - absolute change in error at which iteration stops
##
## Returns the reconstructed neural-event encoding (same length as BOLDobs).
N <- length(BOLDobs)  # time series length
K <- length(kernel)   # kernel length
A <- K - 1 + N        # activation vector length (kernel padding + data)
## Termination bookkeeping
preverror <- 1e9  # previous error
currerror <- 0    # current error
## Random activation vector fluctuating slightly around zero in [-1e-9, 1e-9)
activation <- rep(2e-9, A) * runif(A) - 1e-9
## Presolve: seed activations from the unit-normalized BOLD signal, shifted
## so the kernel peak aligns with the observation.
max_hrf_id_adjust <- which.max(kernel) - 1  # element of kernel 1 before max
BOLDobs_adjust <- BOLDobs[max_hrf_id_adjust:N]
pre_encoding <- BOLDobs_adjust - min(BOLDobs_adjust)
pre_encoding <- pre_encoding / max(pre_encoding)  # unit normalize
encoding <- pre_encoding
## logit transform so sigmoid(activation) reproduces pre_encoding exactly
activation[K:(K - 1 + length(BOLDobs_adjust))] <- log(pre_encoding / (1 - pre_encoding))
while (abs(preverror - currerror) > epsilon) {
## Candidate encoding and its lagged feature representation
encoding <- sigmoid(activation)
feature <- generate_feature(encoding, K)
## Virtual BOLD response: (N x K) feature block times (K x 1) kernel
ytilde <- feature[K:nrow(feature), ] %*% kernel
## Convert to percent signal change
meanCurrent <- mean(ytilde)
brf <- (ytilde - meanCurrent) / meanCurrent
## Error gradient with respect to the virtual response
dEdbrf <- brf - BOLDobs
## Assume normalization does not impact the derivative much.
dEdy <- dEdbrf
## Precompute derivative components
dEde <- diag(K) %*% kernel
back_error <- c(rep(0, K - 1), dEdy, rep(0, K - 1))
## Backpropagate errors; delta is preallocated instead of grown with c(),
## which was O(A^2) in the original.
delta <- numeric(A)
for (i in seq_len(A)) {
deda <- dsigmoid(activation[i])
dEda <- dEde * deda
this_error <- back_error[i:(i - 1 + K)]
delta[i] <- sum(dEda * this_error)
}
## Gradient step and error update
activation <- activation - nev_lr * delta
preverror <- currerror
currerror <- sum(dEdbrf^2)
}
## Drop the initial kernel-padding timepoints so the returned signal matches
## the observations in time and length.
encoding <- encoding[K:length(encoding)]
return(encoding)
}
## Support functions
## Logistic (sigmoid) function: maps any real input into (0, 1); vectorized.
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
## First derivative of the logistic function: sigmoid(x) * (1 - sigmoid(x)).
dsigmoid <- function(x) {
  s <- sigmoid(x)
  s * (1 - s)
}
## Build the lagged design ("feature") matrix for the convolution step:
## column i holds the encoding shifted down by i-1 samples, zero-padded.
##
## encoding - neural-event encoding vector
## K        - kernel length (number of lag columns)
## Returns a length(encoding) x K numeric matrix.
generate_feature <- function(encoding, K) {
n <- length(encoding)
fmatrix <- matrix(0, n, K)
fmatrix[, 1] <- encoding
## Guarded so K == 1 works: the original `for (i in 2:K)` iterated 2:1 and
## indexed a nonexistent second column.
if (K > 1) {
for (i in 2:K) {
fmatrix[, i] <- c(rep(0, i - 1), encoding[1:(n - (i - 1))])
}
}
return(fmatrix)
}
#####
## Wu code
## R port of Wu et al. 2013, Medical Image Analysis
## Adapted from the original provided by Daniele Marinazzo
## R port of Wu et al. (2013, Med Image Anal): blind hemodynamic
## deconvolution of resting-state BOLD using detected pseudo-events and a
## canonical HRF with time and dispersion derivatives.
## (Subfunctions Fit_Canonical_HRF, CanonicalBasisSet, get_parameters2 were
## adapted from the Lindquist/Wager HRF estimation toolbox.)
##
## data          : time points x variables matrix (normalized); a plain
##                 vector is treated as a single variable
## thr           : event-detection threshold, in SD units
## event_lag_max : maximum neural-event-to-BOLD lag in bins (seconds / TR)
## TR            : repetition time in seconds
##
## Returns list(data_deconv, event, HRF, event_lag, PARA).
wgr_deconv_canonhrf_par <- function(data, thr = 1.0, event_lag_max, TR) {
  ## Coerce 1-D input to a one-column matrix so column indexing works.
  if (!inherits(data, "matrix")) {
    data <- matrix(data, ncol = 1)
  }
  n_time <- nrow(data)
  n_var <- ncol(data)
  pseudo_events <- wgr_trigger_onset(data, thr)
  ## HRF model order: 1 = canonical only, 2 = + temporal derivative,
  ## 3 = + time and dispersion derivatives (paper default).
  p_m <- 3
  hrf_len <- round(30 / TR)  # assume the HRF effect lasts 30 s
  data_deconv <- matrix(0, nrow = n_time, ncol = n_var)
  HRF <- matrix(0, nrow = hrf_len, ncol = n_var)
  PARA <- matrix(0, nrow = 3, ncol = n_var)
  event <- vector("list", n_var)
  event_lag <- numeric(n_var)
  ## Each variable is deconvolved independently (parallelizable over columns).
  for (i in seq_len(n_var)) {
    fit <- wgr_adjust_onset(data[, i], pseudo_events[[i]], event_lag_max,
                            TR, p_m, hrf_len, n_time)
    data_deconv[, i] <- fit$data_deconv
    HRF[, i] <- fit$hrf
    event[[i]] <- fit$events
    event_lag[i] <- fit$event_lag
    PARA[, i] <- fit$param
  }
  list(data_deconv = data_deconv, event = event, HRF = HRF,
       event_lag = event_lag, PARA = PARA)
}
wgr_adjust_onset <- function(dat,even_new,event_lag_max,TR,p_m,T,N) {
## global adjust.
## Scan every candidate lag 0..event_lag_max: shift the pseudo-events back
## by the lag, refit the canonical-HRF GLM, and keep the lag whose residuals
## have the smallest variance. Then deconvolve the data with that HRF.
##
## dat           - a single BOLD time series (length N)
## even_new      - pseudo-event onset indices for this series
## event_lag_max - maximum neural-to-BOLD lag to consider, in bins
## TR            - repetition time in seconds
## p_m           - HRF model order passed to Fit_Canonical_HRF (1..3)
## T             - length of the estimated HRF, in bins
## N             - number of time points
## Returns list(data_deconv, hrf, events, event_lag, param).
kk=1 #this is just a 1-based version of the loop iterator event_lag...
hrf = matrix(NA_real_, nrow=T, ncol=event_lag_max+1)
param = matrix(NA_real_, nrow=p_m, ncol=event_lag_max+1)
Cov_E = rep(NA_real_, event_lag_max+1)
for (event_lag in 0:event_lag_max) {
RR = even_new - event_lag; RR = RR[RR >= 0] #shift onsets; zero indices are silently ignored by the assignment below
design = matrix(0, nrow=N, ncol=1)
design[RR,1] = 1 #add pseudo-events to design matrix
fit = Fit_Canonical_HRF(dat,TR,design,T,p_m);
hrf[,kk] <- fit$hrf
param[,kk] <- fit$param
Cov_E[kk] <- cov(fit$e) #covariance of residual
kk = kk+1;
}
## Pick the lag with the smallest residual variance and shift events globally.
C = min(Cov_E)
ind = which.min(Cov_E)
ad_global = ind - 1 #begin with 0.
even_new = even_new - ad_global
even_new = even_new[even_new>=0]
hrf = hrf[,ind] #keep only best HRF (minimize error of pseudo-event timing)
param = param[,ind] #keep only best params
## linear deconvolution.
## Wiener-style filter in the frequency domain; the winning residual
## variance C acts as the noise regularizer in the denominator.
H = fft(c(hrf, rep(0, N-T))) ## H=fft([hrf; zeros(N-T,1)]);
M = fft(dat)
data_deconv = Re(fft(Conj(H)*M/(H*Conj(H)+C), inverse=TRUE)/length(H)) ## only keep real part -- there is a tiny imaginary residue in R
return(list(data_deconv=data_deconv, hrf=hrf, events=even_new, event_lag=ad_global, param=param))
}
## Detect pseudo neural events per column: time points that are strict local
## maxima AND exceed `thr` standard deviations after z-scoring the column.
##
## mat - time points x variables matrix
## thr - detection threshold in SD units
## Returns a list with exactly ncol(mat) elements; element i holds the onset
## indices for column i, or NULL when that column has no events.
wgr_trigger_onset <- function(mat, thr) {
N = nrow(mat); nvar = ncol(mat)
mat = apply(mat, 2, scale) #z-score columns
oneset <- vector("list", nvar)
for (i in seq_len(nvar)) {
oneset_temp = c()
## N < 3 has no interior points; the original 2:(N-1) would run backwards
## and index out of bounds in that case.
if (N >= 3) {
for (t in 2:(N-1)) {
if (mat[t,i] > thr && mat[t-1,i] < mat[t,i] && mat[t,i] > mat[t+1,i]) { ## detects threshold
oneset_temp = c(oneset_temp, t)
}
}
}
## Single-bracket list assignment keeps the slot even when the result is
## NULL; `oneset[[i]] = NULL` deleted the element and misaligned columns
## for the caller whenever a column had no events.
oneset[i] <- list(oneset_temp)
}
return(oneset)
}
#####
## Helper functions for Wu deconvolution algorithm
## Original code from Lindquist and Wager HRF Toolbox
## Canonical HRF basis set over a 30-second window at resolution TR:
## h = canonical double-gamma HRF, dh = temporal derivative, dh2 = dispersion
## derivative, Gram-Schmidt orthogonalized and scaled to unit peak amplitude.
## Returns list(h, dh, dh2).
CanonicalBasisSet <- function(TR) {
  len <- round(30 / TR)  # 30 secs worth of images
  xBF <- list(dt = TR,
              length = len,
              name = "hrf (with time and dispersion derivatives)")
  xBF <- spm_get_bf(xBF)
  v1 <- xBF$bf[1:len, 1]
  v2 <- xBF$bf[1:len, 2]
  v3 <- xBF$bf[1:len, 3]
  ## Gram-Schmidt: strip from v2/v3 the components lying along earlier vectors
  h <- v1
  dh <- v2 - (v2 %*% v1 / norm(v1, "2")^2) * v1
  dh2 <- v3 - (v3 %*% v1 / norm(v1, "2")^2) * v1 - (v3 %*% dh / norm(dh, "2")^2) * dh
  ## normalize each basis vector to unit peak amplitude
  list(h = h / max(h), dh = dh / max(dh), dh2 = dh2 / max(dh2))
}
Fit_Canonical_HRF <- function(tc, TR, Run, T, p) {
##function [hrf, e, param] = Fit_Canonical_HRF(tc,TR,Run,T,p)
##
## Fits GLM using canonical hrf (with option of using time and dispersion derivatives)';
##
## INPUTS:
##
## tc - time course
## TR - time resolution
## Run - experimental design (event indicator vector, length == length(tc))
## T - length of estimated HRF
## p - Model type
##
## Options: p=1 - only canonical HRF
## p=2 - canonical + temporal derivative
## p=3 - canonical + time and dispersion derivative
##
## OUTPUTS:
##
## hrf - estimated hemodynamic response function
## fit - estimated time course
## e - residual time course
## param - estimated amplitude, height and width
len = length(Run)
X = matrix(0, nrow=len, ncol=p)
bf = CanonicalBasisSet(TR)
h = bf$h; dh = bf$dh; dh2 = bf$dh2
## Design matrix: each basis vector convolved with the event train.
v = convolve(Run,rev(h), type="open") #this is the R equivalent of conv(Run, h)
X[,1] = v[1:len]
if (p > 1) {
v = convolve(Run,rev(dh), type="open")
X[,2] = v[1:len]
}
if (p > 2) {
v = convolve(Run,rev(dh2), type="open")
X[,3] = v[1:len]
}
X = cbind(rep(1, len), X) #add intercept
b = MASS::ginv(X) %*% tc #OLS via Moore-Penrose pseudoinverse (robust to rank deficiency)
e = tc - X %*% b #residual time course
fit = X %*% b
b = b[2:length(b)] #drop the intercept coefficient
## bc combines the basis coefficients into a signed amplitude estimate.
## NOTE(review): bc is computed but never returned or used; kept for parity
## with the MATLAB original.
if (p == 2) {
bc = sign(b[1])*sqrt(b[1]^2 + b[2]^2)
H = cbind(h, dh)
} else if (p == 1) {
bc = b[1]
H = matrix(h, ncol=1)
} else if (p>2) {
bc = sign(b[1])*sqrt(b[1]^2 + b[2]^2 + b[3]^2)
H = cbind(h, dh, dh2)
}
hrf = H %*% b #reconstruct the fitted HRF from the basis set
param = get_parameters2(hrf,T)
return(list(hrf=hrf, e=e, param=param))
}
## Extract summary parameters from an estimated HRF:
## height (h), time to peak (p, in TR units), width at half peak (w).
## The peak search is restricted to the first 80% of the HRF window.
get_parameters2 <- function(hdrf, t) {
  search_len <- round(t * 0.8)
  p <- which.max(abs(hdrf[1:search_len]))
  h <- hdrf[p]
  ## Width: mark samples on the peak side of the half-height threshold,
  ## then zero everything after the first downward crossing and count.
  half_mask <- if (h > 0) {
    as.numeric(hdrf >= h / 2)
  } else {
    as.numeric(hdrf <= h / 2)
  }
  drop_idx <- which.min(diff(half_mask))
  half_mask[(drop_idx + 1):length(half_mask)] <- 0
  w <- sum(half_mask)
  ## Walk the peak backwards across any flat plateau (|gradient| < 0.001)
  ## so the reported time-to-peak is the plateau's earliest sample.
  grad <- diff(hdrf)
  idx <- p - 1
  while (idx > 0 && abs(grad[idx]) < 0.001) {
    h <- hdrf[idx]
    p <- idx
    idx <- idx - 1
  }
  c(h, p, w)
}
deconvolve_inputs <- function(inputs, sep, header, out, control, njobs) {
## Helper function to deconvolve all inputs prior to running GIMME
## inputs  : character vector of per-subject file paths; each file holds a
##           time x variables table readable via read.table(sep, header).
## sep     : field separator for read.table()/write.table().
## header  : logical, whether the input files carry a header row.
## out     : existing output directory; deconvolved copies are written into
##           the "deconvolved_inputs" subdirectory created beneath it.
## control : list of settings. Requires a positive numeric TR (seconds);
##           optional method ("bush" default, or "wu") plus method-specific
##           tuning values, which are defaulted below when absent.
## njobs   : number of parallel workers (default: all detected threads).
## Returns : character vector with the paths of the deconvolved files.
#require(parallel) #should be moved to import in NAMESPACE for R package
#require(foreach)
#require(doSNOW)
#require(doParallel)
if (missing(njobs)) {
njobs <- parallel::detectCores(logical=TRUE) #on hyperthreading CPUs, use all threads (typically 2/core)
message("In deconvolve_inputs, njobs not specified. Defaulting to number of cores: ", njobs, ".")
}
if (njobs > 1) {
## Spin up a SOCK cluster for the %dopar% loop below; torn down on exit.
## NOTE(review): setDefaultClusterOptions/makeSOCKcluster/registerDoSNOW come
## from the snow/doSNOW packages and are assumed attached by the caller.
setDefaultClusterOptions(master="localhost")
clusterobj <- makeSOCKcluster(njobs)
registerDoSNOW(clusterobj)
#clusterobj <- makeCluster(njobs)
#registerDoParallel(clusterobj)
on.exit(try(stopCluster(clusterobj)))
}
stopifnot(is.numeric(control$TR) && control$TR > 0) #user must specify TR of data for deconvolution to be well specified
## Fill in method defaults when the caller did not set them in control.
if (!exists('method', where=control)) {
message("Defaulting to Bush and Cisler 2013 deconvolution")
control$method <- "bush"
}
if (control$method == "bush") {
if (!exists('nev_lr', where=control)) {
message("nev_lr (learning rate) not specified for Bush deconvolution. Default to .01")
control$nev_lr <- .01 #neural events learning rate
}
if (!exists('epsilon', where=control)) {
message("epsilon not specified for Bush deconvolution. Default to .005")
control$epsilon <- .005 #convergence criterion
}
if (!exists('kernel', where=control)) {
message("HRF kernel not specified for Bush deconvolution. Default to spm double gamma")
control$kernel <- spm_hrf(control$TR)$hrf #default to canonical SPM difference of gammas with specified TR
}
}
if (control$method == "wu") {
if (!exists('threshold', where=control)) {
message("Activation threshold not specified for Wu deconvolution. Default to 1.0 SD")
control$threshold <- 1.0 #SD
}
if (!exists('max_lag', where=control)) {
message("Maximum event-to-neural onset lag not specified for Wu deconvolution. Default to 10 seconds.")
control$max_lag = ceiling(10/control$TR) #10 seconds maximum lag from neural event to HRF onset
}
}
##now proceed on deconvolution
stopifnot(file.exists(out))
deconv.dir <- file.path(out, "deconvolved_inputs")
dir.create(deconv.dir, showWarnings=FALSE)
#deconvolve inputs in parallel (over subjects/inputs)
#for (f in inputs) {
#using doSNOW, need to explicitly export functions and subfunctions for deconvolution to each worker
exportFuncs <- c("deconvolve_nlreg", "sigmoid", "dsigmoid", "generate_feature", "wgr_deconv_canonhrf_par",
"wgr_adjust_onset", "wgr_trigger_onset", "CanonicalBasisSet", "Fit_Canonical_HRF", "get_parameters2",
"spm_hrf", "spm_get_bf", "spm_gamma_bf", "spm_orth")
f = NULL #silences R CMD check's "no visible binding" note for the foreach iterator
files <- foreach(f=inputs, .combine=c, .export=exportFuncs, .inorder=TRUE) %dopar% {
stopifnot(file.exists(f))
d <- as.matrix(read.table(f, header=header, sep=sep))
if (control$method == "wu") {
d_deconv <- wgr_deconv_canonhrf_par(d, thr=control$threshold, event_lag_max=control$max_lag, TR=control$TR)$data_deconv #already supports multi-variable (column) deconvolution
} else if (control$method == "bush") {
#parApply? Already parallel above, so would need to rework this or do the %:% nested parallel
#deconvolve_nlreg handles one variable at a time, hence the column-wise apply
d_deconv <- apply(d, 2, deconvolve_nlreg, kernel=control$kernel, nev_lr=control$nev_lr, epsilon=control$epsilon)
}
outfile <- file.path(deconv.dir, paste0(tools::file_path_sans_ext(basename(f)), "_deconvolve.txt"))
write.table(d_deconv, file=outfile, sep=sep, quote=FALSE, row.names=FALSE)
return(outfile)
}
return(files)
}
## R versions of deconvolution algorithms for testing in GIMME
## Ported from MATLAB by Michael Hallquist, September 2015
## R port of Bush and Cisler 2013, Magnetic Resonance Imaging
## Adapted from the original provided by Keith Bush
## Author: Keith Bush, PhD
## Institution: University of Arkansas at Little Rock
## Date: Aug. 9, 2013
deconvolve_nlreg <- function(BOLDobs, kernel, nev_lr = .01, epsilon = .005) {
## Deconvolve a BOLD time series into latent neural events (Bush 2011).
##
## Gradient descent on a sigmoid-encoded activation vector: each iteration
## convolves the candidate encoding with the HRF kernel, converts the
## result to percent signal change, compares it to the observed BOLD, and
## backpropagates the error through the convolution.
##
## BOLDobs - observed BOLD timeseries
## kernel  - assumed HRF kernel of the BOLD signal
## nev_lr  - learning rate for the assignment of neural events
## epsilon - absolute change in error at which iteration stops
##
## Returns the reconstructed neural-event encoding (same length as BOLDobs).
N <- length(BOLDobs)  # time series length
K <- length(kernel)   # kernel length
A <- K - 1 + N        # activation vector length (kernel padding + data)
## Termination bookkeeping
preverror <- 1e9  # previous error
currerror <- 0    # current error
## Random activation vector fluctuating slightly around zero in [-1e-9, 1e-9)
activation <- rep(2e-9, A) * runif(A) - 1e-9
## Presolve: seed activations from the unit-normalized BOLD signal, shifted
## so the kernel peak aligns with the observation.
max_hrf_id_adjust <- which.max(kernel) - 1  # element of kernel 1 before max
BOLDobs_adjust <- BOLDobs[max_hrf_id_adjust:N]
pre_encoding <- BOLDobs_adjust - min(BOLDobs_adjust)
pre_encoding <- pre_encoding / max(pre_encoding)  # unit normalize
encoding <- pre_encoding
## logit transform so sigmoid(activation) reproduces pre_encoding exactly
activation[K:(K - 1 + length(BOLDobs_adjust))] <- log(pre_encoding / (1 - pre_encoding))
while (abs(preverror - currerror) > epsilon) {
## Candidate encoding and its lagged feature representation
encoding <- sigmoid(activation)
feature <- generate_feature(encoding, K)
## Virtual BOLD response: (N x K) feature block times (K x 1) kernel
ytilde <- feature[K:nrow(feature), ] %*% kernel
## Convert to percent signal change
meanCurrent <- mean(ytilde)
brf <- (ytilde - meanCurrent) / meanCurrent
## Error gradient with respect to the virtual response
dEdbrf <- brf - BOLDobs
## Assume normalization does not impact the derivative much.
dEdy <- dEdbrf
## Precompute derivative components
dEde <- diag(K) %*% kernel
back_error <- c(rep(0, K - 1), dEdy, rep(0, K - 1))
## Backpropagate errors; delta is preallocated instead of grown with c(),
## which was O(A^2) in the original.
delta <- numeric(A)
for (i in seq_len(A)) {
deda <- dsigmoid(activation[i])
dEda <- dEde * deda
this_error <- back_error[i:(i - 1 + K)]
delta[i] <- sum(dEda * this_error)
}
## Gradient step and error update
activation <- activation - nev_lr * delta
preverror <- currerror
currerror <- sum(dEdbrf^2)
}
## Drop the initial kernel-padding timepoints so the returned signal matches
## the observations in time and length.
encoding <- encoding[K:length(encoding)]
return(encoding)
}
## Support functions
## Logistic (sigmoid) function: maps any real input into (0, 1); vectorized.
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
## First derivative of the logistic function: sigmoid(x) * (1 - sigmoid(x)).
dsigmoid <- function(x) {
  s <- sigmoid(x)
  s * (1 - s)
}
## Build the lagged design ("feature") matrix for the convolution step:
## column i holds the encoding shifted down by i-1 samples, zero-padded.
##
## encoding - neural-event encoding vector
## K        - kernel length (number of lag columns)
## Returns a length(encoding) x K numeric matrix.
generate_feature <- function(encoding, K) {
n <- length(encoding)
fmatrix <- matrix(0, n, K)
fmatrix[, 1] <- encoding
## Guarded so K == 1 works: the original `for (i in 2:K)` iterated 2:1 and
## indexed a nonexistent second column.
if (K > 1) {
for (i in 2:K) {
fmatrix[, i] <- c(rep(0, i - 1), encoding[1:(n - (i - 1))])
}
}
return(fmatrix)
}
#####
## Wu code
## R port of Wu et al. 2013, Medical Image Analysis
## Adapted from the original provided by Daniele Marinazzo
## R port of Wu et al. (2013, Med Image Anal): blind hemodynamic
## deconvolution of resting-state BOLD using detected pseudo-events and a
## canonical HRF with time and dispersion derivatives.
## (Subfunctions Fit_Canonical_HRF, CanonicalBasisSet, get_parameters2 were
## adapted from the Lindquist/Wager HRF estimation toolbox.)
##
## data          : time points x variables matrix (normalized); a plain
##                 vector is treated as a single variable
## thr           : event-detection threshold, in SD units
## event_lag_max : maximum neural-event-to-BOLD lag in bins (seconds / TR)
## TR            : repetition time in seconds
##
## Returns list(data_deconv, event, HRF, event_lag, PARA).
wgr_deconv_canonhrf_par <- function(data, thr = 1.0, event_lag_max, TR) {
  ## Coerce 1-D input to a one-column matrix so column indexing works.
  if (!inherits(data, "matrix")) {
    data <- matrix(data, ncol = 1)
  }
  n_time <- nrow(data)
  n_var <- ncol(data)
  pseudo_events <- wgr_trigger_onset(data, thr)
  ## HRF model order: 1 = canonical only, 2 = + temporal derivative,
  ## 3 = + time and dispersion derivatives (paper default).
  p_m <- 3
  hrf_len <- round(30 / TR)  # assume the HRF effect lasts 30 s
  data_deconv <- matrix(0, nrow = n_time, ncol = n_var)
  HRF <- matrix(0, nrow = hrf_len, ncol = n_var)
  PARA <- matrix(0, nrow = 3, ncol = n_var)
  event <- vector("list", n_var)
  event_lag <- numeric(n_var)
  ## Each variable is deconvolved independently (parallelizable over columns).
  for (i in seq_len(n_var)) {
    fit <- wgr_adjust_onset(data[, i], pseudo_events[[i]], event_lag_max,
                            TR, p_m, hrf_len, n_time)
    data_deconv[, i] <- fit$data_deconv
    HRF[, i] <- fit$hrf
    event[[i]] <- fit$events
    event_lag[i] <- fit$event_lag
    PARA[, i] <- fit$param
  }
  list(data_deconv = data_deconv, event = event, HRF = HRF,
       event_lag = event_lag, PARA = PARA)
}
wgr_adjust_onset <- function(dat,even_new,event_lag_max,TR,p_m,T,N) {
## global adjust.
## Scan every candidate lag 0..event_lag_max: shift the pseudo-events back
## by the lag, refit the canonical-HRF GLM, and keep the lag whose residuals
## have the smallest variance. Then deconvolve the data with that HRF.
##
## dat           - a single BOLD time series (length N)
## even_new      - pseudo-event onset indices for this series
## event_lag_max - maximum neural-to-BOLD lag to consider, in bins
## TR            - repetition time in seconds
## p_m           - HRF model order passed to Fit_Canonical_HRF (1..3)
## T             - length of the estimated HRF, in bins
## N             - number of time points
## Returns list(data_deconv, hrf, events, event_lag, param).
kk=1 #this is just a 1-based version of the loop iterator event_lag...
hrf = matrix(NA_real_, nrow=T, ncol=event_lag_max+1)
param = matrix(NA_real_, nrow=p_m, ncol=event_lag_max+1)
Cov_E = rep(NA_real_, event_lag_max+1)
for (event_lag in 0:event_lag_max) {
RR = even_new - event_lag; RR = RR[RR >= 0] #shift onsets; zero indices are silently ignored by the assignment below
design = matrix(0, nrow=N, ncol=1)
design[RR,1] = 1 #add pseudo-events to design matrix
fit = Fit_Canonical_HRF(dat,TR,design,T,p_m);
hrf[,kk] <- fit$hrf
param[,kk] <- fit$param
Cov_E[kk] <- cov(fit$e) #covariance of residual
kk = kk+1;
}
## Pick the lag with the smallest residual variance and shift events globally.
C = min(Cov_E)
ind = which.min(Cov_E)
ad_global = ind - 1 #begin with 0.
even_new = even_new - ad_global
even_new = even_new[even_new>=0]
hrf = hrf[,ind] #keep only best HRF (minimize error of pseudo-event timing)
param = param[,ind] #keep only best params
## linear deconvolution.
## Wiener-style filter in the frequency domain; the winning residual
## variance C acts as the noise regularizer in the denominator.
H = fft(c(hrf, rep(0, N-T))) ## H=fft([hrf; zeros(N-T,1)]);
M = fft(dat)
data_deconv = Re(fft(Conj(H)*M/(H*Conj(H)+C), inverse=TRUE)/length(H)) ## only keep real part -- there is a tiny imaginary residue in R
return(list(data_deconv=data_deconv, hrf=hrf, events=even_new, event_lag=ad_global, param=param))
}
## Detect pseudo neural events per column: time points that are strict local
## maxima AND exceed `thr` standard deviations after z-scoring the column.
##
## mat - time points x variables matrix
## thr - detection threshold in SD units
## Returns a list with exactly ncol(mat) elements; element i holds the onset
## indices for column i, or NULL when that column has no events.
wgr_trigger_onset <- function(mat, thr) {
N = nrow(mat); nvar = ncol(mat)
mat = apply(mat, 2, scale) #z-score columns
oneset <- vector("list", nvar)
for (i in seq_len(nvar)) {
oneset_temp = c()
## N < 3 has no interior points; the original 2:(N-1) would run backwards
## and index out of bounds in that case.
if (N >= 3) {
for (t in 2:(N-1)) {
if (mat[t,i] > thr && mat[t-1,i] < mat[t,i] && mat[t,i] > mat[t+1,i]) { ## detects threshold
oneset_temp = c(oneset_temp, t)
}
}
}
## Single-bracket list assignment keeps the slot even when the result is
## NULL; `oneset[[i]] = NULL` deleted the element and misaligned columns
## for the caller whenever a column had no events.
oneset[i] <- list(oneset_temp)
}
return(oneset)
}
#####
## Helper functions for Wu deconvolution algorithm
## Original code from Lindquist and Wager HRF Toolbox
## Canonical HRF basis set over a 30-second window at resolution TR:
## h = canonical double-gamma HRF, dh = temporal derivative, dh2 = dispersion
## derivative, Gram-Schmidt orthogonalized and scaled to unit peak amplitude.
## Returns list(h, dh, dh2).
CanonicalBasisSet <- function(TR) {
  len <- round(30 / TR)  # 30 secs worth of images
  xBF <- list(dt = TR,
              length = len,
              name = "hrf (with time and dispersion derivatives)")
  xBF <- spm_get_bf(xBF)
  v1 <- xBF$bf[1:len, 1]
  v2 <- xBF$bf[1:len, 2]
  v3 <- xBF$bf[1:len, 3]
  ## Gram-Schmidt: strip from v2/v3 the components lying along earlier vectors
  h <- v1
  dh <- v2 - (v2 %*% v1 / norm(v1, "2")^2) * v1
  dh2 <- v3 - (v3 %*% v1 / norm(v1, "2")^2) * v1 - (v3 %*% dh / norm(dh, "2")^2) * dh
  ## normalize each basis vector to unit peak amplitude
  list(h = h / max(h), dh = dh / max(dh), dh2 = dh2 / max(dh2))
}
Fit_Canonical_HRF <- function(tc, TR, Run, T, p) {
##function [hrf, e, param] = Fit_Canonical_HRF(tc,TR,Run,T,p)
##
## Fits GLM using canonical hrf (with option of using time and dispersion derivatives)';
##
## INPUTS:
##
## tc - time course
## TR - time resolution
## Run - experimental design (event indicator vector, length == length(tc))
## T - length of estimated HRF
## p - Model type
##
## Options: p=1 - only canonical HRF
## p=2 - canonical + temporal derivative
## p=3 - canonical + time and dispersion derivative
##
## OUTPUTS:
##
## hrf - estimated hemodynamic response function
## fit - estimated time course
## e - residual time course
## param - estimated amplitude, height and width
len = length(Run)
X = matrix(0, nrow=len, ncol=p)
bf = CanonicalBasisSet(TR)
h = bf$h; dh = bf$dh; dh2 = bf$dh2
## Design matrix: each basis vector convolved with the event train.
v = convolve(Run,rev(h), type="open") #this is the R equivalent of conv(Run, h)
X[,1] = v[1:len]
if (p > 1) {
v = convolve(Run,rev(dh), type="open")
X[,2] = v[1:len]
}
if (p > 2) {
v = convolve(Run,rev(dh2), type="open")
X[,3] = v[1:len]
}
X = cbind(rep(1, len), X) #add intercept
b = MASS::ginv(X) %*% tc #OLS via Moore-Penrose pseudoinverse (robust to rank deficiency)
e = tc - X %*% b #residual time course
fit = X %*% b
b = b[2:length(b)] #drop the intercept coefficient
## bc combines the basis coefficients into a signed amplitude estimate.
## NOTE(review): bc is computed but never returned or used; kept for parity
## with the MATLAB original.
if (p == 2) {
bc = sign(b[1])*sqrt(b[1]^2 + b[2]^2)
H = cbind(h, dh)
} else if (p == 1) {
bc = b[1]
H = matrix(h, ncol=1)
} else if (p>2) {
bc = sign(b[1])*sqrt(b[1]^2 + b[2]^2 + b[3]^2)
H = cbind(h, dh, dh2)
}
hrf = H %*% b #reconstruct the fitted HRF from the basis set
param = get_parameters2(hrf,T)
return(list(hrf=hrf, e=e, param=param))
}
## Extract summary parameters from an estimated HRF:
## height (h), time to peak (p, in TR units), width at half peak (w).
## The peak search is restricted to the first 80% of the HRF window.
get_parameters2 <- function(hdrf, t) {
  search_len <- round(t * 0.8)
  p <- which.max(abs(hdrf[1:search_len]))
  h <- hdrf[p]
  ## Width: mark samples on the peak side of the half-height threshold,
  ## then zero everything after the first downward crossing and count.
  half_mask <- if (h > 0) {
    as.numeric(hdrf >= h / 2)
  } else {
    as.numeric(hdrf <= h / 2)
  }
  drop_idx <- which.min(diff(half_mask))
  half_mask[(drop_idx + 1):length(half_mask)] <- 0
  w <- sum(half_mask)
  ## Walk the peak backwards across any flat plateau (|gradient| < 0.001)
  ## so the reported time-to-peak is the plateau's earliest sample.
  grad <- diff(hdrf)
  idx <- p - 1
  while (idx > 0 && abs(grad[idx]) < 0.001) {
    h <- hdrf[idx]
    p <- idx
    idx <- idx - 1
  }
  c(h, p, w)
}
\name{data.examples}
\alias{data.examples}
\alias{example}
\alias{pop01}
\alias{pop02}
\alias{pop03}
\alias{pop03p}
\alias{pop04}
\alias{pop04p}
\alias{pop05}
\alias{pop05p}
\alias{pop06p}
\alias{pop07}
\alias{pop07p}
\alias{pop07pp}
\alias{bounds}
\docType{data}
\title{Artificial Household Survey Data}
\description{
Example data frames. They allow one to run the \R code contained in the \sQuote{Examples} section of the \pkg{ReGenesees} package help pages.
}
\usage{data(data.examples)}
\format{
The main data frame, named \code{example}, contains (artificial) data from a two stage stratified cluster sampling design. The sample is made up of 3000 final units, for which the following 21 variables were observed:
\describe{
\item{\code{towcod}}{Code identifying "variance PSUs": towns (PSUs) in not-self-representing (NSR) strata, families (SSUs) in self-representing (SR) strata, \code{numeric}}
\item{\code{famcod}}{Code identifying families (SSUs), \code{numeric}}
\item{\code{key}}{Key identifying final units (individuals), \code{numeric}}
\item{\code{weight}}{Initial weights, \code{numeric}}
\item{\code{stratum}}{Stratification variable, \code{factor} with levels \code{801} \code{802} \code{803} \code{901} \code{902} \code{903} \code{904} \code{905} \code{906} \code{907} \code{908} \code{1001} \code{1002} \code{1003} \code{1004} \code{1005} \code{1006} \code{1007} \code{1008} \code{1009} \code{1101} \code{1102} \code{1103} \code{1104} \code{3001} \code{3002} \code{3003} \code{3004} \code{3005} \code{3006} \code{3007} \code{3008} \code{3009} \code{3010} \code{3011} \code{3012} \code{3101} \code{3102} \code{3103} \code{3104} \code{3105} \code{3106} \code{3107} \code{3108} \code{3201} \code{3202} \code{3203} \code{3204} \code{5401} \code{5402} \code{5403} \code{5404} \code{5405} \code{5406} \code{5407} \code{5408} \code{5409} \code{5410} \code{5411} \code{5412} \code{5413} \code{5414} \code{5415} \code{5416} \code{5501} \code{5502} \code{5503} \code{5504} \code{9301} \code{9302} \code{9303} \code{9304} \code{9305} \code{9306} \code{9307} \code{9308} \code{9309} \code{9310} \code{9311} \code{9312}}
\item{\code{SUPERSTRATUM}}{Collapsed strata variable (eliminates lonely PSUs), \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5} \code{6} \code{7} \code{8} \code{9} \code{10} \code{11} \code{12} \code{13} \code{14} \code{15} \code{16} \code{17} \code{18} \code{19} \code{20} \code{21} \code{22} \code{23} \code{24} \code{25} \code{26} \code{27} \code{28} \code{29} \code{30} \code{31} \code{32} \code{33} \code{34} \code{35} \code{36} \code{37} \code{38} \code{39} \code{40} \code{41} \code{42} \code{43} \code{44} \code{45} \code{46} \code{47} \code{48} \code{49} \code{50} \code{51} \code{52} \code{53} \code{54} \code{55}}
\item{\code{sr}}{Strata type, \code{integer} with values \code{0} (NSR strata) and \code{1} (SR strata)}
\item{\code{regcod}}{Code identifying regions, \code{factor} with levels \code{6} \code{7} \code{10}}
\item{\code{procod}}{Code identifying provinces, \code{factor} with levels \code{8} \code{9} \code{10} \code{11} \code{30} \code{31} \code{32} \code{54} \code{55} \code{93}}
\item{\code{x1}}{Indicator variable (integer), \code{numeric}}
\item{\code{x2}}{Indicator variable (integer), \code{numeric}}
\item{\code{x3}}{Indicator variable (integer), \code{numeric}}
\item{\code{y1}}{Indicator variable (integer), \code{numeric}}
\item{\code{y2}}{Indicator variable (integer), \code{numeric}}
\item{\code{y3}}{Indicator variable (integer), \code{numeric}}
\item{\code{age5c}}{Age variable with 5 classes, \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5}}
\item{\code{age10c}}{Age variable with 10 classes, \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5} \code{6} \code{7} \code{8} \code{9} \code{10}}
\item{\code{sex}}{Sex variable, \code{factor} with levels \code{f} \code{m}}
\item{\code{marstat}}{Marital status variable, \code{factor} with levels \code{married} \code{unmarried} \code{widowed}}
\item{\code{z}}{A continuous quantitative variable, \code{numeric}}
\item{\code{income}}{Income variable, \code{numeric}}
}
}
\details{
Objects \code{pop01}, \ldots, \code{pop07pp} contain known population totals for various calibration models. Object pairs with names differing in the '\code{p}' suffix (such as \code{pop03} and \code{pop03p}) refer to the \emph{same} calibration problem but pertain to \emph{different} solution methods (global and partitioned respectively, see \code{\link{e.calibrate}}). The two-component numeric vector \code{bounds} expresses a possible choice for the allowed range for the ratios between calibrated weights and direct weights in the aforementioned calibration problems.
}
\section{Warning}{
\strong{Data in the \code{example} data frame are artificial.} The \emph{structure} of \code{example} intentionally resembles the one of typical household survey data, but the \emph{values} it stores are unreliable. The only purpose of such data is that they can be fruitfully exploited to illustrate the syntax and the working mechanism of the functions provided by the \pkg{ReGenesees} package.
}
\examples{
data(data.examples)
head(example)
str(example)
}
\keyword{datasets} | /man/data.examples.Rd | no_license | PedroJMA/ReGenesees | R | false | false | 5,305 | rd | \name{data.examples}
\alias{data.examples}
\alias{example}
\alias{pop01}
\alias{pop02}
\alias{pop03}
\alias{pop03p}
\alias{pop04}
\alias{pop04p}
\alias{pop05}
\alias{pop05p}
\alias{pop06p}
\alias{pop07}
\alias{pop07p}
\alias{pop07pp}
\alias{bounds}
\docType{data}
\title{Artificial Household Survey Data}
\description{
Example data frames. They allow one to run the \R code contained in the \sQuote{Examples} section of the \pkg{ReGenesees} package help pages.
}
\usage{data(data.examples)}
\format{
The main data frame, named \code{example}, contains (artificial) data from a two stage stratified cluster sampling design. The sample is made up of 3000 final units, for which the following 21 variables were observed:
\describe{
\item{\code{towcod}}{Code identifying "variance PSUs": towns (PSUs) in not-self-representing (NSR) strata, families (SSUs) in self-representing (SR) strata, \code{numeric}}
\item{\code{famcod}}{Code identifying families (SSUs), \code{numeric}}
\item{\code{key}}{Key identifying final units (individuals), \code{numeric}}
\item{\code{weight}}{Initial weights, \code{numeric}}
\item{\code{stratum}}{Stratification variable, \code{factor} with levels \code{801} \code{802} \code{803} \code{901} \code{902} \code{903} \code{904} \code{905} \code{906} \code{907} \code{908} \code{1001} \code{1002} \code{1003} \code{1004} \code{1005} \code{1006} \code{1007} \code{1008} \code{1009} \code{1101} \code{1102} \code{1103} \code{1104} \code{3001} \code{3002} \code{3003} \code{3004} \code{3005} \code{3006} \code{3007} \code{3008} \code{3009} \code{3010} \code{3011} \code{3012} \code{3101} \code{3102} \code{3103} \code{3104} \code{3105} \code{3106} \code{3107} \code{3108} \code{3201} \code{3202} \code{3203} \code{3204} \code{5401} \code{5402} \code{5403} \code{5404} \code{5405} \code{5406} \code{5407} \code{5408} \code{5409} \code{5410} \code{5411} \code{5412} \code{5413} \code{5414} \code{5415} \code{5416} \code{5501} \code{5502} \code{5503} \code{5504} \code{9301} \code{9302} \code{9303} \code{9304} \code{9305} \code{9306} \code{9307} \code{9308} \code{9309} \code{9310} \code{9311} \code{9312}}
\item{\code{SUPERSTRATUM}}{Collapsed strata variable (eliminates lonely PSUs), \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5} \code{6} \code{7} \code{8} \code{9} \code{10} \code{11} \code{12} \code{13} \code{14} \code{15} \code{16} \code{17} \code{18} \code{19} \code{20} \code{21} \code{22} \code{23} \code{24} \code{25} \code{26} \code{27} \code{28} \code{29} \code{30} \code{31} \code{32} \code{33} \code{34} \code{35} \code{36} \code{37} \code{38} \code{39} \code{40} \code{41} \code{42} \code{43} \code{44} \code{45} \code{46} \code{47} \code{48} \code{49} \code{50} \code{51} \code{52} \code{53} \code{54} \code{55}}
\item{\code{sr}}{Strata type, \code{integer} with values \code{0} (NSR strata) and \code{1} (SR strata)}
\item{\code{regcod}}{Code identifying regions, \code{factor} with levels \code{6} \code{7} \code{10}}
\item{\code{procod}}{Code identifying provinces, \code{factor} with levels \code{8} \code{9} \code{10} \code{11} \code{30} \code{31} \code{32} \code{54} \code{55} \code{93}}
\item{\code{x1}}{Indicator variable (integer), \code{numeric}}
\item{\code{x2}}{Indicator variable (integer), \code{numeric}}
\item{\code{x3}}{Indicator variable (integer), \code{numeric}}
\item{\code{y1}}{Indicator variable (integer), \code{numeric}}
\item{\code{y2}}{Indicator variable (integer), \code{numeric}}
\item{\code{y3}}{Indicator variable (integer), \code{numeric}}
\item{\code{age5c}}{Age variable with 5 classes, \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5}}
\item{\code{age10c}}{Age variable with 10 classes, \code{factor} with levels \code{1} \code{2} \code{3} \code{4} \code{5} \code{6} \code{7} \code{8} \code{9} \code{10}}
\item{\code{sex}}{Sex variable, \code{factor} with levels \code{f} \code{m}}
\item{\code{marstat}}{Marital status variable, \code{factor} with levels \code{married} \code{unmarried} \code{widowed}}
\item{\code{z}}{A continuous quantitative variable, \code{numeric}}
\item{\code{income}}{Income variable, \code{numeric}}
}
}
\details{
Objects \code{pop01}, \ldots, \code{pop07pp} contain known population totals for various calibration models. Object pairs with names differing in the '\code{p}' suffix (such as \code{pop03} and \code{pop03p}) refer to the \emph{same} calibration problem but pertain to \emph{different} solution methods (global and partitioned respectively, see \code{\link{e.calibrate}}). The two-component numeric vector \code{bounds} expresses a possible choice for the allowed range for the ratios between calibrated weights and direct weights in the aforementioned calibration problems.
}
\section{Warning}{
\strong{Data in the \code{example} data frame are artificial.} The \emph{structure} of \code{example} intentionally resembles the one of typical household survey data, but the \emph{values} it stores are unreliable. The only purpose of such data is that they can be fruitfully exploited to illustrate the syntax and the working mechanism of the functions provided by the \pkg{ReGenesees} package.
}
\examples{
data(data.examples)
head(example)
str(example)
}
\keyword{datasets} |
#! /usr/bin/env Rscript
######################################################################################
## Simulations of thermal death time data & prediction of derived thermal mortality ##
######################################################################################
# Loop with 100 simulations for different thermal thresholds
# output:
# * mort_min_tdtsim_sensors_full: predicted thermal mortality at minute resolution
# * simdata_full: data of larval death time of all the simulations
# * tdtsim_full: tolerance landscape (TDT curves) of all the simulations
# * sensors_minute_spline_: thermal profiles form microclimatic data at minute resolution
# Packages ----------------------------------------------------------------
library(data.table)
library(tidyverse)
library(readxl)
library(lubridate)
library(zoo)
library(colorspace)
library(patchwork)
library(future)
library(parallel)
library(furrr)
# Parallellization setup --------------------------------------------------
# NOTE(review): availableCores("multicore") - 40 can be zero or negative on
# machines with fewer than ~41 cores — presumably tuned for a specific
# server; verify before running elsewhere.
plan(multicore, workers = availableCores("multicore")-40)
# Functions ---------------------------------------------------------------
source("code/Thermal_landscape_functions_mod.R")
source("code/function_trunk_mortality.R")
# Data --------------------------------------------------------------------
tdt <- read.table("data/TDT_experiment.txt", header = T, sep = ",", dec = ",")
sensor <- read.csv ("data/sensors_hourly.csv")
# all.sens: one thermal profile per sensor at minute resolution, obtained by
# averaging hourly temperatures across years and spline-interpolating each
# hour into 60 one-minute steps.
all.sens <- sensor %>%
  mutate(winter_jday = if_else(winter_year == 2016, winter_jday - 1, as.double(winter_jday))) %>%
  #To guarantee that day correspondence of 2016 is the same as other years
  group_by(sensor, winter_jday, winter_hour) %>%
  summarise(ta.h = mean(TEMP, na.rm = T)) %>%
  #hourly means across years (as Rezende)
  split(.$sensor) %>%
  map(.,
      ~ spline(.$ta.h, n = (nrow(.)-1)*60)) %>%
  #Last hour (23 to 24h) of the last days can't be added because we don't know the last temperature (00h)
  map(bind_cols) %>%
  map(rename,
      hour = x,
      ta.min = y)
# juliandays: lookup table mapping each sensor's ordinal day index (`day`)
# back to its Julian winter day, plus the number of distinct days recorded.
juliandays <- sensor %>%
  mutate(winter_jday = if_else(winter_year == 2016, winter_jday - 1, as.double(winter_jday))) %>% # so that 2016 has the same day correspondence as the other years
  group_by(sensor, winter_jday, winter_hour) %>%
  summarise(ta.h = mean(TEMP, na.rm = T)) %>%
  group_by(sensor) %>%
  distinct(winter_jday) %>%
  mutate(day = min_rank(winter_jday),
         duration = n_distinct(winter_jday))
# Simulation of thermal death time and calculation of thermal mort --------
## TDT curves from original data
# Keep only larvae with both a recorded death time and a treatment
# temperature, split by species, and fit one TDT curve per species.
tdt.split <- tdt %>%
  filter(!is.na(aprox_minute_dead),
         !is.na(SENSOR_mean_temp)) %>%
  split(.$sp)
tdt_curve <- tdt.split %>%
  map(~ tdt.curve(ta = .$SENSOR_mean_temp, time = .$aprox_minute_dead))
## Simulations & estimates of daily mortality
# thres: thermal thresholds to test (100 presumably acts as a "no
# threshold" control — confirm against trunk_mortality()); sims: number of
# simulations per threshold.
thres <- c(100, 45, 42.5, 40, 37.5, 35)
sims <- 100
# Console progress bar covering every threshold x simulation combination.
pb <- txtProgressBar(min = 0, max = sims*length(thres), initial = 0, char = "*", style = 3)
# Main simulation loop: for each mortality threshold thres[i], run `sims`
# simulated TDT experiments and write/append three result files
# (mort_min_tdtsim_sensors_full*, simdata_full*, tdtsim_full*).
for (i in seq_along(thres)) {
  for (j in 1:sims) {
    ## Tolerance landscape of simulated data
    # Resample larval death times from each species' fitted TDT regression:
    # same intercept/slope, fresh Gaussian residuals with the model's sigma,
    # 35 individuals per treatment temperature (40, 42, 44).
    simdata <- tdt_curve %>%
      map(pluck,
          "model") %>%
      map(~ data.frame(treatment = rep(c(40, 42, 44), each = 35),
                       inter = unname(coefficients(.)[1], force = T),
                       slope = unname(coefficients(.)[2], force = T),
                       sigma = sigma(.))) %>%
      map(mutate,
          error = rnorm(n = n(), sd = sigma),
          log10time = inter + slope*treatment + error,
          time = 10^log10time)
    tl.sim <- simdata %>%
      map(~ tolerance.landscape(ta = .$treatment, time = .$time))
    ## Estimates of daily mortality
    # Daily summary per sensor and species: worst-case mortality/survival
    # plus daily temperature extremes, tagged with threshold and simulation.
    subtibble <- trunk_mortality(tl = tl.sim, sensdata = all.sens, thres = thres[i]) %>%
      mutate(alive = if_else(is.na(alive), 0, alive),
             mort = 100-alive,
             day = as.numeric(day)) %>%
      left_join(juliandays,
                by = c("sensor" = "sensor", "day" = "day")) %>%
      group_by(sensor, winter_jday, sp_comp) %>%
      summarise(mort = max(mort, na.rm = T),
                tmax = max(ta, na.rm = T),
                tmin = min(ta, na.rm = T),
                tmean = mean(ta, na.rm = T),
                surv = min(alive, na.rm = T),
                thres = thres[i],
                simulation = j)
    # The very first iteration creates the output files; every later
    # iteration appends to them.
    if(i == 1 & j == 1){
      fwrite(subtibble,
             file = paste0("data/sim_mortalities/mort_min_tdtsim_sensors_full",
                           Sys.Date(),
                           ".csv"))
      simdata %>%
        bind_rows(.id = "sp") %>%
        select(sp, treatment, time) %>%
        mutate(thres = thres[i],
               simulation = j) %>%
        fwrite(file = paste0("data/sim_mortalities/simdata_full",
                             Sys.Date(),
                             ".csv"))
      # Keep only the TDT curve parameters (ctmax and z) of each landscape.
      tl.sim %>%
        map(~ keep(., names(.) %in% c("ctmax", "z"))) %>%
        map(bind_rows) %>%
        bind_rows(.id = "sp") %>%
        mutate(simulation = j,
               thres = thres[i]) %>%
        fwrite(file = paste0("data/sim_mortalities/tdtsim_full",
                             Sys.Date(),
                             ".csv"))
    } else {
      fwrite(subtibble,
             file = paste0("data/sim_mortalities/mort_min_tdtsim_sensors_full",
                           Sys.Date(),
                           ".csv"),
             append = T)
      simdata %>%
        bind_rows(.id = "sp") %>%
        select(sp, treatment, time) %>%
        mutate(thres = thres[i],
               simulation = j) %>%
        fwrite(file = paste0("data/sim_mortalities/simdata_full",
                             Sys.Date(),
                             ".csv"),
               append = T)
      tl.sim %>%
        map(~ keep(., names(.) %in% c("ctmax", "z"))) %>%
        map(bind_rows) %>%
        bind_rows(.id = "sp") %>%
        mutate(simulation = j,
               thres = thres[i]) %>%
        fwrite(file = paste0("data/sim_mortalities/tdtsim_full",
                             Sys.Date(),
                             ".csv"),
               append = T)
    }
    setTxtProgressBar(pb, sims*(i-1)+j)
  }
}
# Persist the minute-resolution sensor profiles used above as a single
# data frame (one row per sensor/minute) for downstream analyses.
all.sens %>%
  bind_rows(.id = "sensor") %>%
  write_rds(file = paste0("data/sim_mortalities/sensors_minute_spline_",
                          Sys.Date(),
                          ".RDS"))
| /code/thermal_mortality/sim_thermal_mortality.R | no_license | mvives-ingla/ecotones | R | false | false | 6,459 | r | #! /usr/bin/env Rscript
######################################################################################
## Simulations of thermal death time data & prediction of derived thermal mortality ##
######################################################################################
# Loop with 100 simulations for different thermal thresholds
# output:
# * mort_min_tdtsim_sensors_full: predicted thermal mortality at minute resolution
# * simdata_full: data of larval death time of all the simulations
# * tdtsim_full: tolerance landscape (TDT curves) of all the simulations
# * sensors_minute_spline_: thermal profiles form microclimatic data at minute resolution
# Packages ----------------------------------------------------------------
library(data.table)
library(tidyverse)
library(readxl)
library(lubridate)
library(zoo)
library(colorspace)
library(patchwork)
library(future)
library(parallel)
library(furrr)
# Parallellization setup --------------------------------------------------
plan(multicore, workers = availableCores("multicore")-40)
# Functions ---------------------------------------------------------------
source("code/Thermal_landscape_functions_mod.R")
source("code/function_trunk_mortality.R")
# Data --------------------------------------------------------------------
tdt <- read.table("data/TDT_experiment.txt", header = T, sep = ",", dec = ",")
sensor <- read.csv ("data/sensors_hourly.csv")
all.sens <- sensor %>%
mutate(winter_jday = if_else(winter_year == 2016, winter_jday - 1, as.double(winter_jday))) %>%
#To guarantee that day correspondence of 2016 is the same as other years
group_by(sensor, winter_jday, winter_hour) %>%
summarise(ta.h = mean(TEMP, na.rm = T)) %>%
#hourly means across years (as Rezende)
split(.$sensor) %>%
map(.,
~ spline(.$ta.h, n = (nrow(.)-1)*60)) %>%
#Last hour (23 to 24h) of the last days can't be added because we don't know the last temperature (00h)
map(bind_cols) %>%
map(rename,
hour = x,
ta.min = y)
juliandays <- sensor %>%
mutate(winter_jday = if_else(winter_year == 2016, winter_jday - 1, as.double(winter_jday))) %>% #Pq el 2016 tingui la mateixa correspondรจncia de dies que els altres anys
group_by(sensor, winter_jday, winter_hour) %>%
summarise(ta.h = mean(TEMP, na.rm = T)) %>%
group_by(sensor) %>%
distinct(winter_jday) %>%
mutate(day = min_rank(winter_jday),
duration = n_distinct(winter_jday))
# Simulation of thermal death time and calculation of thermal mort --------
## TDT curves from original data
tdt.split <- tdt %>%
filter(!is.na(aprox_minute_dead),
!is.na(SENSOR_mean_temp)) %>%
split(.$sp)
tdt_curve <- tdt.split %>%
map(~ tdt.curve(ta = .$SENSOR_mean_temp, time = .$aprox_minute_dead))
## Simulations & estimates of daily mortality
thres <- c(100, 45, 42.5, 40, 37.5, 35)
sims <- 100
pb <- txtProgressBar(min = 0, max = sims*length(thres), initial = 0, char = "*", style = 3)
for (i in seq_along(thres)) {
for (j in 1:sims) {
## Tolerance landscape of simulated data
simdata <- tdt_curve %>%
map(pluck,
"model") %>%
map(~ data.frame(treatment = rep(c(40, 42, 44), each = 35),
inter = unname(coefficients(.)[1], force = T),
slope = unname(coefficients(.)[2], force = T),
sigma = sigma(.))) %>%
map(mutate,
error = rnorm(n = n(), sd = sigma),
log10time = inter + slope*treatment + error,
time = 10^log10time)
tl.sim <- simdata %>%
map(~ tolerance.landscape(ta = .$treatment, time = .$time))
## Estimates of daily mortality
subtibble <- trunk_mortality(tl = tl.sim, sensdata = all.sens, thres = thres[i]) %>%
mutate(alive = if_else(is.na(alive), 0, alive),
mort = 100-alive,
day = as.numeric(day)) %>%
left_join(juliandays,
by = c("sensor" = "sensor", "day" = "day")) %>%
group_by(sensor, winter_jday, sp_comp) %>%
summarise(mort = max(mort, na.rm = T),
tmax = max(ta, na.rm = T),
tmin = min(ta, na.rm = T),
tmean = mean(ta, na.rm = T),
surv = min(alive, na.rm = T),
thres = thres[i],
simulation = j)
if(i == 1 & j == 1){
fwrite(subtibble,
file = paste0("data/sim_mortalities/mort_min_tdtsim_sensors_full",
Sys.Date(),
".csv"))
simdata %>%
bind_rows(.id = "sp") %>%
select(sp, treatment, time) %>%
mutate(thres = thres[i],
simulation = j) %>%
fwrite(file = paste0("data/sim_mortalities/simdata_full",
Sys.Date(),
".csv"))
tl.sim %>%
map(~ keep(., names(.) %in% c("ctmax", "z"))) %>%
map(bind_rows) %>%
bind_rows(.id = "sp") %>%
mutate(simulation = j,
thres = thres[i]) %>%
fwrite(file = paste0("data/sim_mortalities/tdtsim_full",
Sys.Date(),
".csv"))
} else {
fwrite(subtibble,
file = paste0("data/sim_mortalities/mort_min_tdtsim_sensors_full",
Sys.Date(),
".csv"),
append = T)
simdata %>%
bind_rows(.id = "sp") %>%
select(sp, treatment, time) %>%
mutate(thres = thres[i],
simulation = j) %>%
fwrite(file = paste0("data/sim_mortalities/simdata_full",
Sys.Date(),
".csv"),
append = T)
tl.sim %>%
map(~ keep(., names(.) %in% c("ctmax", "z"))) %>%
map(bind_rows) %>%
bind_rows(.id = "sp") %>%
mutate(simulation = j,
thres = thres[i]) %>%
fwrite(file = paste0("data/sim_mortalities/tdtsim_full",
Sys.Date(),
".csv"),
append = T)
}
setTxtProgressBar(pb, sims*(i-1)+j)
}
}
all.sens %>%
bind_rows(.id = "sensor") %>%
write_rds(file = paste0("data/sim_mortalities/sensors_minute_spline_",
Sys.Date(),
".RDS"))
|
\name{E4.4}
\alias{E4.4}
\title{ Measures of Quality for Agencies Delivering Transportation for
the Elderly and the Handicapped }
\concept{Measures of Quality for Agencies Delivering Transportation for the Elderly and the Handicapped }
\usage{data(E4.4)}
\description{
The \code{E4.4} data frame has 40 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{QUAL}{
a numeric vector, a quality measure made using psychometric methods from results
of questionnaires.
}
\item{X.1}{
a numeric vector, an indicator variable for private ownership.
}
\item{X.2}{
a numeric vector, an indicator variable for private for profit ownership.
}
}
}
\details{
The quality data, \code{QUAL}, is constructed from questionnaires given
to users of such services in the state of Illinois. Multiple services
in the state of Illinois were scored using this method. The indicator variables
were constructed to give first (\code{X.1}) a comparison between private
and public services, then (\code{X.2}) a comparison between private
not-for-profit and private for profit services.
}
\source{
Slightly modified version of data supplied by Ms. Claire McKnight of
the Department of Civil Engineering, City University of New York.
}
\examples{
data(E4.4)
summary(E4.4)
}
\keyword{datasets}
\concept{regression}
| /man/E4.4.Rd | no_license | cran/SenSrivastava | R | false | false | 1,347 | rd | \name{E4.4}
\alias{E4.4}
\title{ Measures of Quality for Agencies Delivering Transportation for
the Elderly and the Handicapped }
\concept{Measures of Quality for Agencies Delivering Transportation for the Elderly and the Handicapped }
\usage{data(E4.4)}
\description{
The \code{E4.4} data frame has 40 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{QUAL}{
a numeric vector, a quality measure made using psychometric methods from results
of questionnaires.
}
\item{X.1}{
a numeric vector, an indicator variable for private ownership.
}
\item{X.2}{
a numeric vector, an indicator variable for private for profit ownership.
}
}
}
\details{
The quality data, \code{QUAL}, is constructed from questionnaires given
to users of such services in the state of Illinois. Multiple services
in the state of Illinois were scored using this method. The indicator variables
were constructed to give first (\code{X.1}) a comparison between private
and public services, then (\code{X.2}) a comparison between private
not-for-profit and private for profit services.
}
\source{
Slightly modified version of data supplied by Ms. Claire McKnight of
the Department of Civil Engineering, City University of New York.
}
\examples{
data(E4.4)
summary(E4.4)
}
\keyword{datasets}
\concept{regression}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{set.cache.dir}
\alias{set.cache.dir}
\title{Sets the caching directory path}
\usage{
set.cache.dir(path)
}
\arguments{
\item{path}{the path of the caching directory}
}
\description{
Sets the caching directory path
}
| /man/set.cache.dir.Rd | permissive | pydupont/cacheR | R | false | true | 310 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{set.cache.dir}
\alias{set.cache.dir}
\title{Sets the caching directory path}
\usage{
set.cache.dir(path)
}
\arguments{
\item{path}{the path of the caching directory}
}
\description{
Sets the caching directory path
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rstatscn.R
\name{statscnRowNamePrefix}
\alias{statscnRowNamePrefix}
\title{statscnRowNamePrefix}
\usage{
statscnRowNamePrefix(p = "nrow")
}
\arguments{
\item{p}{how to set the row name prefix.
It is 'nrow' by default, and it is currently the only supported value.
To unset the row name prefix, call this function with p=NULL.}
}
\value{
no return
}
\description{
set the rowName prefix in the dataframe
}
\details{
in case you encounter the following error:
Error in `row.names<-.data.frame`(`*tmp*`, value = value) :
duplicate 'row.names' are not allowed
you need to call this function
}
| /man/statscnRowNamePrefix.Rd | no_license | jiang-hang/rstatscn | R | false | true | 673 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rstatscn.R
\name{statscnRowNamePrefix}
\alias{statscnRowNamePrefix}
\title{statscnRowNamePrefix}
\usage{
statscnRowNamePrefix(p = "nrow")
}
\arguments{
\item{p}{how to set the row name prefix.
It is 'nrow' by default, and it is currently the only supported value.
To unset the row name prefix, call this function with p=NULL.}
}
\value{
no return
}
\description{
set the rowName prefix in the dataframe
}
\details{
in case you encounter the following error:
Error in `row.names<-.data.frame`(`*tmp*`, value = value) :
duplicate 'row.names' are not allowed
you need to call this function
}
|
library(dplyr)
library(caret)
set.seed(98765)  # fixed seed so the data partition and model fit are reproducible
# Load the raw CSVs only once per session (guard against re-sourcing).
if (!exists("dataTrain.raw")){dataTrain.raw <- read.csv('data/pml-training.csv')}
if (!exists("dataTest.raw")){dataTest.raw <- read.csv('data/pml-testing.csv')}
# Clean up the column set
pml_data_column_cleanup = function(df){
  # Drop columns that are irrelevant for prediction:
  #  - purely administrative columns (row id, timestamps, window bookkeeping)
  #  - summary-statistic columns that only carry values on "new window" rows
  # tidyselect's starts_with() accepts a character vector and drops every
  # column matching any of the prefixes, so one select() replaces the
  # original chain of 26 separate select(-starts_with(...)) calls.
  admin_prefixes <- c(
    "X", "raw_timestamp_", "cvtd_timestamp", "new_window", "num_window"
  )
  summary_prefixes <- c(
    "kurtosis_roll_", "kurtosis_picth_", "kurtosis_yaw_",
    "skewness_roll_", "skewness_pitch_", "skewness_yaw_",
    "max_roll_", "max_picth_", "max_yaw_",
    "min_roll_", "min_pitch_", "min_yaw_",
    "amplitude_roll_", "amplitude_pitch_", "amplitude_yaw_",
    "var_total_accel_",
    "avg_roll_", "stddev_roll_", "var_roll_",
    "avg_pitch_", "stddev_pitch_", "var_pitch_",
    "avg_yaw_", "stddev_yaw_", "var_yaw_",
    "var_accel_"
  )
  # Same semantics as the original: each prefix is matched case-insensitively
  # (tidyselect default) and the surviving columns keep their order.
  df %>% select(-starts_with(c(admin_prefixes, summary_prefixes)))
}
# Apply the same column clean-up to training and test data so the model
# and the final predictions see identical feature sets.
dataTrain <- pml_data_column_cleanup(dataTrain.raw)
dataTest <- pml_data_column_cleanup(dataTest.raw)
if (!exists("dataTrain.model")){
  # Partition our data 75% training and 25% testing
  dataTrain.partition <- createDataPartition(y=dataTrain$classe, p=0.75, list=FALSE)
  dataTrain.train <- dataTrain[dataTrain.partition,]
  dataTrain.test <- dataTrain[-dataTrain.partition,]
  # Random Forest with 100 trees (note: ntree is the number of trees, not a
  # depth limit). Fitted only once per session thanks to the exists() guard.
  dataTrain.model <- train(classe~., data=dataTrain.train, method="rf", ntree=100)
}
# Train Test prediction
dataTrain.test.predict <- predict(dataTrain.model, newdata=dataTrain.test)
# Test prediction (Validation)
dataTest.predict <- predict(dataTrain.model, newdata=dataTest)
# print(dataTrain.test.predict)
print(dataTest.predict)
# Calculate outside error rate and accuracy of the validation:
# accuracy = share of hold-out predictions that match the true classe.
outsideErrorRate.accuracy <- sum(dataTrain.test.predict == dataTrain.test$classe)/length(dataTrain.test.predict)
outsideErrorRate.error <- (1 - outsideErrorRate.accuracy)
print(
  ggplot(dataTrain.model) + ggtitle("Accuracy vs. Predictor")
)
print(dataTrain.model)
# Wrap up with the answers for the submission:
# write one file per prediction, submission/problem_id_<i>.txt, containing
# the i-th answer. Creates the output directory if it does not exist.
pml_write_files = function(x){
  dir.create(file.path(getwd(), 'submission'), showWarnings = FALSE)
  # seq_along() instead of 1:length(x): with an empty input, 1:length(x)
  # iterates over c(1, 0) and writes bogus problem_id_1/problem_id_0 files.
  for(i in seq_along(x)){
    filename = paste0("submission/problem_id_", i, ".txt")
    write.table(x[i], file=filename, quote=FALSE, row.names=FALSE, col.names=FALSE)
  }
}
pml_write_files(as.character(dataTest.predict)) | /Review.R | no_license | schonken/Practical_Machine_Learning_Project | R | false | false | 3,502 | r | library(dplyr)
library(caret)
set.seed(98765)
if (!exists("dataTrain.raw")){dataTrain.raw <- read.csv('data/pml-training.csv')}
if (!exists("dataTest.raw")){dataTest.raw <- read.csv('data/pml-testing.csv')}
# Clean up the column set
pml_data_column_cleanup = function(df){
# Drop purely admin related columns
df <- df %>% select(-starts_with("X"))
df <- df %>% select(-starts_with("raw_timestamp_"))
df <- df %>% select(-starts_with("cvtd_timestamp"))
df <- df %>% select(-starts_with("new_window"))
df <- df %>% select(-starts_with("num_window"))
# Drop columns that relate only to "New Window = Yes"
df <- df %>% select(-starts_with("kurtosis_roll_"))
df <- df %>% select(-starts_with("kurtosis_picth_"))
df <- df %>% select(-starts_with("kurtosis_yaw_"))
df <- df %>% select(-starts_with("skewness_roll_"))
df <- df %>% select(-starts_with("skewness_pitch_"))
df <- df %>% select(-starts_with("skewness_yaw_"))
df <- df %>% select(-starts_with("max_roll_"))
df <- df %>% select(-starts_with("max_picth_"))
df <- df %>% select(-starts_with("max_yaw_"))
df <- df %>% select(-starts_with("min_roll_"))
df <- df %>% select(-starts_with("min_pitch_"))
df <- df %>% select(-starts_with("min_yaw_"))
df <- df %>% select(-starts_with("amplitude_roll_"))
df <- df %>% select(-starts_with("amplitude_pitch_"))
df <- df %>% select(-starts_with("amplitude_yaw_"))
df <- df %>% select(-starts_with("var_total_accel_"))
df <- df %>% select(-starts_with("avg_roll_"))
df <- df %>% select(-starts_with("stddev_roll_"))
df <- df %>% select(-starts_with("var_roll_"))
df <- df %>% select(-starts_with("avg_pitch_"))
df <- df %>% select(-starts_with("stddev_pitch_"))
df <- df %>% select(-starts_with("var_pitch_"))
df <- df %>% select(-starts_with("avg_yaw_"))
df <- df %>% select(-starts_with("stddev_yaw_"))
df <- df %>% select(-starts_with("var_yaw_"))
df <- df %>% select(-starts_with("var_accel_"))
df
}
dataTrain <- pml_data_column_cleanup(dataTrain.raw)
dataTest <- pml_data_column_cleanup(dataTest.raw)
if (!exists("dataTrain.model")){
# Partition our data 75% training and 25% testing
dataTrain.partition <- createDataPartition(y=dataTrain$classe, p=0.75, list=FALSE)
dataTrain.train <- dataTrain[dataTrain.partition,]
dataTrain.test <- dataTrain[-dataTrain.partition,]
# Implemented a Random Forest (limited to depth 100) training strategy
dataTrain.model <- train(classe~., data=dataTrain.train, method="rf", ntree=100)
}
# Train Test prediction
dataTrain.test.predict <- predict(dataTrain.model, newdata=dataTrain.test)
# Test prediction (Validation)
dataTest.predict <- predict(dataTrain.model, newdata=dataTest)
# print(dataTrain.test.predict)
print(dataTest.predict)
# Calculate outside error rate and accuracy of the validation
outsideErrorRate.accuracy <- sum(dataTrain.test.predict == dataTrain.test$classe)/length(dataTrain.test.predict)
outsideErrorRate.error <- (1 - outsideErrorRate.accuracy)
print(
ggplot(dataTrain.model) + ggtitle("Accuracy vs. Predictor")
)
print(dataTrain.model)
# Wrap up with the answers for the submission
pml_write_files = function(x){
dir.create(file.path(getwd(), 'submission'), showWarnings = FALSE)
n = length(x)
for(i in 1:n){
filename = paste0("submission/problem_id_", i, ".txt")
write.table(x[i], file=filename, quote=FALSE, row.names=FALSE, col.names=FALSE)
}
}
pml_write_files(as.character(dataTest.predict)) |
tabOthers <- f7Tab(
tabName = "Others",
icon = f7Icon("more_round"),
f7Align(
side = "center",
h1("miniUI 2.0 brings other elements")
),
# skeletons
f7BlockTitle(title = "f7Skeleton") %>% f7Align(side = "center"),
f7List(
f7ListItem(title = "Item 1"),
f7ListItem(title = "Item 2")
) %>% f7Skeleton(duration = 5000),
br(),
# Messages
f7BlockTitle(title = "f7Messages") %>% f7Align(side = "center"),
f7Messages(
id = "messagelist",
f7Message(
"Lorem ipsum dolor sit amet,
consectetur adipiscing elit.
Duo Reges: constructio interrete",
src = "https://cdn.framework7.io/placeholder/people-100x100-7.jpg",
author = "David",
date = "2019-09-12",
state = "received",
type = "text"
),
f7Message(
"https://cdn.framework7.io/placeholder/cats-200x260-4.jpg",
src = "https://cdn.framework7.io/placeholder/people-100x100-9.jpg",
author = "Lia",
date = NULL,
state = "sent",
type = "img"
),
f7Message(
"Hi Bro",
src = "https://cdn.framework7.io/placeholder/people-100x100-9.jpg",
author = NULL,
date = "2019-08-15",
state = "sent",
type = "text"
)
),
br(),
# Badges
f7BlockTitle(title = "f7Badge") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Badge(32, color = "purple"),
f7Badge("Badge", color = "green"),
f7Badge(10, color = "teal"),
f7Badge("Ok", color = "orange")
),
br(),
# chips
f7BlockTitle(title = "f7Chip") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Chip(label = "Example Chip"),
f7Chip(label = "Example Chip", outline = TRUE),
f7Chip(label = "Example Chip", icon = f7Icon("add_round"), icon_status = "pink"),
f7Chip(label = "Example Chip", img = "https://lorempixel.com/64/64/people/9/"),
f7Chip(label = "Example Chip", closable = TRUE),
f7Chip(label = "Example Chip", status = "green"),
f7Chip(label = "Example Chip", status = "green", outline = TRUE)
),
br(),
# accordion
f7BlockTitle(title = "f7Accordion") %>% f7Align(side = "center"),
f7Accordion(
inputId = "accordion1",
f7AccordionItem(
title = "Item 1",
f7Block("Item 1 content")
),
f7AccordionItem(
title = "Item 2",
f7Block("Item 2 content")
)
),
f7Toggle(
inputId = "goAccordion",
label = "Toggle accordion item 1",
color = "orange"
),
br(),
# swiper
f7BlockTitle(title = "f7Swiper") %>% f7Align(side = "center"),
f7Swiper(
id = "my-swiper",
f7Slide(
plot_ly(z = ~volcano, type = "contour")
),
f7Slide(
plot_ly(data = iris, x = ~Sepal.Length, y = ~Petal.Length)
)
),
br(), br(), br(),
# timelines
f7BlockTitle(title = "f7PhotoBrowser") %>% f7Align(side = "center"),
f7Block(
f7PhotoBrowser(
id = "photobrowser1",
label = "Open",
theme = "light",
type = "standalone",
photos = c(
"https://cdn.framework7.io/placeholder/sports-1024x1024-1.jpg",
"https://cdn.framework7.io/placeholder/sports-1024x1024-2.jpg",
"https://cdn.framework7.io/placeholder/sports-1024x1024-3.jpg"
)
)
),
br(), br(),
# timelines
f7BlockTitle(title = "f7Timeline") %>% f7Align(side = "center"),
f7Timeline(
sides = TRUE,
f7TimelineItem(
"Another text",
date = "01 Dec",
card = FALSE,
time = "12:30",
title = "Title",
subtitle = "Subtitle",
side = "left"
),
f7TimelineItem(
"Another text",
date = "02 Dec",
card = TRUE,
time = "13:00",
title = "Title",
subtitle = "Subtitle"
),
f7TimelineItem(
"Another text",
date = "03 Dec",
card = FALSE,
time = "14:45",
title = "Title",
subtitle = "Subtitle"
)
),
br(),
# progress bars
f7BlockTitle(title = "f7Progress") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Progress(id = "pg1", value = 10, color = "yellow"),
f7Slider(
inputId = "updatepg1",
label = "Update progress 1",
max = 100,
min = 0,
value = 50,
scale = TRUE
),
br(),
f7Progress(id = "pg2", value = 100, color = "green"),
br(),
f7Progress(id = "pg3", value = 50, color = "deeppurple"),
br(),
f7ProgressInf()
),
br(),
# gauges
f7BlockTitle(title = "f7Gauge") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Row(
f7Col(
f7Gauge(
id = "mygauge1",
type = "semicircle",
value = 50,
borderColor = "#2196f3",
borderWidth = 10,
valueText = "50%",
valueFontSize = 41,
valueTextColor = "#2196f3",
labelText = "amount of something"
)
),
f7Col(
f7Gauge(
id = "mygauge2",
type = "circle",
value = 30,
borderColor = "orange",
borderWidth = 10,
valueText = "30%",
valueFontSize = 41,
valueTextColor = "orange",
labelText = "Other thing"
)
)
),
f7Stepper(
inputId = "updategauge1",
label = "Update gauge 1",
step = 10,
min = 0,
max = 100,
value = 50
)
),
# update f7Panel
br(),
f7BlockTitle(title = "updateF7Panel") %>% f7Align(side = "center"),
f7Block(
f7Button(
inputId = "goPanel",
label = "Toggle left panel"
)
)
)
| /inst/examples/gallery/tabs/tabOthers.R | no_license | iMarcello/shinyMobile | R | false | false | 5,529 | r | tabOthers <- f7Tab(
tabName = "Others",
icon = f7Icon("more_round"),
f7Align(
side = "center",
h1("miniUI 2.0 brings other elements")
),
# skeletons
f7BlockTitle(title = "f7Skeleton") %>% f7Align(side = "center"),
f7List(
f7ListItem(title = "Item 1"),
f7ListItem(title = "Item 2")
) %>% f7Skeleton(duration = 5000),
br(),
# Messages
f7BlockTitle(title = "f7Messages") %>% f7Align(side = "center"),
f7Messages(
id = "messagelist",
f7Message(
"Lorem ipsum dolor sit amet,
consectetur adipiscing elit.
Duo Reges: constructio interrete",
src = "https://cdn.framework7.io/placeholder/people-100x100-7.jpg",
author = "David",
date = "2019-09-12",
state = "received",
type = "text"
),
f7Message(
"https://cdn.framework7.io/placeholder/cats-200x260-4.jpg",
src = "https://cdn.framework7.io/placeholder/people-100x100-9.jpg",
author = "Lia",
date = NULL,
state = "sent",
type = "img"
),
f7Message(
"Hi Bro",
src = "https://cdn.framework7.io/placeholder/people-100x100-9.jpg",
author = NULL,
date = "2019-08-15",
state = "sent",
type = "text"
)
),
br(),
# Badges
f7BlockTitle(title = "f7Badge") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Badge(32, color = "purple"),
f7Badge("Badge", color = "green"),
f7Badge(10, color = "teal"),
f7Badge("Ok", color = "orange")
),
br(),
# chips
f7BlockTitle(title = "f7Chip") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Chip(label = "Example Chip"),
f7Chip(label = "Example Chip", outline = TRUE),
f7Chip(label = "Example Chip", icon = f7Icon("add_round"), icon_status = "pink"),
f7Chip(label = "Example Chip", img = "https://lorempixel.com/64/64/people/9/"),
f7Chip(label = "Example Chip", closable = TRUE),
f7Chip(label = "Example Chip", status = "green"),
f7Chip(label = "Example Chip", status = "green", outline = TRUE)
),
br(),
# accordion
f7BlockTitle(title = "f7Accordion") %>% f7Align(side = "center"),
f7Accordion(
inputId = "accordion1",
f7AccordionItem(
title = "Item 1",
f7Block("Item 1 content")
),
f7AccordionItem(
title = "Item 2",
f7Block("Item 2 content")
)
),
f7Toggle(
inputId = "goAccordion",
label = "Toggle accordion item 1",
color = "orange"
),
br(),
# swiper
f7BlockTitle(title = "f7Swiper") %>% f7Align(side = "center"),
f7Swiper(
id = "my-swiper",
f7Slide(
plot_ly(z = ~volcano, type = "contour")
),
f7Slide(
plot_ly(data = iris, x = ~Sepal.Length, y = ~Petal.Length)
)
),
br(), br(), br(),
# timelines
f7BlockTitle(title = "f7PhotoBrowser") %>% f7Align(side = "center"),
f7Block(
f7PhotoBrowser(
id = "photobrowser1",
label = "Open",
theme = "light",
type = "standalone",
photos = c(
"https://cdn.framework7.io/placeholder/sports-1024x1024-1.jpg",
"https://cdn.framework7.io/placeholder/sports-1024x1024-2.jpg",
"https://cdn.framework7.io/placeholder/sports-1024x1024-3.jpg"
)
)
),
br(), br(),
# timelines
f7BlockTitle(title = "f7Timeline") %>% f7Align(side = "center"),
f7Timeline(
sides = TRUE,
f7TimelineItem(
"Another text",
date = "01 Dec",
card = FALSE,
time = "12:30",
title = "Title",
subtitle = "Subtitle",
side = "left"
),
f7TimelineItem(
"Another text",
date = "02 Dec",
card = TRUE,
time = "13:00",
title = "Title",
subtitle = "Subtitle"
),
f7TimelineItem(
"Another text",
date = "03 Dec",
card = FALSE,
time = "14:45",
title = "Title",
subtitle = "Subtitle"
)
),
br(),
# progress bars
f7BlockTitle(title = "f7Progress") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Progress(id = "pg1", value = 10, color = "yellow"),
f7Slider(
inputId = "updatepg1",
label = "Update progress 1",
max = 100,
min = 0,
value = 50,
scale = TRUE
),
br(),
f7Progress(id = "pg2", value = 100, color = "green"),
br(),
f7Progress(id = "pg3", value = 50, color = "deeppurple"),
br(),
f7ProgressInf()
),
br(),
# gauges
f7BlockTitle(title = "f7Gauge") %>% f7Align(side = "center"),
f7Block(
strong = TRUE,
f7Row(
f7Col(
f7Gauge(
id = "mygauge1",
type = "semicircle",
value = 50,
borderColor = "#2196f3",
borderWidth = 10,
valueText = "50%",
valueFontSize = 41,
valueTextColor = "#2196f3",
labelText = "amount of something"
)
),
f7Col(
f7Gauge(
id = "mygauge2",
type = "circle",
value = 30,
borderColor = "orange",
borderWidth = 10,
valueText = "30%",
valueFontSize = 41,
valueTextColor = "orange",
labelText = "Other thing"
)
)
),
f7Stepper(
inputId = "updategauge1",
label = "Update gauge 1",
step = 10,
min = 0,
max = 100,
value = 50
)
),
# update f7Panel
br(),
f7BlockTitle(title = "updateF7Panel") %>% f7Align(side = "center"),
f7Block(
f7Button(
inputId = "goPanel",
label = "Toggle left panel"
)
)
)
|
##Merges expression and DHS data
##Generates plots to check the association between DHS+ LTRs and gene expression
setwd('~/Deniz_2019_AML/Expression')
library(vioplot)
##expression data
fpkm = read.delim('BP_RNA_FPKM.txt.gz',as.is=T)
##DHS data
dhs = read.delim('../DHS_analysis/allLTR_DHS_overlaps.txt',as.is=T)
######################
#### prepare data ####
##get average expression values for cell groups
hsc = rowMeans(log2(fpkm[,grep('stem.cell',colnames(fpkm))]+0.01))
mono = rowMeans(log2(fpkm[,grep('monocyte',colnames(fpkm))]+0.01))
macro = rowMeans(log2(fpkm[,grep('macrophage',colnames(fpkm))]+0.01))
aml = rowMeans(log2(fpkm[,grep('Leukemia',colnames(fpkm))]+0.01))
##make matching dhs and expression matrices for AML samples
meta = read.delim('../blueprint_files.tsv',as.is=T)
dhs.meta = meta[meta$Experiment=='DNase-Seq' & meta$Format=='BED' & meta$Sub.group=='Acute Myeloid Leukemia',]
for (i in 1:ncol(dhs)) {
id = grep(colnames(dhs)[i],dhs.meta$URL)[1]
if (!is.na(id)) colnames(dhs)[i] = dhs.meta$Donor[id]
}
expr.donor = gsub('Acute.Myeloid.Leukemia.','',colnames(fpkm))
aml.dhs = as.matrix(dhs[,colnames(dhs) %in% expr.donor]>=1)
aml.expr = log2(as.matrix(fpkm[,match(colnames(aml.dhs),expr.donor)])+0.01)
##get average expression values from dhs and non-dhs AML groups
av.dhs = numeric(nrow(aml.expr))
av.nodhs = numeric(nrow(aml.expr))
for (i in 1:nrow(aml.expr)) {
if (is.na(fpkm$LTR_name[i])) {
av.dhs[i] = NA
av.nodhs[i] = NA
} else {
is.dhs = aml.dhs[fpkm$LTR_name[i]==dhs$name]
av.dhs[i] = mean(aml.expr[i,is.dhs])
av.nodhs[i] = mean(aml.expr[i,!is.dhs])
}
}
##make LTRs groups based on DHS data
diff.dhs = dhs[,grepl('Monocytes',colnames(dhs)) | grepl('Macrophages',colnames(dhs))]>=1
nodhs.ltrs = dhs$name[rowSums(aml.dhs)==0 & rowSums(diff.dhs)==0]
aml.ltrs = dhs$name[rowSums(aml.dhs)>=2 & rowSums(diff.dhs)==0]
ubi.ltrs = dhs$name[rowSums(aml.dhs)>=2 & rowSums(diff.dhs)>=2]
##set distance threshold
near = fpkm$LTR_distance <= 50000
##############################################################
#### plot average AML expression for different LTR groups ####
vioplot(aml[near & fpkm$LTR_name %in% nodhs.ltrs],
aml[near & fpkm$LTR_name %in% aml.ltrs],
aml[near & fpkm$LTR_name %in% ubi.ltrs],
names=c('None','AML','Ubi'),
col='wheat',yaxt='n',ylab='log2 FPKM')
axis(2,seq(-5,15,5),las=1)
##############################################################
#### compare expression between DHS+ and DHS- AML samples ####
plus = av.dhs[near & fpkm$LTR_name %in% aml.ltrs]
minus = av.nodhs[near & fpkm$LTR_name %in% aml.ltrs]
plot(minus,plus,pch=19,cex=0.5,col='grey',las=1,
xlab='DHS- AMLs',ylab='DHS+ AMLs')
abline(0,1,lty=2,col='blue')
##pinpoint DHS-associated interest
sel = (plus>0 | minus>0) & plus-minus>2
points(minus[sel],plus[sel],pch=19,cex=0.5,col='red')
aml.genes = fpkm$gene_name[near & fpkm$LTR_name %in% aml.ltrs]
goi = cbind(aml.genes,minus,plus)[sel,]
#########################################################
#### plot expression of AML LTRs in other cell types ####
##(not included in the paper)
exp.list = list(hsc,mono,macro,av.dhs,av.nodhs)
aml.list = lapply(exp.list,function(x) x[near & fpkm$LTR_name %in% aml.ltrs])
vioplot(aml.list,names=c('HSC','Mono','Macro','DHS+','DHS-'),
col='wheat',yaxt='n',ylab='log2 FPKM')
axis(2,seq(-5,15,5),las=1)
| /Expression/LTR_nearest_expression.R | permissive | MBrancoLab/Deniz_2019_AML | R | false | false | 3,364 | r | ##Merges expression and DHS data
##Generates plots to check the association between DHS+ LTRs and gene expression
setwd('~/Deniz_2019_AML/Expression')
library(vioplot)
##expression data
fpkm = read.delim('BP_RNA_FPKM.txt.gz',as.is=T)
##DHS data
dhs = read.delim('../DHS_analysis/allLTR_DHS_overlaps.txt',as.is=T)
######################
#### prepare data ####
##get average expression values for cell groups
hsc = rowMeans(log2(fpkm[,grep('stem.cell',colnames(fpkm))]+0.01))
mono = rowMeans(log2(fpkm[,grep('monocyte',colnames(fpkm))]+0.01))
macro = rowMeans(log2(fpkm[,grep('macrophage',colnames(fpkm))]+0.01))
aml = rowMeans(log2(fpkm[,grep('Leukemia',colnames(fpkm))]+0.01))
##make matching dhs and expression matrices for AML samples
meta = read.delim('../blueprint_files.tsv',as.is=T)
dhs.meta = meta[meta$Experiment=='DNase-Seq' & meta$Format=='BED' & meta$Sub.group=='Acute Myeloid Leukemia',]
for (i in 1:ncol(dhs)) {
id = grep(colnames(dhs)[i],dhs.meta$URL)[1]
if (!is.na(id)) colnames(dhs)[i] = dhs.meta$Donor[id]
}
expr.donor = gsub('Acute.Myeloid.Leukemia.','',colnames(fpkm))
aml.dhs = as.matrix(dhs[,colnames(dhs) %in% expr.donor]>=1)
aml.expr = log2(as.matrix(fpkm[,match(colnames(aml.dhs),expr.donor)])+0.01)
##get average expression values from dhs and non-dhs AML groups
av.dhs = numeric(nrow(aml.expr))
av.nodhs = numeric(nrow(aml.expr))
for (i in 1:nrow(aml.expr)) {
if (is.na(fpkm$LTR_name[i])) {
av.dhs[i] = NA
av.nodhs[i] = NA
} else {
is.dhs = aml.dhs[fpkm$LTR_name[i]==dhs$name]
av.dhs[i] = mean(aml.expr[i,is.dhs])
av.nodhs[i] = mean(aml.expr[i,!is.dhs])
}
}
##make LTRs groups based on DHS data
diff.dhs = dhs[,grepl('Monocytes',colnames(dhs)) | grepl('Macrophages',colnames(dhs))]>=1
nodhs.ltrs = dhs$name[rowSums(aml.dhs)==0 & rowSums(diff.dhs)==0]
aml.ltrs = dhs$name[rowSums(aml.dhs)>=2 & rowSums(diff.dhs)==0]
ubi.ltrs = dhs$name[rowSums(aml.dhs)>=2 & rowSums(diff.dhs)>=2]
##set distance threshold
near = fpkm$LTR_distance <= 50000
##############################################################
#### plot average AML expression for different LTR groups ####
vioplot(aml[near & fpkm$LTR_name %in% nodhs.ltrs],
aml[near & fpkm$LTR_name %in% aml.ltrs],
aml[near & fpkm$LTR_name %in% ubi.ltrs],
names=c('None','AML','Ubi'),
col='wheat',yaxt='n',ylab='log2 FPKM')
axis(2,seq(-5,15,5),las=1)
##############################################################
#### compare expression between DHS+ and DHS- AML samples ####
plus = av.dhs[near & fpkm$LTR_name %in% aml.ltrs]
minus = av.nodhs[near & fpkm$LTR_name %in% aml.ltrs]
plot(minus,plus,pch=19,cex=0.5,col='grey',las=1,
xlab='DHS- AMLs',ylab='DHS+ AMLs')
abline(0,1,lty=2,col='blue')
##pinpoint DHS-associated interest
sel = (plus>0 | minus>0) & plus-minus>2
points(minus[sel],plus[sel],pch=19,cex=0.5,col='red')
aml.genes = fpkm$gene_name[near & fpkm$LTR_name %in% aml.ltrs]
goi = cbind(aml.genes,minus,plus)[sel,]
#########################################################
#### plot expression of AML LTRs in other cell types ####
##(not included in the paper)
exp.list = list(hsc,mono,macro,av.dhs,av.nodhs)
aml.list = lapply(exp.list,function(x) x[near & fpkm$LTR_name %in% aml.ltrs])
vioplot(aml.list,names=c('HSC','Mono','Macro','DHS+','DHS-'),
col='wheat',yaxt='n',ylab='log2 FPKM')
axis(2,seq(-5,15,5),las=1)
|
# Plots global active power versus time to png file.
plot2 <- function()
{
# Read required data. Uses powerUsage.R added to repository.
df <- powerUsage()
# Prepare to plot graph to png file.
png(file = "plot2.png")
# Hide x axis label.
par(mar = c(3.1, 4.1, 3.1, 2.1))
# Plot global active power versus time.
with(df, plot(Time, Global_active_power,
# Line plot.
type = "l",
# y axis label.
ylab = "Global Active Power (kilowatts)"))
# Finish plotting.
dev.off()
} | /plot2.R | no_license | rochbe/ExData_Plotting1 | R | false | false | 546 | r | # Plots global active power versus time to png file.
plot2 <- function()
{
# Read required data. Uses powerUsage.R added to repository.
df <- powerUsage()
# Prepare to plot graph to png file.
png(file = "plot2.png")
# Hide x axis label.
par(mar = c(3.1, 4.1, 3.1, 2.1))
# Plot global active power versus time.
with(df, plot(Time, Global_active_power,
# Line plot.
type = "l",
# y axis label.
ylab = "Global Active Power (kilowatts)"))
# Finish plotting.
dev.off()
} |
#### Load libraries ####
# Load tidyverse - note the output lets you know this contains ggplot2
library(tidyverse)
# Load gapminder - this contains the data we will use
library(gapminder)
#### Import and view data ####
# Load data
gapminder <- gapminder::gapminder
# Look at the structure of the data. You can use glimpse(), summary(), or head().
glimpse(gapminder)
# Create a new data frame with only the data for 2007
gapminder07 <- filter(gapminder, year==2007)
#### Basic plot ####
# Scatterplot of population over time
ggplot(gapminder) +
geom_point(aes(x=year, y=pop))
# Add labels to the plot
ggplot(gapminder) +
geom_point(aes(x=year, y=pop)) +
labs(title="Population over time", x="Year", y="Population")
# Your turn: plot life expectancy as a function of GDP per capita for the year 2007, and add labels
ggplot(gapminder07) +
geom_point(aes(x=gdpPercap, y=lifeExp)) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
#### Choosing geoms ####
# Let's look at geom_smooth(). Add a geom_smooth() layer to the previous plot.
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
labs(title="Do people in richer countries live longer?",
x="GDP per capita", y="Life expectancy",
subtitle="Gapminder 2007 data")
# How about geom_hline()? When you add it, you'll need to provide a new aesthetic option.
ggplot(gapminder07) +
geom_point(aes(x=gdpPercap, y=lifeExp)) +
geom_hline(aes(yintercept=mean(lifeExp))) +
labs(title="Do people in richer countries live longer?",
x="GDP per capita", y="Life expectancy",
subtitle="Gapminder 2007 data")
# Your turn: Plot the life expectancy of each continent in 2007
# Look at the ggplot cheatsheet and decide what kind of geom to use: https://raw.githubusercontent.com/rstudio/cheatsheets/master/data-visualization-2.1.pdf
# Option 1: Box plot
ggplot(gapminder07) +
geom_boxplot(aes(x=continent, y=lifeExp))
# Option 2: Column plot
ggplot(gapminder07) +
geom_col(aes(x=continent, y=lifeExp))
#### Grouping variables ####
# Grouping by color
ggplot(gapminder) +
geom_point(aes(x=gdpPercap, y=lifeExp, col=year)) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Grouping by facet
ggplot(gapminder, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
facet_wrap(~year) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Add scales="free" in facet_wrap() and see what happens
ggplot(gapminder, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
facet_wrap(~year, scales="free") +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Your turn: Let's visualize life expectancy by continent in 2007 again. This time, use grouping by color or facet.
# Bonus: Use two geoms.
# Option 1: By colors
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp, col=continent, size=pop)) +
geom_point() +
labs(title="Life expectancy as a function of GDP per capita, by continent", x="GDP per capita", y="Life expectancy")
# Option 2: By facet
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp, size=pop)) +
geom_point() +
geom_smooth() +
facet_wrap(~continent, scales="free") +
labs(title="Life expectancy as a function of GDP per capita, by continent", x="GDP per capita", y="Life expectancy")
# Option 3: By facet
ggplot(gapminder07, aes(x=lifeExp)) +
geom_density() +
facet_wrap(~continent, scales="free")+
labs(title="Life expectancy by continent", x="Life expectancy", y="")
#### Choose your own adventure! ####
# Create a plot that includes two geoms and facets by some variable.
| /answers/exercise1_answers.R | no_license | kumarhk/2021-teaching-r-workshop | R | false | false | 3,876 | r | #### Load libraries ####
# Load tidyverse - note the output lets you know this contains ggplot2
library(tidyverse)
# Load gapminder - this contains the data we will use
library(gapminder)
#### Import and view data ####
# Load data
gapminder <- gapminder::gapminder
# Look at the structure of the data. You can use glimpse(), summary(), or head().
glimpse(gapminder)
# Create a new data frame with only the data for 2007
gapminder07 <- filter(gapminder, year==2007)
#### Basic plot ####
# Scatterplot of population over time
ggplot(gapminder) +
geom_point(aes(x=year, y=pop))
# Add labels to the plot
ggplot(gapminder) +
geom_point(aes(x=year, y=pop)) +
labs(title="Population over time", x="Year", y="Population")
# Your turn: plot life expectancy as a function of GDP per capita for the year 2007, and add labels
ggplot(gapminder07) +
geom_point(aes(x=gdpPercap, y=lifeExp)) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
#### Choosing geoms ####
# Let's look at geom_smooth(). Add a geom_smooth() layer to the previous plot.
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
labs(title="Do people in richer countries live longer?",
x="GDP per capita", y="Life expectancy",
subtitle="Gapminder 2007 data")
# How about geom_hline()? When you add it, you'll need to provide a new aesthetic option.
ggplot(gapminder07) +
geom_point(aes(x=gdpPercap, y=lifeExp)) +
geom_hline(aes(yintercept=mean(lifeExp))) +
labs(title="Do people in richer countries live longer?",
x="GDP per capita", y="Life expectancy",
subtitle="Gapminder 2007 data")
# Your turn: Plot the life expectancy of each continent in 2007
# Look at the ggplot cheatsheet and decide what kind of geom to use: https://raw.githubusercontent.com/rstudio/cheatsheets/master/data-visualization-2.1.pdf
# Option 1: Box plot
ggplot(gapminder07) +
geom_boxplot(aes(x=continent, y=lifeExp))
# Option 2: Column plot
ggplot(gapminder07) +
geom_col(aes(x=continent, y=lifeExp))
#### Grouping variables ####
# Grouping by color
ggplot(gapminder) +
geom_point(aes(x=gdpPercap, y=lifeExp, col=year)) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Grouping by facet
ggplot(gapminder, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
facet_wrap(~year) +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Add scales="free" in facet_wrap() and see what happens
ggplot(gapminder, aes(x=gdpPercap, y=lifeExp)) +
geom_point() +
geom_smooth() +
facet_wrap(~year, scales="free") +
labs(title="Do people in richer countries live longer?", x="GDP per capita", y="Life expectancy")
# Your turn: Let's visualize life expectancy by continent in 2007 again. This time, use grouping by color or facet.
# Bonus: Use two geoms.
# Option 1: By colors
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp, col=continent, size=pop)) +
geom_point() +
labs(title="Life expectancy as a function of GDP per capita, by continent", x="GDP per capita", y="Life expectancy")
# Option 2: By facet
ggplot(gapminder07, aes(x=gdpPercap, y=lifeExp, size=pop)) +
geom_point() +
geom_smooth() +
facet_wrap(~continent, scales="free") +
labs(title="Life expectancy as a function of GDP per capita, by continent", x="GDP per capita", y="Life expectancy")
# Option 3: By facet
ggplot(gapminder07, aes(x=lifeExp)) +
geom_density() +
facet_wrap(~continent, scales="free")+
labs(title="Life expectancy by continent", x="Life expectancy", y="")
#### Choose your own adventure! ####
# Create a plot that includes two geoms and facets by some variable.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config-tools.R
\name{load_config}
\alias{load_config}
\title{Load and validate configuration file}
\usage{
load_config(config_path)
}
\arguments{
\item{config_path}{Path to the WGSAParsr configuration file to load and
validate}
}
\value{
a tibble that can be used for building field lists for parsing
}
\description{
WGSAParsr configuration files are flexible tab-separated files. They must
include a header with field names, and the following fields, in any order:
\itemize{
\item \strong{field} column headings matching the WGSA output file to be
parsed
\item \strong{SNV} logical (TRUE/FALSE) indicating whether the field
should be parsed for snv annotation
\item \strong{indel} logical (TRUE/FALSE) indicating whether the field
should be parsed for indel annotation
\item \strong{dbnsfp} logical (TRUE/FALSE) indicating whether the field
should be parsed for dbnsfp annotation
\item \strong{pivotGroup} numerical value to group annotations for pivoting
\item \strong{pivotChar} character separating fields that should be used
for pivoting
\item \strong{parseGroup} numerical value to group annotations for other
parsing
\item \strong{transformation} a string describing the transformation to be
performed. Values may include:
\itemize{
\item \strong{max} select the maximum value
\item \strong{min} select the minimum value
\item \strong{pick_Y} select "Y" if present
\item \strong{pick_N} select "N" if present
\item \strong{pick_A} select A>D>P>N (MutationTaster_pred field)
\item \strong{clean} remove the \{n\}. E.g.: "Enhancer\{4\}" ->
"Enhancer"
\item \strong{distinct} select unique values. NOTE: must have a
pivotGroup and pivotChar = |
}
}
}
\details{
Additionally, the following fields may be included, and are processed during
configuration file loading, but are not required:
\itemize{
\item \strong{order} numerical value for column ordering in parsed output
\item \strong{sourceGroup} numerical value for column grouping/ordering in
output
}
Other columns (such as notes) may be included in the configuration file,
but will not be validated or imported with this function. The configuration
file may include comments beginning with #.
}
\examples{
\dontrun{
local_config <- load_config("config.tsv")
}
freeze_5_config <- load_config(wgsaparsr_example("fr_5_config.tsv"))
}
| /man/load_config.Rd | no_license | bestbioinformatics/wgsaparsr | R | false | true | 2,470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config-tools.R
\name{load_config}
\alias{load_config}
\title{Load and validate configuration file}
\usage{
load_config(config_path)
}
\arguments{
\item{config_path}{Path to the WGSAParsr configuration file to load and
validate}
}
\value{
a tibble that can be used for building field lists for parsing
}
\description{
WGSAParsr configuration files are flexible tab-separated files. They must
include a header with field names, and the following fields, in any order:
\itemize{
\item \strong{field} column headings matching the WGSA output file to be
parsed
\item \strong{SNV} logical (TRUE/FALSE) indicating whether the field
should be parsed for snv annotation
\item \strong{indel} logical (TRUE/FALSE) indicating whether the field
should be parsed for indel annotation
\item \strong{dbnsfp} logical (TRUE/FALSE) indicating whether the field
should be parsed for dbnsfp annotation
\item \strong{pivotGroup} numerical value to group annotations for pivoting
\item \strong{pivotChar} character separating fields that should be used
for pivoting
\item \strong{parseGroup} numerical value to group annotations for other
parsing
\item \strong{transformation} a string describing the transformation to be
performed. Values may include:
\itemize{
\item \strong{max} select the maximum value
\item \strong{min} select the minimum value
\item \strong{pick_Y} select "Y" if present
\item \strong{pick_N} select "N" if present
\item \strong{pick_A} select A>D>P>N (MutationTaster_pred field)
\item \strong{clean} remove the \{n\}. E.g.: "Enhancer\{4\}" ->
"Enhancer"
\item \strong{distinct} select unique values. NOTE: must have a
pivotGroup and pivotChar = |
}
}
}
\details{
Additionally, the following fields may be included, and are processed during
configuration file loading, but are not required:
\itemize{
\item \strong{order} numerical value for column ordering in parsed output
\item \strong{sourceGroup} numerical value for column grouping/ordering in
output
}
Other columns (such as notes) may be included in the configuration file,
but will not be validated or imported with this function. The configuration
file may include comments beginning with #.
}
\examples{
\dontrun{
local_config <- load_config("config.tsv")
}
freeze_5_config <- load_config(wgsaparsr_example("fr_5_config.tsv"))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/exploits.r
\name{shodan_exploit_search}
\alias{shodan_exploit_search}
\title{Search for Exploits}
\usage{
shodan_exploit_search(query = NULL, facets = NULL, page = 1)
}
\arguments{
\item{query}{Search query used to search the database of known exploits. See
\url{https://developer.shodan.io/api/exploits/rest} for all supported
search filters.}
\item{facets}{A comma-separated list of properties to get summary information on.
The following facets are currently supported: "\code{author}",
"\code{platform}", "\code{port}", "\code{source}" and "\code{type}.
If \code{length(facets) > 1)} this function will
concatenate the vector with commas to send to Shodan.}
\item{page}{The page number to page through results \code{100} at a time
(default: \code{1})}
}
\description{
Search across a variety of data sources for exploits and use facets to
get summary information.
}
\references{
\url{https://developer.shodan.io/api/exploits/rest}
}
| /man/shodan_exploit_search.Rd | no_license | Leocodefocus/shodan | R | false | false | 1,026 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/exploits.r
\name{shodan_exploit_search}
\alias{shodan_exploit_search}
\title{Search for Exploits}
\usage{
shodan_exploit_search(query = NULL, facets = NULL, page = 1)
}
\arguments{
\item{query}{Search query used to search the database of known exploits. See
\url{https://developer.shodan.io/api/exploits/rest} for all supported
search filters.}
\item{facets}{A comma-separated list of properties to get summary information on.
The following facets are currently supported: "\code{author}",
"\code{platform}", "\code{port}", "\code{source}" and "\code{type}.
If \code{length(facets) > 1)} this function will
concatenate the vector with commas to send to Shodan.}
\item{page}{The page number to page through results \code{100} at a time
(default: \code{1})}
}
\description{
Search across a variety of data sources for exploits and use facets to
get summary information.
}
\references{
\url{https://developer.shodan.io/api/exploits/rest}
}
|
#' Kernel RV Coefficient Test (KRV)
#'
#' Kernel RV coefficient test to evaluate the overall association between
#' microbiome composition and high-dimensional or structured phenotype or genotype.
#'
#' kernels.otu should be a list of numerical n by n kernel matrices, or a single
#' n by n kernel matrix, where n is sample size.
#'
#' When kernel.y is a method ("Gaussian" or "linear") to compute the kernel of
#' phenotype, y should be a numerical phenotype matrix, and X (if not NULL)
#' should be a numeric matrix of covariates. Both y and X should have n rows.
#'
#' When kernel.y is a kernel matrix for the phenotype, there is no need to provide
#' X and y, and they will be ignored if provided. In this case, kernel.y and
#' kernel.otu should both be numeric matrices with the same number of rows and columns.
#'
#' Missing data is not permitted. Please remove all individuals with missing
#' kernel.otu, y (if not NULL), X (if not NULL), and kernel.y (if a matrix is
#' entered) prior to analysis.
#'
#' @param y A numeric n by p matrix of p continuous phenotype variables and
#' sample size n (default = NULL). If it is NULL, a
#' phenotype kernel matrix must be entered for "kernel.y". Defaults to NULL.
#' @param X A numeric n by q matrix, containing q additional covariates
#' (default = NULL). If NULL, an intercept only model is used. If the first
#' column of X is not uniformly 1, then an intercept column will be added.
#' @param adjust.type Possible values are "none" (default if X is null),
#' "phenotype" to adjust only the y variable (only possible if y is a numeric
#' phenotype matrix rather than a pre-computed kernel), or "both" to adjust
#' both the X and Y kernels.
#' @param kernels.otu A numeric OTU n by n kernel matrix or a list of matrices,
#' where n is the sample size. It can be constructed from microbiome data, such
#' as by transforming from a distance metric.
#' @param kernel.y Either a numerical n by n kernel matrix for phenotypes or a
#' method to compute the kernel of phenotype. Methods are "Gaussian" or "linear".
#' A Gaussian kernel (kernel.y="Gaussian") can capture the general relationship
#' between microbiome and phenotypes; a linear kernel (kernel.y="linear")
#' may be preferred if the underlying relationship is close to linear.
#' @param omnibus A string equal to either "Cauchy" or "kernel_om" (or unambiguous
#' abbreviations thereof), specifying whether to use the Cauchy combination test
#' or an omnibus kernel to generate the omnibus p-value.
#' @param returnKRV A logical indicating whether to return the KRV statistic. Defaults to FALSE.
#' @param returnR2 A logical indicating whether to return the R-squared coefficient. Defaults to FALSE.
#'
#' @return
#' If only one candidate kernel matrix is considered, returns a list containing the p-value for the candidate kernel matrix.
#' If more than one candidate kernel matrix is considered, returns a list containing the following elements:
#' \item{p_values}{P-value for each candidate kernel matrix}
#' \item{omnibus_p}{Omnibus p-value}
#' \item{KRV}{A vector of kernel RV statistics (a measure of effect size), one for each candidate kernel matrix. Only returned if returnKRV = TRUE}
#' \item{R2}{A vector of R-squared statistics, one for each candidate kernel matrix. Only returned if returnR2 = TRUE}
#'
#'@author
#'Nehemiah Wilson, Haotian Zheng, Xiang Zhan, Ni Zhao
#'
#'@references
#' Zheng, Haotian, Zhan, X., Plantinga, A., Zhao, N., and Wu, M.C. A Fast Small-Sample Kernel Independence Test for Microbiome
#' Community-Level Association Analysis. Biometrics. 2017 Mar 10. doi: 10.1111/biom.12684.
#'
#' Liu, Hongjiao, Ling, W., Hua, X., Moon, J.Y., Williams-Nguyen, J., Zhan, X., Plantinga, A.M., Zhao, N.,
#' Zhang, A., Durazo-Arzivu, R.A., Knight, R., Qi, Q., Burk, R.D., Kaplan, R.C., and Wu, M.C.
#' Kernel-based genetic association analysis for microbiome phenotypes identifies host genetic
#' drivers of beta-diversity. 2021+
#'
#'@importFrom PearsonDS ppearsonIII
#'
#'@examples
#'library(GUniFrac)
#'library(MASS)
#'
#'data(throat.tree)
#'data(throat.otu.tab)
#'data(throat.meta)
#'
#' ## Simulate covariate data
#' set.seed(123)
#' n = nrow(throat.otu.tab)
#' Sex <- throat.meta$Sex
#' Smoker <- throat.meta$SmokingStatus
#' anti <- throat.meta$AntibioticUsePast3Months_TimeFromAntibioticUsage
#' Male = (Sex == "Male")**2
#' Smoker = (Smoker == "Smoker") **2
#' Anti = (anti != "None")^2
#' cova = cbind(1, Male, Smoker, Anti)
#'
#' ## Simulate microbiome data
#' otu.tab.rff <- Rarefy(throat.otu.tab)$otu.tab.rff
#' unifracs <- GUniFrac(otu.tab.rff, throat.tree, alpha=c(0, 0.5, 1))$unifracs
#' # Distance matrices
#' D.weighted = unifracs[,,"d_1"]
#' D.unweighted = unifracs[,,"d_UW"]
#' # Kernel matrices
#' K.weighted = D2K(D.weighted)
#' K.unweighted = D2K(D.unweighted)
#'
#' if (requireNamespace("vegan")) {
#' library(vegan)
#' D.BC = as.matrix(vegdist(otu.tab.rff, method="bray"))
#' K.BC = D2K(D.BC)
#'}
#'
#' # Simulate phenotype data
#' rho = 0.2
#' Va = matrix(rep(rho, (2*n)^2), 2*n, 2*n)+diag(1-rho, 2*n)
#' Phe = mvrnorm(n, rep(0, 2*n), Va)
#' K.y = Phe %*% t(Phe) # phenotype kernel
#'
#' # Simulate genotype data
#' G = matrix(rbinom(n*10, 2, 0.1), n, 10)
#' K.g = G %*% t(G) # genotype kernel
#'
#'
#' ## Unadjusted analysis (microbiome and phenotype)
#' KRV(y = Phe, kernels.otu = K.weighted, kernel.y = "Gaussian") # numeric y
#' KRV(kernels.otu = K.weighted, kernel.y = K.y) # kernel y
#'
#' ## Adjusted analysis (phenotype only)
#' KRV(kernels.otu = K.weighted, y = Phe, kernel.y = "linear", X = cova, adjust.type = "phenotype")
#'
#' if (requireNamespace("vegan")) {
#' ## Adjusted analysis (adjust both kernels; microbiome and phenotype)
#' KRV(kernels.otu = K.BC, kernel.y = K.y, X = cova, adjust.type='both')
#'
#' ## Adjusted analysis (adjust both kernels; microbiome and genotype)
#' KRV(kernels.otu = K.BC, kernel.y = K.g, X = cova, adjust.type='both')
#' }
#'
#'
#'@export
KRV <- function(y = NULL, X = NULL, adjust.type = NULL, kernels.otu, kernel.y,
                omnibus = "kernel_om", returnKRV = FALSE, returnR2 = FALSE){
  ## Kernel RV coefficient test: per-kernel p-values plus (for >1 kernel) an
  ## omnibus p-value via either an omnibus kernel or the Cauchy combination.
  ## Interface matches the roxygen documentation above; callers unchanged.

  ## Normalize the omnibus choice to its first letter ("k" or "c").
  om <- substring(tolower(omnibus), 1, 1)

  ## Validate the covariate-adjustment choice. The documentation lists
  ## "none" as a valid value, so treat it as equivalent to NULL instead of
  ## rejecting it (the previous code stopped on anything but "p"/"b").
  if (!is.null(adjust.type)) {
    adjust.type <- substring(tolower(adjust.type), 1, 1)
    if (adjust.type == "n") {
      adjust.type <- NULL
    } else if (!adjust.type %in% c("p", "b")) {
      stop("I don't know that covariate adjustment choice. Please choose 'phenotype' or 'both', or leave this option NULL.")
    } else if (adjust.type == "p") {
      ## Phenotype-only adjustment needs a numeric y plus a kernel *method*.
      ## Check is.matrix first so `%in%` is never applied to a matrix (which
      ## would yield a length > 1 condition and error under R >= 4.2).
      if (is.matrix(kernel.y) || !kernel.y %in% c("linear", "Gaussian")) {
        stop("Phenotype-only adjustment may only be used with numeric y, not pre-computed phenotype kernel. Try option 'both' instead or double check that you have entered either 'linear' or 'Gaussian' for kernel.y.")
      }
    }
  }

  ## Accept a single kernel matrix by wrapping it in a one-element list.
  if (!is.list(kernels.otu)) {
    kernels.otu <- list(kernels.otu)
  }
  if (!all(vapply(kernels.otu, is.matrix, logical(1)))) {
    stop("Please ensure kernels.otu is either a single n x n kernel matrix or a list of n x n kernel matrices.")
  }

  ## Prepend an intercept column when X lacks one. `&&` short-circuits so
  ## X[, 1] is never evaluated when X is NULL (the old `&` evaluated both
  ## sides and errored on NULL[, 1]).
  if (!is.null(X) && !all(X[, 1] == 1)) {
    X <- cbind(1, X)
  }
  if (is.null(X) && !is.null(adjust.type)) {
    warning("X is NULL, so no covariate adjustment will be done.")
    adjust.type <- NULL
  }

  ## Dimension / missing-data checks, branching on whether kernel.y is a
  ## pre-computed phenotype kernel or a method name.
  if (is.matrix(kernel.y)) {
    n <- nrow(kernels.otu[[1]])
    if (!is.null(y)) {
      warning("When a phenotype kernel is provided, argument \"y\" will be ignored.\n")
    }
    ## Check every candidate kernel (not just the first) against n.
    if (!all(vapply(kernels.otu, function(k) nrow(k) == n && ncol(k) == n, logical(1))) ||
        nrow(kernel.y) != n || ncol(kernel.y) != n) {
      stop("Kernel matrices need to be n x n, where n is the sample size.\n ")
    }
  } else {
    if (!(kernel.y %in% c("Gaussian", "linear"))) {
      stop("Please choose kernel.y = \"Gaussian\" or \"linear\", or enter a kernel matrix for \"kernel.y\".\n")
    }
    if (is.null(y)) {
      stop("Please enter a phenotype matrix for argument \"y\" or enter a kernel matrix for argument \"kernel.y\".\n")
    }
    n <- NROW(y)
    if (!all(vapply(kernels.otu, function(k) nrow(k) == n && ncol(k) == n, logical(1)))) {
      stop("Kernel matrix/matrices must be n x n, where n is the sample size. \n ")
    }
    if (any(is.na(y))) {
      ## Collapse the indices so stop() receives a single message even when
      ## several subjects are missing.
      ids <- which(is.na(y))
      stop(paste("Missing response for subject(s)", paste(ids, collapse = ", "),
                 "- please remove before proceeding. \n"))
    }
    if (!is.null(X)) {
      if (any(is.na(X))) {
        stop("NAs in covariates X, please impute or remove subjects with missing covariates values.\n")
      }
      if (NROW(X) != NROW(y)) stop("Dimensions of X and y don't match.\n")
    }
  }

  ## Per-kernel tests. Preallocate instead of growing vectors in the loop.
  n.kernels <- length(kernels.otu)
  pvals <- numeric(n.kernels)
  KRVs <- if (returnKRV) numeric(n.kernels) else NULL
  R2   <- if (returnR2)  numeric(n.kernels) else NULL
  for (i in seq_len(n.kernels)) {
    res <- inner.KRV(y = y, X = X, adjust.type = adjust.type, kernel.otu = kernels.otu[[i]],
                     kernel.y = kernel.y, returnKRV = TRUE, returnR2 = TRUE)
    pvals[i] <- res$pv
    if (returnKRV) KRVs[i] <- res$KRV
    if (returnR2)  R2[i]   <- res$R2
  }

  ## Name the results after the candidate kernel matrices.
  kernel.names <- names(kernels.otu)
  names(pvals) <- kernel.names
  if (returnKRV) names(KRVs) <- kernel.names
  if (returnR2)  names(R2)   <- kernel.names

  ## Omnibus test (only meaningful with more than one candidate kernel).
  if (n.kernels > 1) {
    if (om == "k") {
      ## Omnibus kernel: trace-normalized sum of the candidate kernels.
      K.om <- matrix(0, nrow = nrow(kernels.otu[[1]]), ncol = ncol(kernels.otu[[1]]))
      for (i in seq_len(n.kernels)) {
        K.om <- K.om + kernels.otu[[i]] / tr(kernels.otu[[i]])
      }
      omnibus_p <- as.numeric(inner.KRV(y = y, X = X, adjust.type = adjust.type,
                                        kernel.otu = K.om, kernel.y = kernel.y)$pv)
    } else if (om == "c") {
      ## Cauchy combination test of the per-kernel p-values.
      cauchy.t <- sum(tan((0.5 - pvals) * pi)) / length(pvals)
      omnibus_p <- 1 - pcauchy(cauchy.t)
    } else {
      stop("I don't know that omnibus option. Please choose 'kernel_om' or 'Cauchy'.")
    }
    out <- list(p_values = pvals, omnibus_p = omnibus_p)
  } else {
    out <- list(p_values = pvals)
  }
  ## Append optional effect-size summaries; element order (KRV before R2)
  ## matches the original return structure.
  if (!is.null(KRVs)) out$KRV <- KRVs
  if (!is.null(R2))   out$R2  <- R2
  out
}
| /R/KRV.R | no_license | cran/MiRKAT | R | false | false | 10,847 | r | #' Kernel RV Coefficient Test (KRV)
#'
#' Kernel RV coefficient test to evaluate the overall association between
#' microbiome composition and high-dimensional or structured phenotype or genotype.
#'
#' kernels.otu should be a list of numerical n by n kernel matrices, or a single
#' n by n kernel matrix, where n is sample size.
#'
#' When kernel.y is a method ("Gaussian" or "linear") to compute the kernel of
#' phenotype, y should be a numerical phenotype matrix, and X (if not NULL)
#' should be a numeric matrix of covariates. Both y and X should have n rows.
#'
#' When kernel.y is a kernel matrix for the phenotype, there is no need to provide
#' X and y, and they will be ignored if provided. In this case, kernel.y and
#' kernels.otu should both be numeric matrices with the same number of rows and columns.
#'
#' Missing data is not permitted. Please remove all individuals with missing
#' kernel.otu, y (if not NULL), X (if not NULL), and kernel.y (if a matrix is
#' entered) prior to analysis.
#'
#' @param y A numeric n by p matrix of p continuous phenotype variables and
#' sample size n (default = NULL). If it is NULL, a
#' phenotype kernel matrix must be entered for "kernel.y". Defaults to NULL.
#' @param X A numeric n by q matrix, containing q additional covariates
#' (default = NULL). If NULL, an intercept only model is used. If the first
#' column of X is not uniformly 1, then an intercept column will be added.
#' @param adjust.type Possible values are "none" (default if X is null),
#' "phenotype" to adjust only the y variable (only possible if y is a numeric
#' phenotype matrix rather than a pre-computed kernel), or "both" to adjust
#' both the X and Y kernels.
#' @param kernels.otu A numeric OTU n by n kernel matrix or a list of matrices,
#' where n is the sample size. It can be constructed from microbiome data, such
#' as by transforming from a distance metric.
#' @param kernel.y Either a numerical n by n kernel matrix for phenotypes or a
#' method to compute the kernel of phenotype. Methods are "Gaussian" or "linear".
#' A Gaussian kernel (kernel.y="Gaussian") can capture the general relationship
#' between microbiome and phenotypes; a linear kernel (kernel.y="linear")
#' may be preferred if the underlying relationship is close to linear.
#' @param omnibus A string equal to either "Cauchy" or "kernel_om" (or unambiguous
#' abbreviations thereof), specifying whether to use the Cauchy combination test
#' or an omnibus kernel to generate the omnibus p-value.
#' @param returnKRV A logical indicating whether to return the KRV statistic. Defaults to FALSE.
#' @param returnR2 A logical indicating whether to return the R-squared coefficient. Defaults to FALSE.
#'
#' @return
#' If only one candidate kernel matrix is considered, returns a list containing the p-value for the candidate kernel matrix.
#' If more than one candidate kernel matrix is considered, returns a list of two elements:
#' \item{p_values}{P-value for each candidate kernel matrix}
#' \item{omnibus_p}{Omnibus p-value}
#' \item{KRV}{A vector of kernel RV statistics (a measure of effect size), one for each candidate kernel matrix. Only returned if returnKRV = TRUE}
#' \item{R2}{A vector of R-squared statistics, one for each candidate kernel matrix. Only returned if returnR2 = TRUE}
#'
#'@author
#'Nehemiah Wilson, Haotian Zheng, Xiang Zhan, Ni Zhao
#'
#'@references
#' Zheng, Haotian, Zhan, X., Plantinga, A., Zhao, N., and Wu, M.C. A Fast Small-Sample Kernel Independence Test for Microbiome
#' Community-Level Association Analysis. Biometrics. 2017 Mar 10. doi: 10.1111/biom.12684.
#'
#' Liu, Hongjiao, Ling, W., Hua, X., Moon, J.Y., Williams-Nguyen, J., Zhan, X., Plantinga, A.M., Zhao, N.,
#' Zhang, A., Durazo-Arzivu, R.A., Knight, R., Qi, Q., Burk, R.D., Kaplan, R.C., and Wu, M.C.
#' Kernel-based genetic association analysis for microbiome phenotypes identifies host genetic
#' drivers of beta-diversity. 2021+
#'
#'@importFrom PearsonDS ppearsonIII
#'
#'@examples
#'library(GUniFrac)
#'library(MASS)
#'
#'data(throat.tree)
#'data(throat.otu.tab)
#'data(throat.meta)
#'
#' ## Simulate covariate data
#' set.seed(123)
#' n = nrow(throat.otu.tab)
#' Sex <- throat.meta$Sex
#' Smoker <- throat.meta$SmokingStatus
#' anti <- throat.meta$AntibioticUsePast3Months_TimeFromAntibioticUsage
#' Male = (Sex == "Male")**2
#' Smoker = (Smoker == "Smoker") **2
#' Anti = (anti != "None")^2
#' cova = cbind(1, Male, Smoker, Anti)
#'
#' ## Simulate microbiome data
#' otu.tab.rff <- Rarefy(throat.otu.tab)$otu.tab.rff
#' unifracs <- GUniFrac(otu.tab.rff, throat.tree, alpha=c(0, 0.5, 1))$unifracs
#' # Distance matrices
#' D.weighted = unifracs[,,"d_1"]
#' D.unweighted = unifracs[,,"d_UW"]
#' # Kernel matrices
#' K.weighted = D2K(D.weighted)
#' K.unweighted = D2K(D.unweighted)
#'
#' if (requireNamespace("vegan")) {
#' library(vegan)
#' D.BC = as.matrix(vegdist(otu.tab.rff, method="bray"))
#' K.BC = D2K(D.BC)
#'}
#'
#' # Simulate phenotype data
#' rho = 0.2
#' Va = matrix(rep(rho, (2*n)^2), 2*n, 2*n)+diag(1-rho, 2*n)
#' Phe = mvrnorm(n, rep(0, 2*n), Va)
#' K.y = Phe %*% t(Phe) # phenotype kernel
#'
#' # Simulate genotype data
#' G = matrix(rbinom(n*10, 2, 0.1), n, 10)
#' K.g = G %*% t(G) # genotype kernel
#'
#'
#' ## Unadjusted analysis (microbiome and phenotype)
#' KRV(y = Phe, kernels.otu = K.weighted, kernel.y = "Gaussian") # numeric y
#' KRV(kernels.otu = K.weighted, kernel.y = K.y) # kernel y
#'
#' ## Adjusted analysis (phenotype only)
#' KRV(kernels.otu = K.weighted, y = Phe, kernel.y = "linear", X = cova, adjust.type = "phenotype")
#'
#' if (requireNamespace("vegan")) {
#' ## Adjusted analysis (adjust both kernels; microbiome and phenotype)
#' KRV(kernels.otu = K.BC, kernel.y = K.y, X = cova, adjust.type='both')
#'
#' ## Adjusted analysis (adjust both kernels; microbiome and genotype)
#' KRV(kernels.otu = K.BC, kernel.y = K.g, X = cova, adjust.type='both')
#' }
#'
#'
#'@export
# Kernel RV coefficient test: computes a p-value for each candidate OTU
# kernel and, when several kernels are supplied, an omnibus p-value via
# either an omnibus kernel ("kernel_om") or the Cauchy combination test.
# See the roxygen block above for the full argument contract.
KRV <- function(y = NULL, X = NULL, adjust.type = NULL, kernels.otu, kernel.y,
omnibus = "kernel_om", returnKRV = FALSE, returnR2 = FALSE){
## Check input
# Only the first letter of `omnibus` is used ("k" = kernel_om, "c" = Cauchy).
om <- substring(tolower(omnibus), 1, 1)
# Normalize adjust.type to its first letter and validate it.
# NOTE(review): the docs list "none" as a valid value, but "n" falls through
# to the stop() below — confirm whether "none" should map to NULL instead.
if (!is.null(adjust.type)) {
adjust.type <- substring(tolower(adjust.type), 1, 1)
if (adjust.type == "p") {
# Phenotype-only adjustment requires a kernel *method*, not a matrix.
# NOTE(review): if kernel.y is a matrix, `%in%` yields a vector condition.
if (!kernel.y %in% c("linear", "Gaussian")) {
stop("Phenotype-only adjustment may only be used with numeric y, not pre-computed phenotype kernel. Try option 'both' instead or double check that you have entered either 'linear' or 'Gaussian' for kernel.y.")
}
}
if (!adjust.type %in% c("p", "b")) {
stop("I don't know that covariate adjustment choice. Please choose 'phenotype' or 'both', or leave this option NULL.")
}
}
# A single kernel matrix is wrapped into a one-element list.
if (!is.list(kernels.otu)) {
kernels.otu <- list(kernels.otu)
}
if (!all(unlist(lapply(kernels.otu, FUN = function(k) is.matrix(k))))) {
stop("Please ensure kernels.otu is either a single n x n kernel matrix or a list of n x n kernel matrices.")
}
# Add an intercept column when the first column of X is not uniformly 1.
# NOTE(review): `&` evaluates both sides, so X[,1] is evaluated even when
# X is NULL — confirm this branch is never reached with X = NULL.
if (!is.null(X) & !all(X[,1] == 1)) {
X1 <- cbind(1, X)
X <- X1
}
if (is.null(X) & !is.null(adjust.type)) {
warning("X is NULL, so no covariate adjustment will be done.")
adjust.type = NULL
}
# Branch 1: kernel.y is a pre-computed phenotype kernel; y is ignored.
if (is.matrix(kernel.y)){
n = nrow(kernels.otu[[1]])
if (!is.null(y)){
warning("When a phenotype kernel is provided, argument \"y\" will be ignored.\n")
}
if (ncol(kernels.otu[[1]]) != n | nrow(kernel.y) != n | ncol(kernel.y) != n){
stop("Kernel matrices need to be n x n, where n is the sample size.\n ")
}
# Branch 2: kernel.y names a method, so a numeric phenotype y is required.
} else if (!is.matrix(kernel.y)){
if (!(kernel.y %in% c("Gaussian", "linear"))){
stop("Please choose kernel.y = \"Gaussian\" or \"linear\", or enter a kernel matrix for \"kernel.y\".\n")
}
if(is.null(y)){
stop("Please enter a phenotype matrix for argument \"y\" or enter a kernel matrix for argument \"kernel.y\".\n")
}
n = NROW(y)
# Every candidate kernel must be n x n for sample size n = NROW(y).
if (!all(unlist(lapply(kernels.otu, FUN = function(x) nrow(x) == n & ncol(x) == n)))) {
stop("Kernel matrix/matrices must be n x n, where n is the sample size. \n ")
}
if (any(is.na(y))){
ids = which(is.na(y))
stop(paste("Missing response for subject(s)", ids, "- please remove before proceeding. \n"))
}
if (!is.null(X)){
if (any(is.na(X))){
stop("NAs in covariates X, please impute or remove subjects with missing covariates values.\n")
}
if(NROW(X)!= NROW(y)) stop("Dimensions of X and y don't match.\n")
}
}
## Actual test
# One inner.KRV call per candidate kernel; KRV/R2 are collected only on request.
pvals <- c()
if (returnKRV) { KRVs <- c() } else { KRVs = NULL }
if (returnR2) { R2 <- c() } else { R2 = NULL }
for(i in 1:length(kernels.otu)){
res <- inner.KRV(y = y, X = X, adjust.type = adjust.type, kernel.otu=kernels.otu[[i]],
kernel.y = kernel.y, returnKRV = TRUE, returnR2 = TRUE)
pvals[i] <- res$pv
if (returnKRV) { KRVs[i] <- res$KRV }
if (returnR2) { R2[i] <- res$R2 }
}
# Naming the p-values with the Kernel matrix names from Ks
kernel.names <- names(kernels.otu)
names(pvals) <- kernel.names
if (returnKRV) { names(KRVs) <- kernel.names }
if (returnR2) { names(R2) <- kernel.names }
#Omnibus Test
if (length(kernels.otu) > 1) {
if (om == "k") {
# Omnibus kernel: trace-normalized sum of the candidate kernels,
# then a single inner.KRV test on the combined kernel.
K.om <- matrix(0, nrow = nrow(kernels.otu[[1]]), ncol = ncol(kernels.otu[[1]]))
for(i in 1:length(kernels.otu)){
K.om = K.om + kernels.otu[[i]]/tr(kernels.otu[[i]])
}
omnibus_p <- as.numeric(inner.KRV(y = y, X = X, adjust.type = adjust.type,
kernel.otu = K.om, kernel.y = kernel.y)$pv)
} else if (om == "c") {
# Cauchy combination test: average the Cauchy-transformed p-values
# and convert the statistic back with the Cauchy CDF.
cauchy.t <- sum(tan((0.5 - pvals)*pi))/length(pvals)
omnibus_p <- 1 - pcauchy(cauchy.t)
} else {
stop("I don't know that omnibus option. Please choose 'kernel_om' or 'Cauchy'.")
}
## Return for multiple kernels
# The four cases differ only in which optional components are attached.
if (is.null(KRVs) & is.null(R2)) {
return(list(p_values = pvals, omnibus_p = omnibus_p))
} else if (is.null(KRVs) & !is.null(R2)) {
return(list(p_values = pvals, omnibus_p = omnibus_p, R2 = R2))
} else if (!is.null(KRVs) & is.null(R2)) {
return(list(p_values = pvals, omnibus_p = omnibus_p, KRV = KRVs))
} else {
return(list(p_values = pvals, omnibus_p = omnibus_p, KRV = KRVs, R2 = R2))
}
}
## Return for single kernels
# Same structure as above, without an omnibus p-value.
if (is.null(KRVs) & is.null(R2)) {
return(list(p_values = pvals))
} else if (is.null(KRVs) & !is.null(R2)) {
return(list(p_values = pvals, R2 = R2))
} else if (!is.null(KRVs) & is.null(R2)) {
return(list(p_values = pvals, KRV = KRVs))
} else {
return(list(p_values = pvals, KRV = KRVs, R2 = R2))
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.