content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#sorting corpus based on word frequencies
# need to have word.freqs file loaded
#do two sorts based on term frequency and doc frequency
str(word.freqs)
str(termfreqs)
# Sort descending by term frequency and write out.
# BUG FIX: the original calls had a trailing comma inside write.csv(...),
# which R rejects with "argument N is empty".
sorted.wordfreqs.termfreq <- word.freqs[order(-word.freqs$term.freq), ]
write.csv(sorted.wordfreqs.termfreq, file = "sorted.wordfreqs.termfreq.csv")
# Sort descending by document frequency and write out.
sorted.wordfreqs.docfreq <- word.freqs[order(-word.freqs$doc.freq), ]
write.csv(sorted.wordfreqs.docfreq, file = "sorted.wordfreqs.docfreq.csv")
# both these csv files are available in GitHub-->SOTL project folder
| /scripts/Sortingattempts 7-27-16.R | no_license | Fairlane100/SOTL-project | R | false | false | 534 | r | #sorting corpus based on word frequencies
# need to have word.freqs file loaded
#do two sorts based on term frequency and doc frequency
str(word.freqs)
str(termfreqs)
# Sort descending by term frequency and write out.
# BUG FIX: the original calls had a trailing comma inside write.csv(...),
# which R rejects with "argument N is empty".
sorted.wordfreqs.termfreq <- word.freqs[order(-word.freqs$term.freq), ]
write.csv(sorted.wordfreqs.termfreq, file = "sorted.wordfreqs.termfreq.csv")
# Sort descending by document frequency and write out.
sorted.wordfreqs.docfreq <- word.freqs[order(-word.freqs$doc.freq), ]
write.csv(sorted.wordfreqs.docfreq, file = "sorted.wordfreqs.docfreq.csv")
# both these csv files are available in GitHub-->SOTL project folder
|
# Fit a cross-validated elastic-net model (alpha = 0.1) to the breast data
# set and append the fitted glmnet coefficient path to a log file.
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/breast.csv",head=T,sep=",")
# Column 1 is the response; predictors start at column 4.
# NOTE(review): columns 2-3 are skipped -- presumably ID/metadata; confirm.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.1,family="gaussian",standardize=FALSE)
# Append the coefficient-path summary to the results file.
sink('./breast_026.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/breast/breast_026.R | no_license | esbgkannan/QSMART | R | false | false | 350 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.1,family="gaussian",standardize=FALSE)
sink('./breast_026.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/code/Spat_CV_clustered_random.R | no_license | abdelkrim-bsr/SpatialValidation | R | false | false | 23,280 | r | ||
library(AER)
library(foreign)
library(sandwich) # for all sorts of robust standard errors
library(lmtest)
library(dplyr) #install.packages("dplyr") if you do not have it
# Download the Stata data set once into data/ (mode = "wb" keeps the binary
# file intact on Windows).
download.file("http://wps.pearsoned.co.uk/wps/media/objects/12401/12699039/empirical/empex_tb/Smoking.dta", "data/smoking.dta", mode="wb")
smoking <- read.dta("data/smoking.dta")
head(smoking)
## a
# Share of smokers overall and by workplace smoking-ban status.
# BUG FIX: na.rm must be TRUE/FALSE; na.rm = NA makes mean() error with
# "missing value where TRUE/FALSE needed".
smoking %>% select(smoker) %>% summarise(probsmoking_all = mean(smoker, na.rm = TRUE)) # how do the %>% work, what does select do, what does summarise/ze do
smoking %>% filter(smkban == 0) %>% select(smoker) %>% summarise(probsmoking_NOsmkban = mean(smoker, na.rm = TRUE)) # what does filter do
smoking %>% filter(smkban == 1) %>% select(smoker) %>% summarise(probsmoking_WITHsmkban = mean(smoker, na.rm = TRUE))
## b
# Linear probability model: smoking regressed on the smoking-ban dummy only.
lpm1 <- lm(smoker ~ smkban, data = smoking)
summary(lpm1)
# Heteroskedasticity-robust (HC1) standard errors.
coeftest(lpm1, vcovHC(lpm1,"HC1"))
## c
smoking <- smoking %>% mutate(age2 = age^2) # what does mutate do?
# notice here i saved the result of the manipulation back in smoking, before i didn't. that's because before I only wanted to see the result
# LPM with demographic and education controls added.
lpm2 <- lm(smoker ~ smkban + female + age + age2 + hsdrop + hsgrad + colsome + colgrad + black+ hispanic, data = smoking)
summary(lpm2)
coeftest(lpm2, vcovHC(lpm2, "HC1"))
## e
# Joint test that all education indicators are zero, using the robust vcov.
linearHypothesis(lpm2, c("hsdrop=0","hsgrad=0","colsome=0", "colgrad=0"), vcov=vcovHC(lpm2, "HC1"))
## f
# Prediction profile: male, non-black, non-hispanic college graduate,
# no smoking ban, for ages 18-65.
new <- data.frame(smkban=0,
female=0,
black=0,
hispanic=0,
hsdrop=0, hsgrad=0, colsome=0, colgrad=1,
age = seq(from=18, to=65, by=1),
age2 = seq(from=18, to=65, by=1)^2)
pred <- predict(lpm2, new)
# Predicted smoking probability over the age profile.
plot(new$age, pred, type = "l", xlab="Age", ylab="Probability of Smoking", ylim=c(0,0.22))
## g
# Probit with the same covariates as lpm2.
probit2 <- glm(smoker ~ smkban + female + age + age2 + hsdrop + hsgrad + colsome + colgrad + black+ hispanic, family = binomial(link = "probit"), data = smoking)
summary(probit2)
coeftest(probit2, vcovHC(probit2, "HC1"))
## i
# Joint significance of the education indicators in the probit (robust vcov).
linearHypothesis(probit2, c("hsdrop=0","hsgrad=0","colsome=0", "colgrad=0"), vcov=vcovHC(probit2, "HC1"))
## j
# "Mr A": 20-year-old white male high-school dropout; rows without/with ban.
mrA <- data.frame(smkban=c(0,1),
female=0,
black=0,
hispanic=0,
hsdrop=1, hsgrad=0, colsome=0, colgrad=0,
age = 20,
age2 = 20^2)
pred_mrA <- predict(probit2, mrA, type = "response")
pred_mrA
## k
# "Lakisha": 40-year-old black female college graduate; without/with ban.
lakisha <- data.frame(smkban=c(0,1),
female=1,
black=1,
hispanic=0,
hsdrop=0, hsgrad=0, colsome=0, colgrad=1,
age = 40,
age2 = 40^2)
pred_lakisha <- predict(probit2, lakisha, type = "response")
pred_lakisha
## l
# Same predictions from the linear probability model for comparison.
pred_mrA_lpm <- predict(lpm2, mrA) # does it matter if i do not put type = "response" the lpm? try it with response = TRUE
pred_mrA_lpm
pred_lakisha_lpm <- predict(lpm2, lakisha)
pred_lakisha_lpm
##
# Collect all four prediction pairs in one labelled table.
predicted <- rbind(pred_mrA, pred_mrA_lpm, pred_lakisha, pred_lakisha_lpm)
colnames(predicted) <- c("w/o smoking ban", "w/ smoking ban")
rownames(predicted) <- c("Allan, probit", "Allan, lpm", "Lakisha, probit", "Lakisha, lpm")
predicted
| /ps6ex1.R | no_license | rossihabibi/Econometrics4Law16 | R | false | false | 3,193 | r |
library(AER)
library(foreign)
library(sandwich) # for all sorts of robust standard erros
library(lmtest)
library(dplyr) #install.packages("dplyr") if you do you not have it
download.file("http://wps.pearsoned.co.uk/wps/media/objects/12401/12699039/empirical/empex_tb/Smoking.dta", "data/smoking.dta", mode="wb")
smoking <- read.dta("data/smoking.dta")
head(smoking)
## a
# Share of smokers overall and by workplace smoking-ban status.
# BUG FIX: na.rm must be logical; na.rm = NA makes mean() throw an error.
smoking %>% select(smoker) %>% summarise(probsmoking_all = mean(smoker, na.rm = TRUE)) # how do the %>% work, what does select do, what does summarise/ze do
smoking %>% filter(smkban == 0) %>% select(smoker) %>% summarise(probsmoking_NOsmkban = mean(smoker, na.rm = TRUE)) # what does filter do
smoking %>% filter(smkban == 1) %>% select(smoker) %>% summarise(probsmoking_WITHsmkban = mean(smoker, na.rm = TRUE))
## b
lpm1 <- lm(smoker ~ smkban, data = smoking)
summary(lpm1)
coeftest(lpm1, vcovHC(lpm1,"HC1"))
## c
smoking <- smoking %>% mutate(age2 = age^2) # what does mutate do?
# notice here i saved the result of the manipulation back in smoking, before i didn't. that's because before I only wanted to see the result
lpm2 <- lm(smoker ~ smkban + female + age + age2 + hsdrop + hsgrad + colsome + colgrad + black+ hispanic, data = smoking)
summary(lpm2)
coeftest(lpm2, vcovHC(lpm2, "HC1"))
## e
linearHypothesis(lpm2, c("hsdrop=0","hsgrad=0","colsome=0", "colgrad=0"), vcov=vcovHC(lpm2, "HC1"))
## f
new <- data.frame(smkban=0,
female=0,
black=0,
hispanic=0,
hsdrop=0, hsgrad=0, colsome=0, colgrad=1,
age = seq(from=18, to=65, by=1),
age2 = seq(from=18, to=65, by=1)^2)
pred <- predict(lpm2, new)
plot(new$age, pred, type = "l", xlab="Age", ylab="Probability of Smoking", ylim=c(0,0.22))
## g
probit2 <- glm(smoker ~ smkban + female + age + age2 + hsdrop + hsgrad + colsome + colgrad + black+ hispanic, family = binomial(link = "probit"), data = smoking)
summary(probit2)
coeftest(probit2, vcovHC(probit2, "HC1"))
## i
linearHypothesis(probit2, c("hsdrop=0","hsgrad=0","colsome=0", "colgrad=0"), vcov=vcovHC(probit2, "HC1"))
## j
mrA <- data.frame(smkban=c(0,1),
female=0,
black=0,
hispanic=0,
hsdrop=1, hsgrad=0, colsome=0, colgrad=0,
age = 20,
age2 = 20^2)
pred_mrA <- predict(probit2, mrA, type = "response")
pred_mrA
## k
lakisha <- data.frame(smkban=c(0,1),
female=1,
black=1,
hispanic=0,
hsdrop=0, hsgrad=0, colsome=0, colgrad=1,
age = 40,
age2 = 40^2)
pred_lakisha <- predict(probit2, lakisha, type = "response")
pred_lakisha
## l
pred_mrA_lpm <- predict(lpm2, mrA) # does it matter if i do not put type = "response" the lpm? try it with response = TRUE
pred_mrA_lpm
pred_lakisha_lpm <- predict(lpm2, lakisha)
pred_lakisha_lpm
##
predicted <- rbind(pred_mrA, pred_mrA_lpm, pred_lakisha, pred_lakisha_lpm)
colnames(predicted) <- c("w/o smoking ban", "w/ smoking ban")
rownames(predicted) <- c("Allan, probit", "Allan, lpm", "Lakisha, probit", "Lakisha, lpm")
predicted
|
####lib####
library(dplyr) #yes!
library(nortest)
library(effsize)
library(ggplot2) #yes!
library(tidyverse)
library(hrbrthemes)
library(car)
library(rstatix)
library(ggpubr)
library(ggstatsplot)
library(grid) #yes!
library(gridExtra)
library(pwr)
library(psych)
library(ggstatsplot)
#====calculate_accuracy_ratio()=================================================
#Function calculate_accuracy_ratio(data_frame, variable, condition)
# Calculates ratio of correct answers for specific variable and experimental
# condition for each subject separately
# Arguments:
# data_frame: specify dataframe, (MyData, ..., ...)
# variable: specify variable for ratio (..., "ReccencyACC", ...)
# condition: specify experimental condition (..., ..., "SameContext")
# Returns: one row per Subject with columns Subject, ratio (mean of
# `variable` within `condition`) and Condition (factor).
calculate_accuracy_ratio <- function(data_frame, variable, condition) {
# Keep only trials from the requested experimental condition.
Data_condition <- subset.data.frame(data_frame, Condition == condition)
Subject_calculate_accuracy_ratio <-
Data_condition %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
# Tag every row with the condition so results can be stacked later.
Subject_calculate_accuracy_ratio$Condition <- condition
Subject_calculate_accuracy_ratio$Condition <- as.factor(Subject_calculate_accuracy_ratio$Condition)
return(Subject_calculate_accuracy_ratio)
}
#====calculate_accuracy_ratio_all=================================
# Mean of `variable` per level of `grouping_var` over the whole data set.
# BUG FIX: the pipeline never used `data_frame` -- group_by() was called on
# the grouping variable itself. Pipe the data in first.
# NOTE(review): assumes `variable` and `grouping_var` are column-name
# strings, as in calculate_accuracy_ratio() -- confirm with callers.
calculate_accuracy_ratio_all <- function(data_frame, variable, grouping_var) {
  Subject_calculate_accuracy_ratio <-
    data_frame %>%
    group_by(across(all_of(grouping_var))) %>%
    summarise_at(vars(variable),
                 list(ratio = mean))
  return(Subject_calculate_accuracy_ratio)
}
# Like calculate_accuracy_ratio(), but subsets by Block instead of
# Condition: mean of `variable` per Subject within the given block.
calculate_accuracy_ratio_block <- function(data_frame, variable, block) {
Data_block <- subset.data.frame(data_frame, Block == block)
Subject_calculate_accuracy_ratio <-
Data_block %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
# Tag rows with the block so results can be stacked across blocks.
Subject_calculate_accuracy_ratio$Block <- block
# Subject_calculate_accuracy_ratio$Block <- as.factor(Subject_calculate_accuracy_ratio$block)
return(Subject_calculate_accuracy_ratio)
}
#====calculate_accuracy_ratio_block()==========================================
#====create_MyAllData()=========================================================
# Function create_MyAllData() takes the input dataframe,
# calculates ratio,
# and creates the output dataframe with 3 variables
# Relies on globals: MyVariable (column to average), ExpConditions (vector
# of condition names), GrVariable (name for the third column) -- TODO
# confirm these are defined before this is called.
# BUG FIX: colnames() was given the bare symbol `Subject` (an undefined
# object); the intended column name is the string "Subject".
create_MyAllData <- function(AllData) {
  MyAllData <- data.frame(matrix(ncol = 3, nrow = 0))
  colnames(MyAllData) <- c("Subject", "ratio", GrVariable)
  # One call per experimental condition; rows stacked into one frame.
  for (i in seq_along(ExpConditions)) {
    tmp <- calculate_accuracy_ratio(AllData, MyVariable, ExpConditions[i])
    MyAllData <- rbind(MyAllData, tmp)
  }
  return(MyAllData)
}
#====give_lillie_p()============================================================
# Function give_lillie_p() returns the p-value for the Lilliefors normality
# test (nortest::lillie.test).
# Arguments:
# - variable_to_test: the numeric vector to test for normality
give_lillie_p <- function(variable_to_test) {
  # Run the test and extract the p-value in one step.
  lillie.test(variable_to_test)$p.value
}
#====norm_dist_check_color()====================================================
# Function norm_dist_check_color():
# - performs the Lilliefors test on the indicated variable.
# - gives a color depending if distribution is normal or not:
# - NORMAL distribution (p > 0.05) - green - "darkseagreen1"
# - NOT NORMAL distribution - pink - "darksalmon"
# Arguments:
# - variable_to_test: the numeric vector to test for normality
norm_dist_check_color <- function(variable_to_test) {
  lillie_test <- lillie.test(variable_to_test)
  # Return the colour visibly (the original ended on an assignment, which
  # returns its value invisibly).
  if (lillie_test$p.value > 0.05) {
    "darkseagreen1"
  } else {
    "darksalmon"
  }
}
#====draw_a_hist()==============================================================
# Function draw_a_hist(hist_data, bw, which_condition, x_label, y_limit)
# - creates a histogram
# - bar colour depands on normal distribution
# - green: normal
# - pink: NOT normal
# - Title:
# - experimental condition
# - n
# - Lillefors p-value
# - mean
# Arguments:
# - dataframe - with calculated ratios (hist_data, ..., ..., ..., ...)
# - bin width (..., bw, ..., ..., ...)
# - specify the name of experimental condition (..., ..., "First", ..., ...)
# - the label for the x-axis (..., ..., ..., x_label, ...)
# - limit (hight) of the y-axis (..., ..., ..., ..., y_limit)
# Requires a `ratio` column in hist_data; returns the ggplot object.
draw_a_hist <- function(hist_data, bw, which_condition, x_label, y_limit) {
# hist <- hist_data %>%
hist_data %>%
ggplot(aes(x=ratio), stat="count") +
ylim(y_limit) +
xlab(x_label) + ylab("Count") +
# Fill colour encodes the Lilliefors normality verdict (green = normal).
geom_histogram(binwidth=bw,
fill=norm_dist_check_color(hist_data$ratio),
color="black", alpha=0.5) +
# Dashed vertical line at the mean ratio.
geom_vline(aes(xintercept=mean(ratio)),
color="black", linetype="dashed", size=1) +
# geom_text(aes(x=(mean(hist_data$ratio)+0.1), y=6.8,
# label=paste("Mean:\n", round(mean(hist_data$ratio),2)))) +
ggtitle(paste("Condition: ", which_condition,
"\nn: ", nrow(hist_data),
"\nLilliefors normality test p-value: ",
round(give_lillie_p(hist_data$ratio), 4),
"\nMean: ", round(mean(hist_data$ratio),4),
sep = "")) +
theme(plot.title = element_text(size = 8, face = "bold"))
}
#====draw_my_boxplot()==========================================================
# Boxplot of ratio by Condition with jittered raw points; boxplot outliers
# drawn in red and labelled with their rounded value.
# Arguments: box_data (needs Condition and ratio columns), y_label (y-axis
# label), boxpl_titel (plot title). Returns the ggplot object.
# NOTE(review): stat_summary(fun.y = ...) and stat(y) are deprecated in
# current ggplot2 (use fun = / after_stat(y)); they still work with a
# warning, so left unchanged here.
draw_my_boxplot <-function(box_data, y_label, boxpl_titel) {
head(box_data)
ggplot(box_data,
aes(x = Condition, y = ratio, fill = Condition)) +
geom_boxplot(notch = F,
outlier.shape=16, outlier.color = "red", outlier.size = 2) +
stat_summary(
aes(label = round(stat(y), 1)),
geom = "text",
# Label only boxplot outliers; NA suppresses the label for other points.
fun.y = function(y) { o <- boxplot.stats(y)$out; if(length(o) == 0) NA else o },
hjust = -1) +
scale_fill_brewer(palette="Set2") +
geom_jitter(shape=16, position=position_jitter(0.2), alpha=0.4) +
xlab("Condition") + ylab(y_label) +
labs() +
ggtitle(boxpl_titel)
}
# Boxplot - violinplot
# https://indrajeetpatil.github.io/ggstatsplot/articles/web_only/ggbetweenstats.html
# Box plot of ratio by Condition via ggstatsplot::ggbetweenstats, with
# outliers tagged by Subject ID.
# Arguments: box_data (needs Condition, ratio, Subject), box_ylabel
# (y-axis label), boxpl_titel (plot title).
draw_my_boxplot2 <-function(box_data, box_ylabel, boxpl_titel) {
head(box_data)
ggbetweenstats(data = box_data,
plot.type = "box",
x = Condition,
y = ratio,
ylab = box_ylabel,
outlier.tagging = TRUE,
outlier.label = Subject,
package = "yarrr",
palette = "basel",
title = boxpl_titel)
}
# Same plot as draw_my_boxplot2 but without outlier tagging.
# BUG FIX: the body referenced `box_ylabel` and `box_titel`, which are not
# the parameter names -- use `y_label` and `boxpl_titel`.
draw_my_boxplot3 <-function(box_data, y_label, boxpl_titel) {
  head(box_data)
  ggbetweenstats(data = box_data,
                 plot.type = "box",
                 x = Condition,
                 y = ratio,
                 ylab = y_label,
                 outlier.tagging = F,
                 # outlier.label = Subject,
                 package = "yarrr",
                 palette = "basel",
                 title = boxpl_titel)
}
#====check_name_outliers()======================================================
# check_name_outliers <- function(data_to_check) {
# outliers <- data_to_check %>%
# group_by(Condition) %>%
# identify_outliers(ratio)
#
# outliers_subjects <- unique(c(outliers$Subject))
# are_any_outliers <- !is.null(outliers_subjects)
# if (are_any_outliers == F) {
# outliers_message <- "No outliers found"
# } else {
# outliers_message <- paste("OUTLIERS: Removed ", length(outliers_subjects), " subject(s) from the data. Subject(s) numbers: ", outliers_subjects, sep="")
# }
# return(c(are_any_outliers, length(outliers_subjects), outliers_subjects, outliers_message))
# }
# Returns the unique Subject IDs whose `ratio` is flagged as an outlier
# within any Condition (rstatix::identify_outliers).
check_name_outliers <- function(data_to_check) {
outliers <- data_to_check %>%
group_by(Condition) %>%
identify_outliers(ratio)
outliers_subjects <- unique(c(outliers$Subject))
return(outliers_subjects)
}
#=====are_any_outliers: TRUE when at least one outlier subject was found
are_any_outliers <- function(outliers_subjects){
  # BUG FIX: check_name_outliers() returns a zero-length vector (not NULL)
  # when there are no outliers, so the old !is.null() test was always TRUE.
  length(outliers_subjects) > 0
}
#====remove_outliers()=========================================================
# Drop every subject listed in `list_of_outliers` from `data_to_check`
# (matched against its Subject column).
# BUG FIX: 1:length(...) iterated over c(1, 0) for an empty list and then
# subset on a missing element; seq_along() skips the loop entirely there.
remove_outliers <- function(data_to_check, list_of_outliers) {
  for (i in seq_along(list_of_outliers)) {
    data_to_check <- subset(data_to_check, Subject != (list_of_outliers[i]))
  }
  return(data_to_check)
}
#====name_subjects_del()======================
# NOTE(review): this version looks unfinished -- `subject_variable` and
# `value` are never used, the result is only assigned (so it is returned
# invisibly), and the same name is redefined twice further down this file,
# which overwrites this definition.
name_subjects_del <- function(data_frame, subject_variable, variable, value) {
Subject_calculate_accuracy_ratio <-
data_frame %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
}
#====split_time_series()========================================================
# Reshapes wide per-condition columns into long format: for each column name
# in `variab`, stacks rows of (Subject, Experiment, value, condition label).
# Arguments:
# dat: wide data frame with Subject, Experiment and the columns in `variab`
# variab: character vector of source column names to stack
# var_names: labels written into the 4th output column, parallel to `variab`
# conditions: the column names given to the long output
# NOTE(review): output width comes from length(var_names) -- presumably 4;
# confirm callers pass four names.
split_time_series <- function(dat, variab, var_names, conditions) {
# dati: one stacked slice (reused each iteration); dato: the accumulator.
dati <- data.frame(matrix(ncol = length(var_names), nrow =nrow(dat)))
dato <- data.frame(matrix(ncol = length(var_names), nrow =0))
for (i in 1:length(variab)) {
# var_names <- c("Subject", "Experiment", "ratio", "Condition")
funny_var <- variab[i]
# print(funny_var)
dati[1] <- dat$Subject
dati[2] <- dat$Experiment
# Locate the current source column by name.
index_i <- which(names(dat) == funny_var)
varaible_to_paste <- as.vector(dat[index_i])
# colnames(varaible_to_paste) <- c("ratio")
# colnames(dati) <- var_names
dati[3] <- varaible_to_paste
dati[4] <- var_names[i]
colnames(dati) <- conditions
dato <- rbind(dato, dati)
print(names(dati))
}
# dato <- subset(dato, dato$ratio != ".")
return(dato)
}
#====Z netu zapisywanie tabelek=================================================
# (from the web: table-saving helpers)
# Per-group descriptive statistics for columns 2:5 of x, grouped by column
# 1: one row per group with the means, sds, mins, maxes and ns side by side.
get.values<-function(x){
  require(psych)
  info<-describeBy(x[,2:5], group = x[,1])
  # BUG FIX: was length(levels(df[,1])) -- `df` is an unrelated global, not
  # the function argument.
  n.companies<-length(levels(x[,1]))
  n<-list()
  mean<-list()
  sd<-list()
  min<-list()
  max<-list()
  # Pull the n / mean / sd / min / max columns out of each group's
  # describeBy() table.
  for(i in 1:n.companies){
    n[[i]]<-info[[i]][,2]
    mean[[i]]<-info[[i]][,3]
    sd[[i]]<-info[[i]][,4]
    min[[i]]<-info[[i]][,8]
    max[[i]]<-info[[i]][,9]
  }
  # Concatenate the stats per group, then stack groups as rows.
  l<-Map(c, mean, sd, min, max, n)
  valuedf<-do.call(rbind, l)
  return(valuedf)
}
# Builds human-readable column names matching get.values() output order:
# all averages, then sds, mins, maxes and totals for columns 2:5 of x.
get.names<-function(x){
  require(psych)
  names<-rownames(describe(x[,2:5]))
  # paste() is vectorized -- no need to grow character vectors in a loop.
  c(paste("average number of", names),
    paste("standard deviation of", names),
    paste("min number of", names),
    paste("max number of", names),
    paste("total number of", names))
}
#====name_subjects_del()
# Function returns the vector with subject IDs for subjects that:
# have the mean performance in all conditions below given value
# BUG FIX: the loop counted unique IDs but indexed the raw, possibly
# duplicated `subject_id` vector; iterate over the unique IDs themselves.
name_subjects_del <- function(data, subject_id, cut_point){
  subjects_to_del <- c()
  ids <- unique(c(subject_id))
  for (i in seq_along(ids)) {
    tmp <- data %>%
      filter(Subject == ids[i])
    ratio_tmp <- mean(tmp$ratio)
    if (ratio_tmp < cut_point) {
      subjects_to_del <- append(subjects_to_del, ids[i])
    }
  }
  return(subjects_to_del)
}
#====name_subjects_del_all_blocks()
# Function returns the vector with subject IDs for subjects that:
# have the mean performance in all conditions below given value
# NOTE(review): this overwrites the name_subjects_del() defined above.
# BUG FIXES: `lenght` typo; the accumulator tibble was built from undefined
# bare symbols; the counter was updated in the accumulator before the rows
# for the current condition were bound; rbind() ran once per subject
# instead of once per condition.
# TODO(review): the inner filter only matches Subject, never `cond[e]` --
# confirm whether `data` should also be filtered by condition here.
name_subjects_del <- function(data, subject_id, condition, cut_point){
  cond <- unique(c(condition))
  subjects <- unique(c(subject_id))
  # Empty accumulator with the final column layout.
  subjects_to_del <- tibble(Subject = subjects[0],
                            Condition = cond[0],
                            counter = numeric(0))
  for (e in seq_along(cond)) {
    tmp_tibble <- tibble(Subject = subjects,
                         Condition = cond[e],
                         counter = 0)
    for (i in seq_along(subjects)) {
      tmp <- data %>%
        filter(Subject == subjects[i])
      ratio_tmp <- mean(tmp$ratio)
      if (ratio_tmp < cut_point) {
        tmp_tibble$counter[tmp_tibble$Subject == subjects[i]] <-
          tmp_tibble$counter[tmp_tibble$Subject == subjects[i]] + 1
      }
    }
    subjects_to_del <- rbind(subjects_to_del, tmp_tibble)
  }
  return(subjects_to_del)
}
library(dplyr) #yes!
library(nortest)
library(effsize)
library(ggplot2) #yes!
library(tidyverse)
library(hrbrthemes)
library(car)
library(rstatix)
library(ggpubr)
library(ggstatsplot)
library(grid) #yes!
library(gridExtra)
library(pwr)
library(psych)
library(ggstatsplot)
#====calculate_accuracy_ratio()=================================================
#Function calculate_accuracy_ratio(data_frame, variable, condition)
# Calculates ratio of correct answers for specific variable and experimental
# condition for each subject seperately
# Arguments:
# data_frame: specify dataframe, (MyData, ..., ...)
# variable: specify variable for ratio (..., "ReccencyACC", ...)
# condition: specify experimental condition (..., ..., "SameContext")
calculate_accuracy_ratio <- function(data_frame, variable, condition) {
Data_condition <- subset.data.frame(data_frame, Condition == condition)
Subject_calculate_accuracy_ratio <-
Data_condition %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
Subject_calculate_accuracy_ratio$Condition <- condition
Subject_calculate_accuracy_ratio$Condition <- as.factor(Subject_calculate_accuracy_ratio$Condition)
return(Subject_calculate_accuracy_ratio)
}
#====calculate_accuracy_ratio_all=================================
# Mean of `variable` per level of `grouping_var` over the whole data set.
# BUG FIX: the pipeline never used `data_frame` -- group_by() was called on
# the grouping variable itself. Pipe the data in first.
# NOTE(review): assumes `variable` and `grouping_var` are column-name
# strings, as in calculate_accuracy_ratio() -- confirm with callers.
calculate_accuracy_ratio_all <- function(data_frame, variable, grouping_var) {
  Subject_calculate_accuracy_ratio <-
    data_frame %>%
    group_by(across(all_of(grouping_var))) %>%
    summarise_at(vars(variable),
                 list(ratio = mean))
  return(Subject_calculate_accuracy_ratio)
}
calculate_accuracy_ratio_block <- function(data_frame, variable, block) {
Data_block <- subset.data.frame(data_frame, Block == block)
Subject_calculate_accuracy_ratio <-
Data_block %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
Subject_calculate_accuracy_ratio$Block <- block
# Subject_calculate_accuracy_ratio$Block <- as.factor(Subject_calculate_accuracy_ratio$block)
return(Subject_calculate_accuracy_ratio)
}
#====calculate_accuracy_ratio_block()==========================================
#====create_MyAllData()=========================================================
# Function create_MyAllData() takes the input dataframe,
# calculates ratio,
# and creates the output dataframe with 3 variables
create_MyAllData <- function(AllData) {
MyAllData <- data.frame(matrix(ncol = 3, nrow = 0))
colnames(MyAllData) <- c(Subject, "ratio", GrVariable)
for (i in 1:length(ExpConditions)) {
tmp <- calculate_accuracy_ratio(AllData, MyVariable, ExpConditions[i])
MyAllData <- rbind(MyAllData, tmp)
}
return(MyAllData)
}
#====give_lillie_p()============================================================
# Function give_lillie_p() returns the p-value for the Lilliefors normality test
# Arguments:
# - variable to test for the normal distribution.(variable_to_test)
give_lillie_p <- function(variable_to_test) {
lillie_test <- lillie.test(variable_to_test)
lillie_p <- lillie_test$p.value
return(lillie_p)
}
#====norm_dist_check_color()====================================================
# Function norm_dist_check_color():
# - performs the Lilliefors test on the indicated variable.
# - gives a color depending if distribution is normal or not:
# - NORMAL distribution - green - "darkseagreen1"
# - NOT NORMAL distribution - pink - "darksalmon"
# Arguments:
# - variable to test for the normal distribution.(variable_to_test)
norm_dist_check_color <- function(variable_to_test) {
lillie_test <- lillie.test(variable_to_test)
if (lillie_test$p.value > 0.05) {
col <- "darkseagreen1"
} else {
col <- "darksalmon"
}
}
#====draw_a_hist()==============================================================
# Function draw_a_hist(hist_data, bw, which_condition, x_label, y_limit)
# - creates a histogram
# - bar colour depands on normal distribution
# - green: normal
# - pink: NOT normal
# - Title:
# - experimental condition
# - n
# - Lillefors p-value
# - mean
# Arguments:
# - dataframe - with calculated ratios (hist_data, ..., ..., ..., ...)
# - bin width (..., bw, ..., ..., ...)
# - specify the name of experimental condition (..., ..., "First", ..., ...)
# - the label for the x-axis (..., ..., ..., x_label, ...)
# - limit (hight) of the y-axis (..., ..., ..., ..., y_limit)
draw_a_hist <- function(hist_data, bw, which_condition, x_label, y_limit) {
# hist <- hist_data %>%
hist_data %>%
ggplot(aes(x=ratio), stat="count") +
ylim(y_limit) +
xlab(x_label) + ylab("Count") +
geom_histogram(binwidth=bw,
fill=norm_dist_check_color(hist_data$ratio),
color="black", alpha=0.5) +
geom_vline(aes(xintercept=mean(ratio)),
color="black", linetype="dashed", size=1) +
# geom_text(aes(x=(mean(hist_data$ratio)+0.1), y=6.8,
# label=paste("Mean:\n", round(mean(hist_data$ratio),2)))) +
ggtitle(paste("Condition: ", which_condition,
"\nn: ", nrow(hist_data),
"\nLilliefors normality test p-value: ",
round(give_lillie_p(hist_data$ratio), 4),
"\nMean: ", round(mean(hist_data$ratio),4),
sep = "")) +
theme(plot.title = element_text(size = 8, face = "bold"))
}
#====draw_my_boxplot()==========================================================
draw_my_boxplot <-function(box_data, y_label, boxpl_titel) {
head(box_data)
ggplot(box_data,
aes(x = Condition, y = ratio, fill = Condition)) +
geom_boxplot(notch = F,
outlier.shape=16, outlier.color = "red", outlier.size = 2) +
stat_summary(
aes(label = round(stat(y), 1)),
geom = "text",
fun.y = function(y) { o <- boxplot.stats(y)$out; if(length(o) == 0) NA else o },
hjust = -1) +
scale_fill_brewer(palette="Set2") +
geom_jitter(shape=16, position=position_jitter(0.2), alpha=0.4) +
xlab("Condition") + ylab(y_label) +
labs() +
ggtitle(boxpl_titel)
}
# Boxplot - violinplot
# https://indrajeetpatil.github.io/ggstatsplot/articles/web_only/ggbetweenstats.html
draw_my_boxplot2 <-function(box_data, box_ylabel, boxpl_titel) {
head(box_data)
ggbetweenstats(data = box_data,
plot.type = "box",
x = Condition,
y = ratio,
ylab = box_ylabel,
outlier.tagging = TRUE,
outlier.label = Subject,
package = "yarrr",
palette = "basel",
title = boxpl_titel)
}
# Same plot as draw_my_boxplot2 but without outlier tagging.
# BUG FIX: the body referenced `box_ylabel` and `box_titel`, which are not
# the parameter names -- use `y_label` and `boxpl_titel`.
draw_my_boxplot3 <-function(box_data, y_label, boxpl_titel) {
  head(box_data)
  ggbetweenstats(data = box_data,
                 plot.type = "box",
                 x = Condition,
                 y = ratio,
                 ylab = y_label,
                 outlier.tagging = F,
                 # outlier.label = Subject,
                 package = "yarrr",
                 palette = "basel",
                 title = boxpl_titel)
}
#====check_name_outliers()======================================================
# check_name_outliers <- function(data_to_check) {
# outliers <- data_to_check %>%
# group_by(Condition) %>%
# identify_outliers(ratio)
#
# outliers_subjects <- unique(c(outliers$Subject))
# are_any_outliers <- !is.null(outliers_subjects)
# if (are_any_outliers == F) {
# outliers_message <- "No outliers found"
# } else {
# outliers_message <- paste("OUTLIERS: Removed ", length(outliers_subjects), " subject(s) from the data. Subject(s) numbers: ", outliers_subjects, sep="")
# }
# return(c(are_any_outliers, length(outliers_subjects), outliers_subjects, outliers_message))
# }
check_name_outliers <- function(data_to_check) {
outliers <- data_to_check %>%
group_by(Condition) %>%
identify_outliers(ratio)
outliers_subjects <- unique(c(outliers$Subject))
return(outliers_subjects)
}
#=====are_any_outliers: TRUE when at least one outlier subject was found
are_any_outliers <- function(outliers_subjects){
  # BUG FIX: check_name_outliers() returns a zero-length vector (not NULL)
  # when there are no outliers, so the old !is.null() test was always TRUE.
  length(outliers_subjects) > 0
}
#====remove_outliers()=========================================================
# Drop every subject listed in `list_of_outliers` from `data_to_check`
# (matched against its Subject column).
# BUG FIX: 1:length(...) iterated over c(1, 0) for an empty list and then
# subset on a missing element; seq_along() skips the loop entirely there.
remove_outliers <- function(data_to_check, list_of_outliers) {
  for (i in seq_along(list_of_outliers)) {
    data_to_check <- subset(data_to_check, Subject != (list_of_outliers[i]))
  }
  return(data_to_check)
}
#====name_subjects_del()======================
name_subjects_del <- function(data_frame, subject_variable, variable, value) {
Subject_calculate_accuracy_ratio <-
data_frame %>%
group_by(Subject) %>%
summarise_at(vars(variable),
list(ratio = mean))
}
#====split_time_series()========================================================
split_time_series <- function(dat, variab, var_names, conditions) {
dati <- data.frame(matrix(ncol = length(var_names), nrow =nrow(dat)))
dato <- data.frame(matrix(ncol = length(var_names), nrow =0))
for (i in 1:length(variab)) {
# var_names <- c("Subject", "Experiment", "ratio", "Condition")
funny_var <- variab[i]
# print(funny_var)
dati[1] <- dat$Subject
dati[2] <- dat$Experiment
index_i <- which(names(dat) == funny_var)
varaible_to_paste <- as.vector(dat[index_i])
# colnames(varaible_to_paste) <- c("ratio")
# colnames(dati) <- var_names
dati[3] <- varaible_to_paste
dati[4] <- var_names[i]
colnames(dati) <- conditions
dato <- rbind(dato, dati)
print(names(dati))
}
# dato <- subset(dato, dato$ratio != ".")
return(dato)
}
#====Z netu zapisywanie tabelek=================================================
# (from the web: table-saving helpers)
# Per-group descriptive statistics for columns 2:5 of x, grouped by column
# 1: one row per group with the means, sds, mins, maxes and ns side by side.
get.values<-function(x){
  require(psych)
  info<-describeBy(x[,2:5], group = x[,1])
  # BUG FIX: was length(levels(df[,1])) -- `df` is an unrelated global, not
  # the function argument.
  n.companies<-length(levels(x[,1]))
  n<-list()
  mean<-list()
  sd<-list()
  min<-list()
  max<-list()
  # Pull the n / mean / sd / min / max columns out of each group's
  # describeBy() table.
  for(i in 1:n.companies){
    n[[i]]<-info[[i]][,2]
    mean[[i]]<-info[[i]][,3]
    sd[[i]]<-info[[i]][,4]
    min[[i]]<-info[[i]][,8]
    max[[i]]<-info[[i]][,9]
  }
  # Concatenate the stats per group, then stack groups as rows.
  l<-Map(c, mean, sd, min, max, n)
  valuedf<-do.call(rbind, l)
  return(valuedf)
}
get.names<-function(x){
require(psych)
names<-rownames(describe(x[,2:5]))
avg<-character()
sd<-character()
min<-character()
max<-character()
total<-character()
for(i in 1:length(names)){
avg[i]<-paste("average number of", names[i])
sd[i]<-paste("standard deviation of", names[i])
min[i]<-paste("min number of", names[i])
max[i]<-paste("max number of", names[i])
total[i]<-paste("total number of", names[i])
}
cnames<-c(avg,sd,min,max,total)
return(cnames)
}
#====name_subjects_del()
# Function returns the vector with subject IDs for subjects that:
# have the mean performance in all conditions below given value
name_subjects_del <- function(data, subject_id, cut_point){
subjects_to_del <- c()
for(i in 1:length(unique(c(subject_id)))) {
tmp <- data %>%
filter(Subject == subject_id[i])
ratio_tmp <- mean(tmp$ratio)
if(ratio_tmp < cut_point) {
subjects_to_del <- append(subjects_to_del, subject_id[i])
}
}
return(subjects_to_del)
}
#====name_subjects_del_all_blocks()
# Function returns the vector with subject IDs for subjects that:
# have the mean performance in all conditions below given value
name_subjects_del <- function(data, subject_id, condition, cut_point){
cond <- unique(c(condition))
subjects <- unique(c(subject_id))
subjects_to_del <- tibble(Subject,
Condition,
counter)
for(e in 1:lenght(cond)){
tmp_tibble <- tibble(Subject=subjects,
Condition=cond[e],
counter=0)
for(i in 1:length(subjects)) {
tmp <- data %>%
filter(Subject == subjects[i])
ratio_tmp <- mean(tmp$ratio)
if(ratio_tmp < cut_point) {
subjects_to_del[subjects_to_del$Subject == subjects[i], "counter"] <-
subjects_to_del$counter[i] +1
}
subjects_to_del <- rbind(subjects_to_del, tmp_tibble)
}
}
return(subjects_to_del)
} |
\name{LowLand}
\alias{LowLand}
\title{LowLand}
\description{
LowLand
}
\usage{
LowLand(BestMatchingUnits, GeneralizedUmatrix, Data, Cls, Key, LowLimit)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{BestMatchingUnits}{[1:n,1:n,1:n] BestMatchingUnits =[BMkey, BMLineCoords, BMColCoords]}
\item{GeneralizedUmatrix}{[1:l,1:c] U-Matrix heights in Matrix form}
\item{Data}{[1:n,1:d] data cases in lines, variables in Columns or [] or 0}
\item{Cls}{[1:n] a possible classification of the data or [] or 0}
\item{Key}{[1:n] the keys of the data or [] or 0}
\item{LowLimit}{GeneralizedUmatrix heights up to this value are considered to lie in
the low lands; default: LowLimit = prctile(Uheights, 80), i.e. only the 80 percent lowest heights}
}
\value{
\item{LowLandBM}{the unique BestMatchingUnits in the low lands of a U-matrix}
\item{LowLandInd}{index such that UniqueBM = BestMatchingUnits[UniqueInd, ]}
\item{LowLandData}{Data reduced to LowLand: LowLandData = Data[LowLandInd, ]}
\item{LowLandCls}{Cls reduced to LowLand: LowLandCls = Cls[LowLandInd]}
\item{LowLandKey}{Key reduced to LowLand: LowLandKey = Key[LowLandInd]}
}
\author{
ALU 2021 in matlab, MCT reimplemented in R
}
| /man/LowLand.Rd | no_license | cran/GeneralizedUmatrix | R | false | false | 1,215 | rd | \name{LowLand}
\alias{LowLand}
\title{LowLand}
\description{
LowLand
}
\usage{
LowLand(BestMatchingUnits, GeneralizedUmatrix, Data, Cls, Key, LowLimit)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{BestMatchingUnits}{[1:n,1:n,1:n] BestMatchingUnits =[BMkey, BMLineCoords, BMColCoords]}
\item{GeneralizedUmatrix}{[1:l,1:c] U-Matrix heights in Matrix form}
\item{Data}{[1:n,1:d] data cases in lines, variables in Columns or [] or 0}
\item{Cls}{[1:n] a possible classification of the data or [] or 0}
\item{Key}{[1:n] the keys of the data or [] or 0}
\item{LowLimit}{GeneralizedUmatrix heights up to this are considered to lie in
the low lands default: LowLimit = prctile(Uheights,80) nur die 80# tiefsten}
}
\value{
\item{LowLandBM}{the unique BestMatchingUnits in the low lands of an u-Matrix}
\item{LowLandInd}{index such that UniqueBM = BestMatchingUnits(UniqueInd,]}
\item{LowLandData}{Data reduced to LowLand: LowLandData = Data(LowLandInd,]}
\item{LowLandCls}{Cls reduced to LowLand: LowLandCls = Cls(LowLandInd)}
\item{LowLandKey}{Key reduced to LowLand: LowLandKey = Key(LowLandInd)}
}
\author{
ALU 2021 in matlab, MCT reimplemented in R
}
|
library(patchwork)
candidate_name <- c("Andrew Yang")
# Gets Candidate's Scripts ------------------------------------------------
# Loads helper functions (format_filename, get_candidate_transcripts,
# tokenize_transcripts) and, presumably, `cut_off_time` used below -- TODO
# confirm cut_off_time is defined by the sourced script.
source(here::here("R","candidate_scripts.R"))
file_name <- format_filename(candidate_name)
candidate_transcripts <-
get_candidate_transcripts(candidate_name, doc_col = TRUE)
candidate_token <-
tokenize_transcripts(candidate_transcripts, rm_stop = TRUE, rm_num = TRUE)
# Descriptive Statistics --------------------------------------------------
# Creates a df with count of words per document as well as speaking time
# Nonunique words with dropped stopwords
words_per_document <-
candidate_token %>%
count(document, speaking_time_seconds)
#Creats a dot plot of words by speaking time (with smoothed trend line)
words_per_time_plot <-
ggplot(words_per_document, aes(x = speaking_time_seconds, y = n)) +
geom_point() +
geom_smooth() +
labs(x = "Speaking Time in Seconds",
y = "Words",
title = "Words per Seconds of Speaking Time")
#Creates a distribution plot of the number of words per document
# NOTE(review): `..density..` is deprecated in current ggplot2 -- prefer
# after_stat(density). Left unchanged here.
distribution_of_words <- ggplot(words_per_document, aes(x = n)) +
geom_histogram(aes(y = ..density..), # Histogram with density instead of count on y-axis
binwidth = 1) +
geom_density(alpha = .2, fill = "#FF6666") +
labs(x = "Number of Words",
y = "Density",
title = "Number of words per Document")
# Creates a boxplot with the documents and words
boxplot_amt_words <-
ggplot(words_per_document, aes(x = document, y = n)) +
geom_boxplot() +
geom_jitter(alpha = .5) +
labs(x = "Documents",
y = "# of Words",
title = "Amount of words per document")
#Creates a plot with all three graphs (patchwork composition)
tri_plot <-
boxplot_amt_words + distribution_of_words + words_per_time_plot +
plot_annotation(title = "Descriptive Statistics of Documents",
subtitle = paste("Candidate:",candidate_name, "| Cut off time:", cut_off_time))
# Save's image
ggsave(here::here("output","graphs","candidates",paste(file_name),"descriptive_stats",paste(cut_off_time,"descriptivestats",paste(file_name),".png", sep = "_")),
plot = tri_plot, dpi = 1000, width = 8, height =6)
tri_plot
nrow(words_per_document)
| /R/descriptivestats_documents.R | no_license | wesley4546/DebateTranscripts2020 | R | false | false | 2,203 | r | library(patchwork)
candidate_name <- c("Andrew Yang")
# Gets Candidate's Scripts ------------------------------------------------
source(here::here("R","candidate_scripts.R"))
file_name <- format_filename(candidate_name)
candidate_transcripts <-
get_candidate_transcripts(candidate_name, doc_col = TRUE)
candidate_token <-
tokenize_transcripts(candidate_transcripts, rm_stop = TRUE, rm_num = TRUE)
# Descriptive Statistics --------------------------------------------------
# Creates a df with count of words per document as well as speaking time
# Nonunique words with dropped stopwords
words_per_document <-
candidate_token %>%
count(document, speaking_time_seconds)
#Creats a dot plot of words by speaking time
words_per_time_plot <-
ggplot(words_per_document, aes(x = speaking_time_seconds, y = n)) +
geom_point() +
geom_smooth() +
labs(x = "Speaking Time in Seconds",
y = "Words",
title = "Words per Seconds of Speaking Time")
#Creates a distribution plot of the number of words per document
distribution_of_words <- ggplot(words_per_document, aes(x = n)) +
geom_histogram(aes(y = ..density..), # Histogram with density instead of count on y-axis
binwidth = 1) +
geom_density(alpha = .2, fill = "#FF6666") +
labs(x = "Number of Words",
y = "Density",
title = "Number of words per Document")
# Creates a boxplot with the documents and words
boxplot_amt_words <-
ggplot(words_per_document, aes(x = document, y = n)) +
geom_boxplot() +
geom_jitter(alpha = .5) +
labs(x = "Documents",
y = "# of Words",
title = "Amount of words per document")
#Creates a plot with all three graphs
tri_plot <-
boxplot_amt_words + distribution_of_words + words_per_time_plot +
plot_annotation(title = "Descriptive Statistics of Documents",
subtitle = paste("Candidate:",candidate_name, "| Cut off time:", cut_off_time))
# Save's image
ggsave(here::here("output","graphs","candidates",paste(file_name),"descriptive_stats",paste(cut_off_time,"descriptivestats",paste(file_name),".png", sep = "_")),
plot = tri_plot, dpi = 1000, width = 8, height =6)
tri_plot
nrow(words_per_document)
|
# Unroot a phylogenetic tree using the ape package.
library(ape)
# Read the tree (Newick format) from disk.
testtree <- read.tree("3667_0.txt")
# Remove the root; presumably required for downstream codeml input
# (per the surrounding file path) -- verify against the pipeline.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3667_0_unrooted.txt") | /codeml_files/newick_trees_processed/3667_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("3667_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3667_0_unrooted.txt") |
# Descriptive statistics
# Fix: the plotting package is ggplot2 -- there is no installable "ggplot"
# package on CRAN, so library(ggplot) would error at startup.
library(ggplot2)
library(tidyverse)
library(gridExtra)

# Analysis data set prepared elsewhere in the project.
full_data <- readRDS("data/full_data.rds")
# Order race levels so "White" comes first (the reference level for models).
full_data$race <- factor(full_data$race, levels = c("White", "Black", "Hispanic", "Other"))
# 0/1 indicators for race == Black and region == West.
full_data$black <- ifelse(full_data$race == "Black", 1, 0)
full_data$west <- ifelse(full_data$region == "West", 1, 0)
# Examiner ID as a factor with physician 3007 as the reference level
# (levels: 3007 first, then 3001:3012 with the 7th element dropped).
full_data$examiner_id_factor <- relevel(factor(full_data$examiner_id, levels = as.character(c(3007, c(3001:3012)[-7]))), ref = "3007")
# use this if you want the data that was actually used to fit the model
# NOTE(review): health_model_examiner_intgender must already exist in the
# session (presumably fitted by another script) -- confirm before running
# this file standalone.
used_data <- model.frame(health_model_examiner_intgender)

# Health status
# Bar chart of the five-point self-reported health status, with count labels.
phealth <- full_data %>% filter(!is.na(health_status)) %>%
  count(health_status) %>%
  ggplot + geom_bar(aes(x = health_status, y = n), stat = "identity") +
  scale_x_continuous(breaks = 1:5, labels = c("Excellent", "Very good", "Good", "Fair", "Poor")) +
  xlab("Health status (five-point)") +
  ylab("Count") +
  geom_label(aes(x = health_status, y = n, label = n))

# Dichotomized version: 1 = Excellent, 0 = Very good or worse.
phealth2 <- full_data %>% filter(!is.na(health_status)) %>%
  count(health_status2) %>%
  ggplot + geom_bar(aes(x = health_status2, y = n), stat = "identity") +
  scale_x_continuous(breaks = 0:1, labels = c("Very good or worse", "Excellent")) +
  xlab("Health status (dichotomized)") +
  ylab("Count") +
  geom_label(aes(x = health_status2, y = n, label = n))

# Arrange both panels side by side and save. The piped grob lands in
# ggsave's `plot` argument because the named `file=` partially matches
# the `filename` formal.
grid.arrange(phealth, phealth2, ncol = 2, top = "Health Status among NHANES III Adults") %>% ggsave(file = "charts/health_status_counts.png", width = 7, height = 3)

# Caseloads
# Bar chart of adult caseload per examining physician, labelled with counts.
pexaminers <- full_data %>% filter(!is.na(examiner_id_factor)) %>%
  count(examiner_id_factor) %>%
  ggplot + geom_bar(aes(x = examiner_id_factor, y = n), stat = "identity") +
  ylab("Count") +
  geom_label(aes(x = examiner_id_factor, y = n, label = n)) +
  xlab("Physician ID") +
  ggtitle("Adult Caseloads for Examining Physicians")

pexaminers %>% ggsave(file = "charts/examiner_distribution.png", width = 7, height = 3)
| /descriptives.R | no_license | sofisinozich/SURV617_Final_Project | R | false | false | 1,872 | r | # Descriptive statistics
library(ggplot)
library(tidyverse)
library(gridExtra)
full_data <- readRDS("data/full_data.rds")
full_data$race <- factor(full_data$race,levels=c("White","Black","Hispanic","Other"))
full_data$black <- ifelse(full_data$race=="Black",1,0)
full_data$west <- ifelse(full_data$region=="West",1,0)
full_data$examiner_id_factor <- relevel(factor(full_data$examiner_id,levels=as.character(c(3007,c(3001:3012)[-7]))),ref="3007")
# use this if you want the data that was actually used to fit the model
used_data<-model.frame(health_model_examiner_intgender)
# Health status
phealth <- full_data %>% filter(!is.na(health_status)) %>%
count(health_status) %>%
ggplot + geom_bar(aes(x = health_status,y=n),stat="identity")+
scale_x_continuous(breaks=1:5,labels = c("Excellent","Very good","Good","Fair","Poor")) +
xlab("Health status (five-point)") +
ylab("Count") +
geom_label(aes(x = health_status,y=n,label=n))
phealth2 <- full_data %>% filter(!is.na(health_status)) %>%
count(health_status2) %>%
ggplot + geom_bar(aes(x = health_status2,y=n),stat="identity") +
scale_x_continuous(breaks = 0:1,labels = c("Very good or worse", "Excellent")) +
xlab("Health status (dichotomized)") +
ylab("Count") +
geom_label(aes(x = health_status2,y=n,label=n))
grid.arrange(phealth,phealth2,ncol=2,top ="Health Status among NHANES III Adults") %>% ggsave(file="charts/health_status_counts.png",width=7,height=3)
# Caseloads
pexaminers <- full_data %>% filter(!is.na(examiner_id_factor)) %>%
count(examiner_id_factor) %>%
ggplot + geom_bar(aes(x = examiner_id_factor,y=n), stat="identity") +
ylab("Count") +
geom_label(aes(x =examiner_id_factor, y=n, label=n)) +
xlab("Physician ID") +
ggtitle("Adult Caseloads for Examining Physicians")
pexaminers %>% ggsave(file="charts/examiner_distribution.png",width=7,height=3)
|
#' @title reaction_time_class
#' @import ggplot2
#' @description An S4 class for storing results of reaction time Bayesian model.
#'
#' \strong{Functions}
#'
#' summary(`reaction_time_class`): prints a summary of the fit.
#'
#' print(`reaction_time_class`): prints a more detailed summary of the fit
#'
#' show(`reaction_time_class`): prints a more detailed summary of the fit.
#'
#' plot(`reaction_time_class`): plots fitted model against the data. Use this function to explore the quality of your fit.
#'
#' plot(`reaction_time_class`, subjects='boolean'): plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subject level (subjects=TRUE) or on the group level (subjects=FALSE).
#'
#' plot_fit(`reaction_time_class`): plots fitted model against the data. Use this function to explore the quality of your fit.
#'
#' plot_fit(`reaction_time_class`, subjects='boolean'): plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subject level (subjects=TRUE) or on the group level (subjects=FALSE).
#'
#' plot_trace(`reaction_time_class`): traceplot for main fitted model parameters.
#'
#' get_parameters(`reaction_time_class`): returns a dataframe with values of fitted parameters.
#'
#' get_subject_parameters(`reaction_time_class`): returns a dataframe with values of fitted parameters for each subject in the hierarchical model.
#'
#' compare_means(`reaction_time_class`, fit2=`reaction_time_class`): returns difference in reaction times between two groups. You can also provide the rope parameter or execute the comparison only through a chosen parameter - mu or lambda.
#'
#' compare_means(`reaction_time_class`, fits=`list`): returns difference in reaction times between multiple groups. You can also provide the rope parameter or execute the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means_difference(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the difference between two groups. You can also provide the rope and bins (number of bins in the histogram) parameters or visualize the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means_difference(`reaction_time_class`, fits=`list`): a visualization of the difference between multiple groups. You can also provide the rope and bins (number of bins in the histogram) parameters or visualize the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`): plots density of the means. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`, fit2=`reaction_time_class`): plots density for the first and the second group means. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`, fits=`list`): plots density for means of multiple groups. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' compare_distributions(`reaction_time_class`, fit2=`reaction_time_class`): draws samples from distribution of the first group and compares them against samples drawn from the distribution of the second group. You can also provide the rope parameter.
#'
#' compare_distributions(`reaction_time_class`, fits=`lists`): draws and compares samples from distributions of multiple groups. You can also provide the rope parameter.
#'
#' plot_distributions(`reaction_time_class`): a visualization of the fitted distribution.
#'
#' plot_distributions(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the distribution for two fits.
#'
#' plot_distributions(`reaction_time_class`, fits=`list`): a visualization of the distribution for multiple fits.
#'
#' plot_distributions_difference(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the difference between the distribution of the first group and the second group. You can also provide the rope and bins (number of bins in the histogram) parameters.
#'
#' plot_distributions_difference(`reaction_time_class`, fits=`list`): a visualization of the difference between the distributions of multiple groups. You can also provide the rope and bins (number of bins in the histogram) parameters.
#'
#' @slot extract Extract from Stan fit.
#' @slot fit Stan fit.
#' @slot data Data on which the fit is based.
#'
#' @examples
#' \donttest{
#' # priors
#' mu_prior <- b_prior(family="normal", pars=c(0, 100))
#' sigma_prior <- b_prior(family="uniform", pars=c(0, 500))
#' lambda_prior <- b_prior(family="uniform", pars=c(0.05, 5))
#'
#' # attach priors to relevant parameters
#' priors <- list(c("mu_m", mu_prior),
#' c("sigma_m", sigma_prior),
#' c("mu_s", sigma_prior),
#' c("sigma_s", sigma_prior),
#' c("mu_l", lambda_prior),
#' c("sigma_l", sigma_prior))
#'
#'
#' # subjects
#' s <- rep(1:5, 20)
#'
#' # generate data and fit
#' rt1 <- emg::remg(100, mu=10, sigma=1, lambda=0.4)
#' fit1 <- b_reaction_time(t=rt1, s=s, priors=priors, chains=1)
#'
#' rt2 <- emg::remg(100, mu=10, sigma=2, lambda=0.1)
#' fit2 <- b_reaction_time(t=rt2, s=s, priors=priors, chains=1)
#'
#' rt3 <- emg::remg(100, mu=20, sigma=2, lambda=1)
#' fit3 <- b_reaction_time(t=rt3, s=s, priors=priors, chains=1)
#'
#' rt4 <- emg::remg(100, mu=15, sigma=2, lambda=0.5)
#' fit4 <- b_reaction_time(t=rt4, s=s, priors=priors, chains=1)
#'
#' # fit list
#' fit_list <- list(fit2, fit3, fit4)
#'
#' # a short summary of fitted parameters
#' summary(fit1)
#'
#' # a more detailed summary of fitted parameters
#' print(fit1)
#' show(fit1)
#'
#' # plot the fitted distribution against the data
#' plot(fit1)
#' plot_fit(fit1)
#'
#' # plot the fitted distribution against the data,
#' # plot on the top (group) level
#' plot(fit1, subjects=FALSE)
#' plot_fit(fit1, subjects=FALSE)
#'
#' # traceplot of the fitted parameters
#' plot_trace(fit1)
#'
#' # extract parameter values from the fit
#' parameters <- get_parameters(fit1)
#'
#' # extract parameter values on the bottom (subject) level from the fit
#' subject_parameters <- get_subject_parameters(fit1)
#'
#' # compare means between two fits, use a rope interval
#' compare_means(fit1, fit2=fit2, rope=0.5)
#'
#' # compare means between two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' compare_means(fit1, fit2=fit2, par="mu")
#'
#' # compare means between multiple fits
#' compare_means(fit1, fits=fit_list)
#'
#' # visualize difference in means between two fits,
#' # specify number of histogram bins and rope interval
#' plot_means_difference(fit1, fit2=fit2, bins=20, rope=0.5)
#'
#' # visualize difference in means between two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' plot_means_difference(fit1, fit2=fit2, par="mu")
#'
#' # visualize difference in means between multiple fits
#' plot_means_difference(fit1, fits=fit_list)
#'
#' # visualize means of a single fit
#' plot_means(fit1)
#'
#' # visualize means of two fits
#' plot_means(fit1, fit2=fit1)
#'
#' # visualize means of two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' plot_means(fit1, fit2=fit2, par="mu")
#'
#' # visualize means of multiple fits
#' plot_means(fit1, fits=fit_list)
#'
#' # draw samples from distributions underlying two fits and compare them,
#' # use a rope interval
#' compare_distributions(fit1, fit2=fit2, rope=0.5)
#'
#' # draw samples from distributions underlying multiple fits and compare them
#' compare_distributions(fit1, fits=fit_list)
#'
#' # visualize the distribution underlying a fit
#' plot_distributions(fit1)
#'
#' # visualize distributions underlying two fits
#' plot_distributions(fit1, fit2=fit2)
#'
#' # visualize distributions underlying multiple fits
#' plot_distributions(fit1, fits=fit_list)
#'
#' # visualize difference between distributions underlying two fits,
#' # use a rope interval
#' plot_distributions_difference(fit1, fit2=fit2, rope=0.05)
#'
#' # visualize difference between distributions underlying multiple fits
#' plot_distributions_difference(fit1, fits=fit_list)
#' }
#'
# S4 class holding the result of fitting the Bayesian reaction time model:
# the raw Stan fit, its extracted posterior samples, and the input data.
reaction_time_class <- setClass(
  "reaction_time_class",
  slots = c(
    extract = "list",   # posterior draws extracted from the Stan fit
    fit = "stanfit",    # the underlying rstan stanfit object
    data = "list"       # the data the model was fitted on
  ),
  contains = "b_results"  # shared parent class, defined elsewhere in the package
)
#' @title summary
#' @description \code{summary} prints a summary of the Bayesian reaction time fit.
#' @param object reaction_time_class object.
#' @exportMethod summary
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="summary", signature(object="reaction_time_class"), definition=function(object) {
  # Posterior draws for the group-level quantities reported in the summary.
  draws_rt     <- object@extract$rt
  draws_mu     <- object@extract$mu_m
  draws_sigma  <- object@extract$mu_s
  draws_lambda <- object@extract$mu_l

  # 95% highest density interval for each quantity.
  hdi_rt     <- mcmc_hdi(draws_rt)
  hdi_mu     <- mcmc_hdi(draws_mu)
  hdi_sigma  <- mcmc_hdi(draws_sigma)
  hdi_lambda <- mcmc_hdi(draws_lambda)

  # Report posterior mean +/- Monte Carlo standard error, plus the HDI.
  cat(sprintf("rt:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
              mean(draws_rt), mcmcse::mcse(draws_rt)$se, hdi_rt[1], hdi_rt[2]))
  cat(sprintf("mu:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
              mean(draws_mu), mcmcse::mcse(draws_mu)$se, hdi_mu[1], hdi_mu[2]))
  cat(sprintf("sigma:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
              mean(draws_sigma), mcmcse::mcse(draws_sigma)$se, hdi_sigma[1], hdi_sigma[2]))
  cat(sprintf("lambda:\t\t%.4f +/- %.5f\t95%% HDI: [%.4f, %.4f]\n",
              mean(draws_lambda), mcmcse::mcse(draws_lambda)$se, hdi_lambda[1], hdi_lambda[2]))
})
#' @title show
#' @description \code{show} prints a more detailed summary of the Bayesian reaction time fit.
#' @param object reaction_time_class object.
#' @exportMethod show
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="show", signature(object="reaction_time_class"), definition=function(object) {
  # Delegate to the show method of the stored stanfit object, which
  # prints a detailed summary of the fit.
  show(object@fit)
})
#' @title plot
#' @description \code{plot} plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subjects level (subjects=TRUE) or on the group level (subjects=FALSE).
#' @param x reaction_time_class object.
#' @param y empty dummy variable, ignore this.
#' @param ... subjects - plot fits on a subject level (default = TRUE).
#' @exportMethod plot
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot", signature(x="reaction_time_class", y="missing"), definition=function(x, ...) {
  # Thin wrapper: plotting a fit is the same as calling plot_fit on it;
  # extra arguments (e.g. subjects) are forwarded unchanged.
  return(plot_fit(object=x, ...))
})
#' @title plot_fit
#' @description \code{plot_fit} plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subjects level (subjects=TRUE) or on the group level (subjects=FALSE).
#' @param object reaction_time_class object.
#' @param ... subjects - plot fits on a subject level (default = TRUE).
#' @rdname reaction_time_class-plot_fit
#' @aliases plot_fit_reaction_time
#' @return A ggplot visualization.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_fit", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Plots the fitted exponentially modified Gaussian density against the
  # observed reaction times: pooled on the group level (subjects=FALSE)
  # or one panel per subject (subjects=TRUE, the default).
  # init local variables for CRAN check
  rt <- x <- y <- NULL

  arguments <- list(...)

  # plot on a subject level?
  subjects <- TRUE
  if (!is.null(arguments$subjects)) {
    subjects <- arguments$subjects
  }

  df_data <- data.frame(rt=object@data$t, s=object@data$s)

  x_min <- 0

  if (!subjects) {
    # posterior means of the group-level parameters
    mu_m <- mean(object@extract$mu_m)
    mu_s <- mean(object@extract$mu_s)
    mu_l <- mean(object@extract$mu_l)

    # NOTE(review): the upper axis limit depends on a fresh random sample
    # from the fitted distribution, so repeated calls may yield slightly
    # different axis ranges (preserved from the original behavior).
    x_max <- max(emg::remg(1000, mu=mu_m, sigma=mu_s, lambda=mu_l), object@data$t)
    x_max <- x_max + 0.1*abs(x_max)
    step <- (x_max - x_min) / 1000

    df_fit <- data.frame(x = seq(x_min, x_max, step),
                         y = emg::demg(seq(x_min, x_max, step),
                                       mu = mu_m,
                                       sigma = mu_s,
                                       lambda = mu_l))

    graph <- ggplot(df_data, aes(x=rt)) +
      geom_density(fill="#3182bd", alpha=0.4, color=NA) +
      geom_line(data=df_fit, aes(x=x, y=y)) +
      xlab("reaction time")
  } else {
    n <- length(unique(object@data$s))

    x_max <- max(object@data$t)
    x_max <- x_max + 0.1*abs(x_max)
    # the evaluation grid is identical for every subject, so build it once
    # outside the loop instead of recomputing step/seq per iteration
    step <- (x_max - x_min) / 1000
    x_grid <- seq(x_min, x_max, step)

    # one density curve per subject from that subject's posterior means;
    # collected in a list and bound once (avoids quadratic rbind-in-a-loop)
    df_fit <- do.call(rbind, lapply(seq_len(n), function(i) {
      data.frame(x = x_grid,
                 s = i,
                 y = emg::demg(x_grid,
                               mu = mean(object@extract$mu[,i]),
                               sigma = mean(object@extract$sigma[,i]),
                               lambda = mean(object@extract$lambda[,i])))
    }))

    # ncol: lay the panels out in a near-square grid
    n_col <- ceiling(sqrt(n))

    # density per subject
    graph <- ggplot(df_data, aes(x=rt)) +
      geom_density(fill="#3182bd", alpha=0.4, color=NA) +
      geom_line(data=df_fit, aes(x=x, y=y)) +
      facet_wrap(~ s, ncol=n_col) +
      xlab("reaction time")
  }

  return(graph)
})
#' @title plot_trace
#' @description \code{plot_trace} traceplot for main fitted model parameters.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-plot_trace
#' @aliases plot_trace_reaction_time
#' @return A ggplot visualization.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_trace", signature(object="reaction_time_class"), definition=function(object) {
  # Traceplot of the three group-level parameters (mu_m, mu_s, mu_l),
  # including warmup iterations, via rstan::traceplot.
  rstan::traceplot(object@fit, pars=c("mu_m", "mu_s", "mu_l"), inc_warmup = TRUE)
})
#' @title get_parameters
#' @description \code{get_parameters} returns a dataframe with values of fitted parameters.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-get_parameters
#' @aliases get_parameters_reaction_time
#' @return A data frame with parameter values.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="get_parameters", signature(object="reaction_time_class"), definition=function(object) {
  # Group-level posterior samples, one column per model parameter.
  samples <- object@extract
  data.frame(rt     = samples$rt,
             mu     = samples$mu_m,
             sigma  = samples$mu_s,
             lambda = samples$mu_l)
})
#' @title get_subject_parameters
#' @description \code{get_subject_parameters} returns a dataframe with values of fitted parameters for each subject in the hierarchical model.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-get_subject_parameters
#' @aliases get_subject_parameters_reaction_time
#' @return A data frame with parameter values.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="get_subject_parameters", signature(object="reaction_time_class"), definition=function(object) {
  # Returns the posterior samples of the bottom (subject) level parameters,
  # stacked into one data frame with a `subject` index column.
  # Builds one data frame per subject and binds them once at the end,
  # instead of the original O(n^2) rbind-inside-a-loop growth.
  n <- length(unique(object@data$s))
  per_subject <- lapply(seq_len(n), function(i) {
    data.frame(rt      = object@extract$rt_subjects[,i],
               mu      = object@extract$mu[,i],
               sigma   = object@extract$sigma[,i],
               lambda  = object@extract$lambda[,i],
               subject = i)
  })
  do.call(rbind, per_subject)
})
#' @title compare_means
#' @description \code{compare_means} prints difference in reaction times between two groups or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, par - specific parameter of comparison (mu or lambda).
#' @rdname reaction_time_class-compare_means
#' @aliases compare_means_reaction_time
#' @return Comparison results or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="compare_means", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Pairwise comparison of mean reaction times between this fit and one
  # (fit2=...) or several (fits=list(...)) other reaction_time_class fits.
  # Optional: rope (region of practical equivalence) and par ("mu" or
  # "lambda") to compare through a single model parameter instead of the
  # overall reaction time.
  arguments <- list(...)

  wrong_arguments <- "The provided arguments for the compare_means function are invalid, compare_means(reaction_time_class, fit2=reaction_time_class) or compare_means(reaction_time_class, fits=list) is required! You can optionallly provide the rope parameter, e.g. compare_means(reaction_time_class, fit2=reaction_time_class, rope=numeric). You can also execute the comparison through only the mu or the lamdba parameter, e.g. compare_means(reaction_time_class, fit2=reaction_time_class, par=\"mu\")."

  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }

  # prepare rope (normalised by the package's prepare_rope helper)
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)

  # compare only through one parameter; any value other than "mu" or
  # "lambda" warns and falls back to the default comparison
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par

    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }

  # first group data: samples of the quantity being compared
  y <- list()
  if (is.null(par)) {
    y[[1]] <- object@extract$rt
  } else if (par == "mu") {
    y[[1]] <- object@extract$mu_m
  } else if (par == "lambda") {
    y[[1]] <- object@extract$mu_l
  }

  # second group data: either one additional fit (fit2, possibly passed
  # positionally) or a list of fits
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }

    if (is.null(par)) {
      y[[2]] <- fit2@extract$rt
    } else if (par == "mu") {
      y[[2]] <- fit2@extract$mu_m
    } else if (par == "lambda") {
      y[[2]] <- fit2@extract$mu_l
    }
  } else if (!is.null(arguments$fits)) {
    # provided a list of fits; groups 2..k in the order given
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      if (is.null(par)) {
        y[[i]] <- fit@extract$rt
      } else if (par == "mu") {
        y[[i]] <- fit@extract$mu_m
      } else if (par == "lambda") {
        y[[i]] <- fit@extract$mu_l
      }
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }

  # pairwise comparisons via the package's difference helper; the matrix
  # stores result[1] below and result[2] above the diagonal
  n <- length(y)
  comparison_matrix <- matrix(nrow = n, ncol = n)
  for (i in 1:(n-1)) {
    for (j in (i+1):n) {
      cat(sprintf("\n---------- Group %d vs Group %d ----------\n", i, j))
      result <- difference(y1=y[[i]], y2=y[[j]], rope=rope, group1=i, group2=j)
      comparison_matrix[j,i] <- result[1]
      comparison_matrix[i,j] <- result[2]
      cat("\n")
    }
  }

  # largest/smallest probabilities: only meaningful for 3+ groups
  if (n > 2) {
    cat("-----------------------------------------")
    cat("\nProbabilities that a certain group is\nsmallest/largest or equal to all others:\n\n")
    smallest_largest <- is_smallest_or_largest(data=y, rope=rope)
    print(smallest_largest)
    cat("\n\n")
    return(list(comparison_matrix=comparison_matrix, smallest_largest=smallest_largest))
  } else {
    return(comparison_matrix)
  }
})
#' @title plot_means_difference
#' @description \code{plot_means_difference} a visualization of the difference between two groups or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, bins - number of bins in the histogram, par - specific parameter of comparison (mu or lambda).
#' @rdname reaction_time_class-plot_means_difference
#' @aliases plot_means_difference_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_means_difference", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Visualizes the difference in means between this fit and one (fit2=...)
  # or several (fits=list(...)) other fits. Optional: rope, bins (histogram
  # bins) and par ("mu" or "lambda") to compare a single model parameter.
  # init local variables for CRAN check
  value <- NULL

  arguments <- list(...)

  wrong_arguments <- "The provided arguments for the plot_means_difference function are invalid, plot_means_difference(reaction_time_class, fit2=reaction_time_class) or plot_means_difference(reaction_time_class, fits=list) is required! You can optionallly provide the rope and bins (number of bins in the histogram) parameters, e.g. plot_means_difference(reaction_time_class, fit2=reaction_time_class, rope=numeric, bins=numeric). You can also visualize the difference through only the mu or the lamdba parameter, e.g. plot_means_difference(reaction_time_class, fit2=reaction_time_class, par=\"mu\")."

  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }

  # prepare rope (normalised by the package's prepare_rope helper)
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)

  # compare only through one parameter; any value other than "mu" or
  # "lambda" warns and falls back to the default comparison
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par

    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }

  # first group data: samples of the quantity being compared
  y <- list()
  if (is.null(par)) {
    y[[1]] <- object@extract$rt
  } else if (par == "mu") {
    y[[1]] <- object@extract$mu_m
  } else if (par == "lambda") {
    y[[1]] <- object@extract$mu_l
  }

  # limits: track the largest sample for a shared x-axis upper bound
  x_max <- max(y[[1]])

  # second group data: one additional fit (fit2, possibly positional)
  # or a list of fits
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }

    if (is.null(par)) {
      y[[2]] <- fit2@extract$rt
    } else if (par == "mu") {
      y[[2]] <- fit2@extract$mu_m
    } else if (par == "lambda") {
      y[[2]] <- fit2@extract$mu_l
    }
  } else if (!is.null(arguments$fits)) {
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      if (is.null(par)) {
        y[[i]] <- fit@extract$rt
      } else if (par == "mu") {
        y[[i]] <- fit@extract$mu_m
      } else if (par == "lambda") {
        y[[i]] <- fit@extract$mu_l
      }

      # limits
      x_max <- max(x_max, y[[i]])

      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }

  # bins in the histogram (default 30)
  bins <- 30
  if (!is.null(arguments$bins)) {
    bins <- arguments$bins
  }

  # if no list is provided: a single two-group difference plot
  if (is.null(arguments$fits)) {
    # call plot difference shared function
    graph <- plot_difference(y1=y[[1]], y2=y[[2]], rope=rope, bins=bins)
    return(graph)
  } else {
    # multiple groups: n x n grid with densities on the diagonal and
    # pairwise difference plots off the diagonal
    x_max <- x_max + 0.1*x_max

    graphs <- list()
    n <- length(y)
    for (i in 1:n) {
      for (j in i:n) {
        # if both are equal plot means, else plot difference
        if (i == j) {
          df <- data.frame(value=y[[i]])
          index <- (i-1)*n + i
          graphs[[index]] <- ggplot() +
            geom_density(data=df, aes(x=value), fill="#3182bd", color=NA, alpha=0.4) +
            xlab("reaction time") +
            xlim(0, x_max)
        } else {
          # both orderings of the pair, mirrored across the diagonal
          index1 <- (i-1)*n + j
          graphs[[index1]] <- plot_difference(y1=y[[i]], y2=y[[j]], rope=rope, bins=bins, nrow=n)

          index2 <- (j-1)*n + i
          graphs[[index2]] <- plot_difference(y1=y[[j]], y2=y[[i]], rope=rope, bins=bins, nrow=n)
        }
      }
    }

    # cowplot: assemble the grid of panels
    graph <- suppressWarnings(cowplot::plot_grid(plotlist=graphs, nrow=n, ncol=n, scale=0.9))

    return(graph)
  }
})
#' @title plot_means
#' @description \code{plot_means} plots density of means for one, two or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, par - plot a specific parameter (mu or lambda).
#' @rdname reaction_time_class-plot_means
#' @aliases plot_means_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_means", signature(object="reaction_time_class"), definition=function(object, ...) {
  # suppress R CMD check notes about non-standard evaluation variables
  group <- value <- NULL

  arguments <- list(...)

  # optionally restrict the visualization to a single model parameter
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par
    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }

  # pull the relevant posterior samples out of a fit, depending on
  # which parameter (if any) was requested
  extract_samples <- function(fit) {
    if (is.null(par)) {
      fit@extract$rt
    } else if (par == "mu") {
      fit@extract$mu_m
    } else {
      fit@extract$mu_l
    }
  }

  # samples of the first group
  df <- data.frame(value=extract_samples(object), group="1")

  # samples of additional groups, when provided
  if (length(arguments) > 0) {
    if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
      # a single additional fit
      fit2 <- if (!is.null(arguments$fit2)) arguments$fit2 else arguments[[1]]
      df <- rbind(df, data.frame(value=extract_samples(fit2), group="2"))
    } else if (!is.null(arguments$fits)) {
      # a list of additional fits
      i <- 2
      for (fit in arguments$fits) {
        if (!("reaction_time_class" %in% class(fit))) {
          stop("One of the fits in the fits list is not a valid reaction_time_class object.")
        }
        df <- rbind(df, data.frame(value=extract_samples(fit), group=as.factor(i)))
        i <- i + 1
      }
    }
  }

  # upper limit for the x axis, padded by 10%
  x_max <- max(df$value)
  x_max <- x_max + 0.1*x_max

  # density of the means, one fill colour per group
  graph <- ggplot() +
    geom_density(data=df, aes(x=value, fill=group), color=NA, alpha=0.4) +
    xlab("reaction time") +
    xlim(0, x_max)

  # colour scheme depends on the number of groups
  n_groups <- max(as.numeric(df$group))
  if (n_groups == 2) {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd", "#ff4e3f"))
  } else if (n_groups > 2) {
    graph <- graph +
      scale_fill_hue()
  } else {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd")) +
      theme(legend.position="none")
  }

  return(suppressWarnings(graph))
})
#' @title compare_distributions
#' @description \code{compare_distributions} draws samples from distribution of the first group and compares them against samples drawn from the distribution of the second group or from samples drawn from distributions of multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence.
#' @rdname reaction_time_class-compare_distributions
#' @aliases compare_distributions_reaction_time
#' @return Comparison results or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="compare_distributions", signature(object="reaction_time_class"), definition=function(object, ...) {
  arguments <- list(...)

  wrong_arguments <- "The provided arguments for the compare_distributions function are invalid, compare_distributions(reaction_time_class, fit2=reaction_time_class) or compare_distributions(reaction_time_class, fits=list) is required! You can also provide the rope parameter, e.g. compare_distributions(reaction_time_class, fit2=reaction_time_class, rope=numeric)."

  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }

  # region of practical equivalence (arguments$rope is NULL when absent)
  rope <- prepare_rope(arguments$rope)

  # number of samples drawn from each fitted distribution
  n_samples <- 100000

  # draw samples from the exponentially modified gaussian described
  # by the mean posterior parameter values of a fit
  draw_samples <- function(fit) {
    emg::remg(n_samples,
              mu=mean(fit@extract$mu_m),
              sigma=mean(fit@extract$mu_s),
              lambda=mean(fit@extract$mu_l))
  }

  # samples of the first group
  y <- list()
  y[[1]] <- draw_samples(object)

  # samples of the remaining groups
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # a single additional fit
    fit2 <- if (!is.null(arguments$fit2)) arguments$fit2 else arguments[[1]]
    y[[2]] <- draw_samples(fit2)
  } else if (!is.null(arguments$fits)) {
    # a list of additional fits
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      y[[i]] <- draw_samples(fit)
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }

  # pairwise comparison between all groups
  n <- length(y)
  comparison_matrix <- matrix(nrow = n, ncol = n)
  for (i in 1:(n-1)) {
    for (j in (i+1):n) {
      cat(sprintf("\n---------- Group %d vs Group %d ----------\n", i, j))
      result <- difference(y1=y[[i]], y2=y[[j]], rope=rope, group1=i, group2=j)
      comparison_matrix[j,i] <- result[1]
      comparison_matrix[i,j] <- result[2]
      cat("\n")
    }
  }

  # with more than two groups also report, for every group, the
  # probability that it is the smallest/largest or equal to all others
  if (n > 2) {
    cat("-----------------------------------------")
    cat("\nProbabilities that a certain group is\nsmallest/largest or equal to all others:\n\n")
    smallest_largest <- is_smallest_or_largest(data=y, rope=rope)
    print(smallest_largest)
    cat("\n\n")
    return(list(comparison_matrix=comparison_matrix, smallest_largest=smallest_largest))
  }

  return(comparison_matrix)
})
#' @title plot_distributions
#' @description \code{plot_distributions} a visualization of the fitted distribution, for one, two or multiple fits.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects.
#' @rdname reaction_time_class-plot_distributions
#' @aliases plot_distributions_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_distributions", signature(object="reaction_time_class"), definition=function(object, ...) {
  # init local variables for CRAN check
  group <- x <- y <- NULL

  # mean posterior parameter values, one entry per group
  mus <- vector()
  sigmas <- vector()
  lambdas <- vector()
  mus[[1]] <- mean(object@extract$mu_m)
  sigmas[[1]] <- mean(object@extract$mu_s)
  lambdas[[1]] <- mean(object@extract$mu_l)

  # estimate an upper x limit for group idx from random draws of its
  # fitted distribution, padded by 10%
  get_x_max <- function(idx) {
    m <- max(emg::remg(10000, mu=mus[[idx]], sigma=sigmas[[idx]], lambda=lambdas[[idx]]))
    m + 0.1*abs(m)
  }
  x_max <- get_x_max(1)

  # parameters of additional groups, when provided
  arguments <- list(...)
  if (length(arguments) > 0) {
    if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
      # a single additional fit
      if (!is.null(arguments$fit2)) {
        fit2 <- arguments$fit2
      } else {
        fit2 <- arguments[[1]]
      }
      mus[[2]] <- mean(fit2@extract$mu_m)
      sigmas[[2]] <- mean(fit2@extract$mu_s)
      lambdas[[2]] <- mean(fit2@extract$mu_l)
      x_max <- max(x_max, get_x_max(2))
    } else if (!is.null(arguments$fits)) {
      # a list of additional fits
      i <- 2
      for (fit in arguments$fits) {
        if (!("reaction_time_class" %in% class(fit))) {
          stop("One of the fits in the fits list is not a valid reaction_time_class object.")
        }
        mus[[i]] <- mean(fit@extract$mu_m)
        sigmas[[i]] <- mean(fit@extract$mu_s)
        lambdas[[i]] <- mean(fit@extract$mu_l)
        # bug fix: use the parameters of fit i here, the original code
        # always used the parameters of fit 2 so the x limit ignored
        # every fit beyond the second
        x_max <- max(x_max, get_x_max(i))
        i <- i + 1
      }
    }
  }

  # evaluate each group's fitted density on a shared grid and
  # bind the per-group curves together
  step <- 1 / 1000
  n_groups <- length(mus)
  df <- do.call(rbind, lapply(seq_len(n_groups), function(i) {
    data.frame(x = seq(0, x_max, step),
               y = emg::demg(seq(0, x_max, step),
                             mu = mus[i],
                             sigma = sigmas[i],
                             lambda = lambdas[i]),
               group=as.factor(i))
  }))

  # plot, one filled area per group
  graph <- ggplot() +
    geom_area(data=df, aes(x=x, y=y, fill=group), alpha=0.4, position="identity") +
    xlab("reaction time") +
    ylab("density")
  # colour scheme depends on the number of groups
  if (n_groups == 2) {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd", "#ff4e3f"))
  } else if (n_groups > 2) {
    graph <- graph +
      scale_fill_hue()
  } else {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd")) +
      theme(legend.position="none")
  }
  return(suppressWarnings(graph))
})
#' @title plot_distributions_difference
#' @description \code{plot_distributions_difference} a visualization of the difference between the distribution of the first group and the second group or between multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, bins - number of bins in the histogram.
#' @rdname reaction_time_class-plot_distributions_difference
#' @aliases plot_distributions_difference_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_distributions_difference", signature(object="reaction_time_class"), definition=function(object, ...) {
  # init local variables for CRAN check (non-standard evaluation in ggplot)
  value <- NULL
  arguments <- list(...)
  wrong_arguments <- "The provided arguments for the plot_distributions_difference function are invalid, plot_distributions_difference(reaction_time_class, fit2=reaction_time_class) or plot_distributions_difference(reaction_time_class, fits=list) is required! You can also provide the rope and bins (number of bins in the histogram) parameters, e.g. plot_distributions_difference(reaction_time_class, fit2=reaction_time_class, rope=numeric, bins=numeric)."
  # at least one of fit2/fits is mandatory
  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }
  # prepare the region of practical equivalence (NULL when not provided)
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)
  # first group: draw samples from the fitted emg distribution,
  # parameterized by the mean posterior parameter values
  y <- list()
  n <- 100000
  y[[1]] <- emg::remg(n,
                      mu=mean(object@extract$mu_m),
                      sigma=mean(object@extract$mu_s),
                      lambda=mean(object@extract$mu_l))
  # limits for the diagonal density panels
  x_max <- max(y[[1]])
  # second group data: either a single fit2 (also accepted as the first
  # positional argument) or a list of fits
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }
    y[[2]] <- emg::remg(n,
                        mu=mean(fit2@extract$mu_m),
                        sigma=mean(fit2@extract$mu_s),
                        lambda=mean(fit2@extract$mu_l))
  } else if (!is.null(arguments$fits)) {
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      y[[i]] <- emg::remg(n,
                          mu=mean(fit@extract$mu_m),
                          sigma=mean(fit@extract$mu_s),
                          lambda=mean(fit@extract$mu_l))
      # keep track of the largest sample to share x limits across panels
      x_max <- max(x_max, y[[i]])
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }
  # number of bins in the difference histograms (default 30)
  bins <- 30
  if (!is.null(arguments$bins)) {
    bins <- arguments$bins
  }
  # if no list is provided: simple two-group difference plot
  if (is.null(arguments$fits)) {
    # call plot difference shared function
    graph <- plot_difference(y1=y[[1]], y2=y[[2]], rope=rope, bins=bins)
    return(graph)
  } else {
    # multiple groups: build an n x n grid of panels where the diagonal
    # shows each group's samples and off-diagonal cells show pairwise
    # differences (cell [i,j] holds group i minus group j and vice versa)
    x_max <- x_max + 0.1*x_max
    graphs <- list()
    n <- length(y)
    for (i in 1:n) {
      for (j in i:n) {
        # if both are equal plot samples, else plot difference
        if (i == j) {
          df <- data.frame(value=y[[i]])
          # row-major index of the diagonal cell in the flattened grid
          index <- (i-1)*n + i
          graphs[[index]] <- ggplot() +
            geom_density(data=df, aes(x=value), fill="#3182bd", color=NA, alpha=0.4) +
            xlab("reaction time") +
            xlim(0, x_max)
        } else {
          # upper-triangle cell: difference of group i vs group j
          index1 <- (i-1)*n + j
          graphs[[index1]] <- plot_difference(y1=y[[i]], y2=y[[j]], rope=rope, bins=bins, nrow=n)
          # mirrored lower-triangle cell: difference of group j vs group i
          index2 <- (j-1)*n + i
          graphs[[index2]] <- plot_difference(y1=y[[j]], y2=y[[i]], rope=rope, bins=bins, nrow=n)
        }
      }
    }
    # assemble all panels into a single grid with cowplot
    graph <- suppressWarnings(cowplot::plot_grid(plotlist=graphs, nrow=n, ncol=n, scale=0.9))
    return(graph)
  }
})
#' @title reaction_time_class
#' @import ggplot2
#' @description An S4 class for storing results of reaction time Bayesian model.
#'
#' \strong{Functions}
#'
#' summary(`reaction_time_class`): prints a summary of the fit.
#'
#' print(`reaction_time_class`): prints a more detailed summary of the fit
#'
#' show(`reaction_time_class`): prints a more detailed summary of the fit.
#'
#' plot(`reaction_time_class`): plots fitted model against the data. Use this function to explore the quality of your fit.
#'
#' plot(`reaction_time_class`, subjects='boolean'): plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subject level (subjects=TRUE) or on the group level (subjects=FALSE).
#'
#' plot_fit(`reaction_time_class`): plots fitted model against the data. Use this function to explore the quality of your fit.
#'
#' plot_fit(`reaction_time_class`, subjects='boolean'): plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subject level (subjects=TRUE) or on the group level (subjects=FALSE).
#'
#' plot_trace(`reaction_time_class`): traceplot for main fitted model parameters.
#'
#' get_parameters(`reaction_time_class`): returns a dataframe with values of fitted parameters.
#'
#' get_subject_parameters(`reaction_time_class`): returns a dataframe with values of fitted parameters for each subject in the hierarchical model.
#'
#' compare_means(`reaction_time_class`, fit2=`reaction_time_class`): returns difference in reaction times between two groups. You can also provide the rope parameter or execute the comparison only through a chosen parameter - mu or lambda.
#'
#' compare_means(`reaction_time_class`, fits=`list`): returns difference in reaction times between multiple groups. You can also provide the rope parameter. You can also provide the rope parameter or execute the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means_difference(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the difference between two groups. You can also provide the rope and bins (number of bins in the histogram) parameters or visualize the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means_difference(`reaction_time_class`, fits=`list`): a visualization of the difference between multiple groups. You can also provide the rope and bins (number of bins in the histogram) parameters or visualize the comparison only through a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`): plots density of the means. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`, fit2=`reaction_time_class`): plots density for the first and the second group means. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' plot_means(`reaction_time_class`, fits=`list`): plots density for means of multiple groups. You can also visualize the density only for a chosen parameter - mu or lambda.
#'
#' compare_distributions(`reaction_time_class`, fit2=`reaction_time_class`): draws samples from distribution of the first group and compares them against samples drawn from the distribution of the second group. You can also provide the rope parameter.
#'
#' compare_distributions(`reaction_time_class`, fits=`lists`): draws and compares samples from distributions of multiple groups. You can also provide the rope parameter.
#'
#' plot_distributions(`reaction_time_class`): a visualization of the fitted distribution.
#'
#' plot_distributions(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the distribution for two fits.
#'
#' plot_distributions(`reaction_time_class`, fits=`list`): a visualization of the distribution for multiple fits.
#'
#' plot_distributions_difference(`reaction_time_class`, fit2=`reaction_time_class`): a visualization of the difference between the distribution of the first group and the second group. You can also provide the rope and bins (number of bins in the histogram) parameters.
#'
#' plot_distributions_difference(`reaction_time_class`, fits=`list`): a visualization of the difference between the distributions of multiple groups. You can also provide the rope and bins (number of bins in the histogram) parameters.
#'
#' @slot extract Extract from Stan fit.
#' @slot fit Stan fit.
#' @slot data Data on which the fit is based.
#'
#' @examples
#' \donttest{
#' # priors
#' mu_prior <- b_prior(family="normal", pars=c(0, 100))
#' sigma_prior <- b_prior(family="uniform", pars=c(0, 500))
#' lambda_prior <- b_prior(family="uniform", pars=c(0.05, 5))
#'
#' # attach priors to relevant parameters
#' priors <- list(c("mu_m", mu_prior),
#' c("sigma_m", sigma_prior),
#' c("mu_s", sigma_prior),
#' c("sigma_s", sigma_prior),
#' c("mu_l", lambda_prior),
#' c("sigma_l", sigma_prior))
#'
#'
#' # subjects
#' s <- rep(1:5, 20)
#'
#' # generate data and fit
#' rt1 <- emg::remg(100, mu=10, sigma=1, lambda=0.4)
#' fit1 <- b_reaction_time(t=rt1, s=s, priors=priors, chains=1)
#'
#' rt2 <- emg::remg(100, mu=10, sigma=2, lambda=0.1)
#' fit2 <- b_reaction_time(t=rt2, s=s, priors=priors, chains=1)
#'
#' rt3 <- emg::remg(100, mu=20, sigma=2, lambda=1)
#' fit3 <- b_reaction_time(t=rt3, s=s, priors=priors, chains=1)
#'
#' rt4 <- emg::remg(100, mu=15, sigma=2, lambda=0.5)
#' fit4 <- b_reaction_time(t=rt4, s=s, priors=priors, chains=1)
#'
#' # fit list
#' fit_list <- list(fit2, fit3, fit4)
#'
#' # a short summary of fitted parameters
#' summary(fit1)
#'
#' # a more detailed summary of fitted parameters
#' print(fit1)
#' show(fit1)
#'
#' # plot the fitted distribution against the data
#' plot(fit1)
#' plot_fit(fit1)
#'
#' # plot the fitted distribution against the data,
#' # plot on the top (group) level
#' plot(fit1, subjects=FALSE)
#' plot_fit(fit1, subjects=FALSE)
#'
#' # traceplot of the fitted parameters
#' plot_trace(fit1)
#'
#' # extract parameter values from the fit
#' parameters <- get_parameters(fit1)
#'
#' # extract parameter values on the bottom (subject) level from the fit
#' subject_parameters <- get_subject_parameters(fit1)
#'
#' # compare means between two fits, use a rope interval
#' compare_means(fit1, fit2=fit2, rope=0.5)
#'
#' # compare means between two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' compare_means(fit1, fit2=fit2, par="mu")
#'
#' # compare means between multiple fits
#' compare_means(fit1, fits=fit_list)
#'
#' # visualize difference in means between two fits,
#' # specify number of histogram bins and rope interval
#' plot_means_difference(fit1, fit2=fit2, bins=20, rope=0.5)
#'
#' # visualize difference in means between two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' plot_means_difference(fit1, fit2=fit2, par="mu")
#'
#' # visualize difference in means between multiple fits
#' plot_means_difference(fit1, fits=fit_list)
#'
#' # visualize means of a single fit
#' plot_means(fit1)
#'
#' # visualize means of two fits
#' plot_means(fit1, fit2=fit1)
#'
#' # visualize means of two fits,
#' # use only the mu parameter of the exponentially modified gaussian distribution
#' plot_means(fit1, fit2=fit2, par="mu")
#'
#' # visualize means of multiple fits
#' plot_means(fit1, fits=fit_list)
#'
#' # draw samples from distributions underlying two fits and compare them,
#' # use a rope interval
#' compare_distributions(fit1, fit2=fit2, rope=0.5)
#'
#' # draw samples from distributions underlying multiple fits and compare them
#' compare_distributions(fit1, fits=fit_list)
#'
#' # visualize the distribution underlying a fit
#' plot_distributions(fit1)
#'
#' # visualize distributions underlying two fits
#' plot_distributions(fit1, fit2=fit2)
#'
#' # visualize distributions underlying multiple fits
#' plot_distributions(fit1, fits=fit_list)
#'
#' # visualize difference between distributions underlying two fits,
#' # use a rope interval
#' plot_distributions_difference(fit1, fit2=fit2, rope=0.05)
#'
#' # visualize difference between distributions underlying multiple fits
#' plot_distributions_difference(fit1, fits=fit_list)
#' }
#'
# S4 class holding the results of the Bayesian reaction time model:
# the extracted posterior samples, the raw Stan fit and the input data.
# Inherits from the shared b_results base class.
reaction_time_class <- setClass(
  "reaction_time_class",
  slots = c(
    extract = "list",   # posterior samples extracted from the Stan fit
    fit = "stanfit",    # the underlying rstan fit object
    data = "list"       # data on which the fit is based
  ),
  contains = "b_results"
)
#' @title summary
#' @description \code{summary} prints a summary of the Bayesian reaction time fit.
#' @param object reaction_time_class object.
#' @exportMethod summary
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="summary", signature(object="reaction_time_class"), definition=function(object) {
  # posterior samples of each reported quantity, in print order
  samples <- list(rt = object@extract$rt,
                  mu = object@extract$mu_m,
                  sigma = object@extract$mu_s,
                  lambda = object@extract$mu_l)
  # printf template per row; lambda is reported with more decimals
  templates <- c(rt = "rt:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
                 mu = "mu:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
                 sigma = "sigma:\t\t%.2f +/- %.5f\t95%% HDI: [%.2f, %.2f]\n",
                 lambda = "lambda:\t\t%.4f +/- %.5f\t95%% HDI: [%.4f, %.4f]\n")
  # print the mean, the Monte Carlo standard error and the 95% HDI
  # for every parameter
  for (p in names(samples)) {
    s <- samples[[p]]
    hdi <- mcmc_hdi(s)
    cat(sprintf(templates[[p]], mean(s), mcmcse::mcse(s)$se, hdi[1], hdi[2]))
  }
})
#' @title show
#' @description \code{show} prints a more detailed summary of the Bayesian reaction time fit.
#' @param object reaction_time_class object.
#' @exportMethod show
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="show", signature(object="reaction_time_class"), definition=function(object) {
  # delegate the detailed printout to the show method of the underlying stanfit
  show(object@fit)
})
#' @title plot
#' @description \code{plot} plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subjects level (subjects=TRUE) or on the group level (subjects=FALSE).
#' @param x reaction_time_class object.
#' @param y empty dummy variable, ignore this.
#' @param ... subjects - plot fits on a subject level (default = TRUE).
#' @exportMethod plot
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot", signature(x="reaction_time_class", y="missing"), definition=function(x, ...) {
  # plotting is fully delegated to plot_fit; extra arguments are forwarded
  plot_fit(object=x, ...)
})
#' @title plot_fit
#' @description \code{plot_fit} plots fitted model against the data. Use this function to explore the quality of your fit. You can plot on the subjects level (subjects=TRUE) or on the group level (subjects=FALSE).
#' @param object reaction_time_class object.
#' @param ... subjects - plot fits on a subject level (default = TRUE).
#' @rdname reaction_time_class-plot_fit
#' @aliases plot_fit_reaction_time
#' @return A ggplot visualization.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_fit", signature(object="reaction_time_class"), definition=function(object, ...) {
  # init local variables for CRAN check
  rt <- x <- y <- NULL
  arguments <- list(...)

  # plot on a subject level? (default TRUE)
  subjects <- TRUE
  if (!is.null(arguments$subjects)) {
    subjects <- arguments$subjects
  }

  # measured reaction times with their subject indices
  df_data <- data.frame(rt=object@data$t, s=object@data$s)
  x_min <- 0

  if (!subjects) {
    # group level: a single fitted emg density over the pooled data,
    # parameterized by the mean posterior parameter values
    mu_m <- mean(object@extract$mu_m)
    mu_s <- mean(object@extract$mu_s)
    mu_l <- mean(object@extract$mu_l)
    # x range covers both random draws from the fit and the raw data,
    # padded by 10%
    x_max <- max(emg::remg(1000, mu=mu_m, sigma=mu_s, lambda=mu_l), object@data$t)
    x_max <- x_max + 0.1*abs(x_max)
    step <- (x_max - x_min) / 1000
    df_fit <- data.frame(x = seq(x_min, x_max, step),
                         y = emg::demg(seq(x_min, x_max, step),
                                       mu = mu_m,
                                       sigma = mu_s,
                                       lambda = mu_l))
    graph <- ggplot(df_data, aes(x=rt)) +
      geom_density(fill="#3182bd", alpha=0.4, color=NA) +
      geom_line(data=df_fit, aes(x=x, y=y)) +
      xlab("reaction time")
  } else {
    # subject level: one fitted emg density per subject
    n <- length(unique(object@data$s))
    x_max <- max(object@data$t)
    x_max <- x_max + 0.1*abs(x_max)
    # grid step is invariant across subjects, so compute it once
    # (the original recomputed it on every loop iteration)
    step <- (x_max - x_min) / 1000
    # build all per-subject curves at once and bind them together,
    # instead of growing a data frame with rbind inside the loop
    df_fit <- do.call(rbind, lapply(seq_len(n), function(i) {
      data.frame(x = seq(x_min, x_max, step),
                 s = i,
                 y = emg::demg(seq(x_min, x_max, step),
                               mu = mean(object@extract$mu[,i]),
                               sigma = mean(object@extract$sigma[,i]),
                               lambda = mean(object@extract$lambda[,i])))
    }))
    # arrange subject panels in a roughly square grid
    n_col <- ceiling(sqrt(n))
    # density per subject with the fitted curve overlaid
    graph <- ggplot(df_data, aes(x=rt)) +
      geom_density(fill="#3182bd", alpha=0.4, color=NA) +
      geom_line(data=df_fit, aes(x=x, y=y)) +
      facet_wrap(~ s, ncol=n_col) +
      xlab("reaction time")
  }
  return(graph)
})
#' @title plot_trace
#' @description \code{plot_trace} traceplot for main fitted model parameters.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-plot_trace
#' @aliases plot_trace_reaction_time
#' @return A ggplot visualization.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_trace", signature(object="reaction_time_class"), definition=function(object) {
  # traceplot of the group level parameters (mu, sigma, lambda),
  # warmup iterations included so convergence issues are visible
  rstan::traceplot(object@fit, pars=c("mu_m", "mu_s", "mu_l"), inc_warmup = TRUE)
})
#' @title get_parameters
#' @description \code{get_parameters} returns a dataframe with values of fitted parameters.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-get_parameters
#' @aliases get_parameters_reaction_time
#' @return A data frame with parameter values.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="get_parameters", signature(object="reaction_time_class"), definition=function(object) {
  # group level posterior samples, one column per parameter
  data.frame(rt = object@extract$rt,
             mu = object@extract$mu_m,
             sigma = object@extract$mu_s,
             lambda = object@extract$mu_l)
})
#' @title get_subject_parameters
#' @description \code{get_subject_parameters} returns a dataframe with values of fitted parameters for each subject in the hierarchical model.
#' @param object reaction_time_class object.
#' @rdname reaction_time_class-get_subject_parameters
#' @aliases get_subject_parameters_reaction_time
#' @return A data frame with parameter values.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="get_subject_parameters", signature(object="reaction_time_class"), definition=function(object) {
  # number of subjects in the hierarchical model
  n <- length(unique(object@data$s))
  # build one data frame per subject and bind them at the end,
  # instead of growing the result with rbind inside a loop
  per_subject <- lapply(seq_len(n), function(i) {
    data.frame(rt = object@extract$rt_subjects[,i],
               mu = object@extract$mu[,i],
               sigma = object@extract$sigma[,i],
               lambda = object@extract$lambda[,i],
               subject = i)
  })
  # empty prototype keeps the column layout when there are no subjects
  empty <- data.frame(rt=numeric(), mu=numeric(), sigma=numeric(),
                      lambda=numeric(), subject=numeric())
  df <- do.call(rbind, c(list(empty), per_subject))
  return(df)
})
#' @title compare_means
#' @description \code{compare_means} prints difference in reaction times between two groups or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, par - specific parameter of comparison (mu or lambda).
#' @rdname reaction_time_class-compare_means
#' @aliases compare_means_reaction_time
#' @return Comparison results or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="compare_means", signature(object="reaction_time_class"), definition=function(object, ...) {
  arguments <- list(...)

  wrong_arguments <- "The provided arguments for the compare_means function are invalid, compare_means(reaction_time_class, fit2=reaction_time_class) or compare_means(reaction_time_class, fits=list) is required! You can optionallly provide the rope parameter, e.g. compare_means(reaction_time_class, fit2=reaction_time_class, rope=numeric). You can also execute the comparison through only the mu or the lamdba parameter, e.g. compare_means(reaction_time_class, fit2=reaction_time_class, par=\"mu\")."

  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }

  # region of practical equivalence (arguments$rope is NULL when absent)
  rope <- prepare_rope(arguments$rope)

  # optionally restrict the comparison to a single model parameter
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par
    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }

  # pull the relevant posterior samples out of a fit, depending on
  # which parameter (if any) was requested
  extract_samples <- function(fit) {
    if (is.null(par)) {
      fit@extract$rt
    } else if (par == "mu") {
      fit@extract$mu_m
    } else {
      fit@extract$mu_l
    }
  }

  # samples of the first group
  y <- list()
  y[[1]] <- extract_samples(object)

  # samples of the remaining groups
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # a single additional fit
    fit2 <- if (!is.null(arguments$fit2)) arguments$fit2 else arguments[[1]]
    y[[2]] <- extract_samples(fit2)
  } else if (!is.null(arguments$fits)) {
    # a list of additional fits
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      y[[i]] <- extract_samples(fit)
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }

  # pairwise comparison between all groups
  n <- length(y)
  comparison_matrix <- matrix(nrow = n, ncol = n)
  for (i in 1:(n-1)) {
    for (j in (i+1):n) {
      cat(sprintf("\n---------- Group %d vs Group %d ----------\n", i, j))
      result <- difference(y1=y[[i]], y2=y[[j]], rope=rope, group1=i, group2=j)
      comparison_matrix[j,i] <- result[1]
      comparison_matrix[i,j] <- result[2]
      cat("\n")
    }
  }

  # with more than two groups also report, for every group, the
  # probability that it is the smallest/largest or equal to all others
  if (n > 2) {
    cat("-----------------------------------------")
    cat("\nProbabilities that a certain group is\nsmallest/largest or equal to all others:\n\n")
    smallest_largest <- is_smallest_or_largest(data=y, rope=rope)
    print(smallest_largest)
    cat("\n\n")
    return(list(comparison_matrix=comparison_matrix, smallest_largest=smallest_largest))
  }

  return(comparison_matrix)
})
#' @title plot_means_difference
#' @description \code{plot_means_difference} a visualization of the difference between two groups or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, bins - number of bins in the histogram, par - specific parameter of comparison (mu or lambda).
#' @rdname reaction_time_class-plot_means_difference
#' @aliases plot_means_difference_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_means_difference", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Visualizes the difference between the fitted means of two or more groups.
  # With a single second fit (fit2) one difference histogram is returned; with
  # a list of fits an n x n grid of pairwise difference plots is assembled.
  # init local variables for CRAN check (referenced inside ggplot2 aes())
  value <- NULL
  arguments <- list(...)
  # error message reused by every invalid-input branch below
  wrong_arguments <- "The provided arguments for the plot_means_difference function are invalid, plot_means_difference(reaction_time_class, fit2=reaction_time_class) or plot_means_difference(reaction_time_class, fits=list) is required! You can optionallly provide the rope and bins (number of bins in the histogram) parameters, e.g. plot_means_difference(reaction_time_class, fit2=reaction_time_class, rope=numeric, bins=numeric). You can also visualize the difference through only the mu or the lamdba parameter, e.g. plot_means_difference(reaction_time_class, fit2=reaction_time_class, par=\"mu\")."
  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }
  # prepare rope (region of practical equivalence); NULL when not provided
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)
  # optionally compare only through one parameter (mu or lambda); any other
  # value falls back to the default (rt samples) with a warning
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par
    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }
  # first group data: samples taken from the fit's extract slot
  y <- list()
  if (is.null(par)) {
    y[[1]] <- object@extract$rt
  } else if (par == "mu") {
    y[[1]] <- object@extract$mu_m
  } else if (par == "lambda") {
    y[[1]] <- object@extract$mu_l
  }
  # second group data
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit (named fit2 or passed as first positional argument)
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }
    if (is.null(par)) {
      y[[2]] <- fit2@extract$rt
    } else if (par == "mu") {
      y[[2]] <- fit2@extract$mu_m
    } else if (par == "lambda") {
      y[[2]] <- fit2@extract$mu_l
    }
  } else if (!is.null(arguments$fits)) {
    # provided a list of fits; groups are numbered from 2 upwards
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      if (is.null(par)) {
        y[[i]] <- fit@extract$rt
      } else if (par == "mu") {
        y[[i]] <- fit@extract$mu_m
      } else if (par == "lambda") {
        y[[i]] <- fit@extract$mu_l
      }
      # limits: track the overall maximum for a shared x axis
      x_max <- max(x_max, y[[i]])
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }
  # bins in the histogram (default 30, overridable via the bins argument)
  bins <- 30
  if (!is.null(arguments$bins)) {
    bins <- arguments$bins
  }
  # if no list is provided
  if (is.null(arguments$fits)) {
    # call plot difference shared function
    graph <- plot_difference(y1=y[[1]], y2=y[[2]], rope=rope, bins=bins)
    return(graph)
  } else {
    x_max <- x_max + 0.1*x_max
    graphs <- list()
    n <- length(y)
    # n x n grid: diagonal = per-group densities, off-diagonal = pairwise
    # differences in both orderings; graphs are stored in row-major order
    for (i in 1:n) {
      for (j in i:n) {
        # if both are equal plot means, else plot difference
        if (i == j) {
          df <- data.frame(value=y[[i]])
          index <- (i-1)*n + i
          graphs[[index]] <- ggplot() +
            geom_density(data=df, aes(x=value), fill="#3182bd", color=NA, alpha=0.4) +
            xlab("reaction time") +
            xlim(0, x_max)
        } else {
          index1 <- (i-1)*n + j
          graphs[[index1]] <- plot_difference(y1=y[[i]], y2=y[[j]], rope=rope, bins=bins, nrow=n)
          index2 <- (j-1)*n + i
          graphs[[index2]] <- plot_difference(y1=y[[j]], y2=y[[i]], rope=rope, bins=bins, nrow=n)
        }
      }
    }
    # cowplot
    graph <- suppressWarnings(cowplot::plot_grid(plotlist=graphs, nrow=n, ncol=n, scale=0.9))
    return(graph)
  }
})
#' @title plot_means
#' @description \code{plot_means} plots density of means for one, two or multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, par - plot a specific parameter (mu or lambda).
#' @rdname reaction_time_class-plot_means
#' @aliases plot_means_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_means", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Plots the density of the mean (or of the mu/lambda parameter when `par`
  # is given) for one, two or multiple fitted groups on a shared x axis.
  # init local variables for CRAN check (referenced inside ggplot2 aes())
  group <- value <- NULL
  # extract arguments
  arguments <- list(...)
  # optionally plot only one parameter (mu or lambda); any other value falls
  # back to the default (rt samples) with a warning
  par <- NULL
  if (!is.null(arguments$par)) {
    par <- arguments$par
    if (!(par == "mu" || par == "lambda")) {
      w <- sprintf("Parameter %s not recognized, parameters used in this model are mu and lambda! Using the default setting for comparison.", par)
      warning(w)
      par <- NULL
    } else {
      cat(sprintf("\n---------- Using only the %s parameter. ----------\n\n", par))
    }
  }
  # first group data: samples taken from the fit's extract slot
  df <- NULL
  if (is.null(par)) {
    df <- data.frame(value=object@extract$rt, group="1")
  } else if (par == "mu") {
    df <- data.frame(value=object@extract$mu_m, group="1")
  } else if (par == "lambda") {
    df <- data.frame(value=object@extract$mu_l, group="1")
  }
  # second group data (optional; plotting a single group is also valid)
  if (length(arguments) > 0) {
    if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
      # provided another fit (named fit2 or passed as first positional argument)
      if (!is.null(arguments$fit2)) {
        fit2 <- arguments$fit2
      } else {
        fit2 <- arguments[[1]]
      }
      if (is.null(par)) {
        df <- rbind(df, data.frame(value=fit2@extract$rt, group="2"))
      } else if (par == "mu") {
        df <- rbind(df, data.frame(value=fit2@extract$mu_m, group="2"))
      } else if (par == "lambda") {
        df <- rbind(df, data.frame(value=fit2@extract$mu_l, group="2"))
      }
    } else if (!is.null(arguments$fits)) {
      # provided a list of fits; groups are numbered from 2 upwards
      i <- 2
      for (fit in arguments$fits) {
        if (!("reaction_time_class" %in% class(fit))) {
          stop("One of the fits in the fits list is not a valid reaction_time_class object.")
        }
        if (is.null(par)) {
          df <- rbind(df, data.frame(value=fit@extract$rt, group=as.factor(i)))
        } else if (par == "mu") {
          df <- rbind(df, data.frame(value=fit@extract$mu_m, group=as.factor(i)))
        } else if (par == "lambda") {
          df <- rbind(df, data.frame(value=fit@extract$mu_l, group=as.factor(i)))
        }
        i <- i + 1
      }
    }
  }
  # limits: extend the x axis 10% beyond the largest observed value
  x_max <- max(df$value)
  x_max <- x_max + 0.1*x_max
  # plot
  graph <- ggplot() +
    geom_density(data=df, aes(x=value, fill=group), color=NA, alpha=0.4) +
    xlab("reaction time") +
    xlim(0, x_max)
  # colour scheme depends on the number of groups
  # NOTE(review): this relies on as.numeric() of the group column mapping
  # "1", "2", ... back to 1, 2, ... — confirm this holds for factor coding
  n_groups <- max(as.numeric(df$group))
  if (n_groups == 2) {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd", "#ff4e3f"))
  } else if (n_groups > 2) {
    graph <- graph +
      scale_fill_hue()
  } else {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd")) +
      theme(legend.position="none")
  }
  return(suppressWarnings(graph))
})
#' @title compare_distributions
#' @description \code{compare_distributions} draws samples from distribution of the first group and compares them against samples drawn from the distribution of the second group or from samples drawn from distributions of multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence.
#' @rdname reaction_time_class-compare_distributions
#' @aliases compare_distributions_reaction_time
#' @return Comparison results or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="compare_distributions", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Draws Monte Carlo samples from each group's fitted distribution (via
  # emg::remg at the posterior-mean parameters) and compares every pair of
  # groups through the shared difference() helper.
  arguments <- list(...)
  # error message reused by every invalid-input branch below
  wrong_arguments <- "The provided arguments for the compare_distributions function are invalid, compare_distributions(reaction_time_class, fit2=reaction_time_class) or compare_distributions(reaction_time_class, fits=list) is required! You can also provide the rope parameter, e.g. compare_distributions(reaction_time_class, fit2=reaction_time_class, rope=numeric)."
  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }
  # prepare rope (region of practical equivalence); NULL when not provided
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)
  # first group data
  y <- list()
  # n is the number of Monte Carlo draws here; note it is reassigned to the
  # number of groups once all samples have been collected
  n <- 100000
  y[[1]] <- emg::remg(n,
                      mu=mean(object@extract$mu_m),
                      sigma=mean(object@extract$mu_s),
                      lambda=mean(object@extract$mu_l))
  # second group data
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit (named fit2 or passed as first positional argument)
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }
    y[[2]] <- emg::remg(n,
                        mu=mean(fit2@extract$mu_m),
                        sigma=mean(fit2@extract$mu_s),
                        lambda=mean(fit2@extract$mu_l))
  } else if (!is.null(arguments$fits)) {
    # provided a list of fits; groups are numbered from 2 upwards
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      y[[i]] <- emg::remg(n,
                          mu=mean(fit@extract$mu_m),
                          sigma=mean(fit@extract$mu_s),
                          lambda=mean(fit@extract$mu_l))
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }
  n <- length(y)
  # pairwise results from difference(): result[1] is stored at [j,i] and
  # result[2] at [i,j] (see difference() for the meaning of the two values)
  comparison_matrix <- matrix(nrow = n, ncol = n)
  for (i in 1:(n-1)) {
    for (j in (i+1):n) {
      cat(sprintf("\n---------- Group %d vs Group %d ----------\n", i, j))
      result <- difference(y1=y[[i]], y2=y[[j]], rope=rope, group1=i, group2=j)
      comparison_matrix[j,i] <- result[1]
      comparison_matrix[i,j] <- result[2]
      cat("\n")
    }
  }
  # largest/smallest probabilities (only meaningful for three or more groups)
  if (n > 2) {
    cat("-----------------------------------------")
    cat("\nProbabilities that a certain group is\nsmallest/largest or equal to all others:\n\n")
    smallest_largest <- is_smallest_or_largest(data=y, rope=rope)
    print(smallest_largest)
    cat("\n\n")
    return(list(comparison_matrix=comparison_matrix, smallest_largest=smallest_largest))
  } else {
    return(comparison_matrix)
  }
})
#' @title plot_distributions
#' @description \code{plot_distributions} a visualization of the fitted distribution, for one, two or multiple fits.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects.
#' @rdname reaction_time_class-plot_distributions
#' @aliases plot_distributions_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_distributions", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Plots the fitted distribution of one, two or multiple fits on a shared
  # x axis: each group's density is evaluated with emg::demg at the group's
  # posterior-mean parameters (mu_m, mu_s, mu_l).
  # init local variables for CRAN check (referenced inside ggplot2 aes())
  group <- x <- y <- NULL
  # first group data: posterior-mean parameters of the first fit
  mus <- vector()
  sigmas <- vector()
  lambdas <- vector()
  mus[[1]] <- mean(object@extract$mu_m)
  sigmas[[1]] <- mean(object@extract$mu_s)
  lambdas[[1]] <- mean(object@extract$mu_l)
  # limits: estimate the upper x limit from random draws of the fitted
  # distribution, padded by 10%
  x_max <- max(emg::remg(10000, mu=mus[[1]], sigma=sigmas[[1]], lambda=lambdas[[1]]))
  x_max <- x_max + 0.1*abs(x_max)
  # second group data
  arguments <- list(...)
  if (length(arguments) > 0) {
    if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
      # provided another fit (named fit2 or passed as first positional argument)
      if (!is.null(arguments$fit2)) {
        fit2 <- arguments$fit2
      } else {
        fit2 <- arguments[[1]]
      }
      mus[[2]] <- mean(fit2@extract$mu_m)
      sigmas[[2]] <- mean(fit2@extract$mu_s)
      lambdas[[2]] <- mean(fit2@extract$mu_l)
      x_max2 <- max(emg::remg(10000, mu=mus[[2]], sigma=sigmas[[2]], lambda=lambdas[[2]]))
      x_max2 <- x_max2 + 0.1*abs(x_max2)
      x_max <- max(x_max, x_max2)
    } else if (!is.null(arguments$fits)) {
      # provided a list of fits; groups are numbered from 2 upwards
      i <- 2
      for (fit in arguments$fits) {
        if (!("reaction_time_class" %in% class(fit))) {
          stop("One of the fits in the fits list is not a valid reaction_time_class object.")
        }
        mus[[i]] <- mean(fit@extract$mu_m)
        sigmas[[i]] <- mean(fit@extract$mu_s)
        lambdas[[i]] <- mean(fit@extract$mu_l)
        # bug fix: use the current group's parameters ([[i]]); the original
        # indexed [[2]] here, so the x axis limit ignored groups beyond the
        # second fit in the list
        x_max2 <- max(emg::remg(10000, mu=mus[[i]], sigma=sigmas[[i]], lambda=lambdas[[i]]))
        x_max2 <- x_max2 + 0.1*abs(x_max2)
        x_max <- max(x_max, x_max2)
        i <- i + 1
      }
    }
  }
  # calculate data points: evaluate each group's density on a shared grid
  step <- 1 / 1000
  df <- data.frame(x=numeric(), y=numeric(), group=factor())
  n_groups <- length(mus)
  for (i in 1:n_groups) {
    df_group <- data.frame(x = seq(0, x_max, step),
                           y = emg::demg(seq(0, x_max, step),
                                         mu = mus[i],
                                         sigma = sigmas[i],
                                         lambda = lambdas[i]),
                           group=as.factor(i))
    df <- rbind(df, df_group)
  }
  # plot
  graph <- ggplot() +
    geom_area(data=df, aes(x=x, y=y, fill=group), alpha=0.4, position="identity") +
    xlab("reaction time") +
    ylab("density")
  # colour scheme depends on the number of groups
  if (n_groups == 2) {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd", "#ff4e3f"))
  } else if (n_groups > 2) {
    graph <- graph +
      scale_fill_hue()
  } else {
    graph <- graph +
      scale_fill_manual(values=c("#3182bd")) +
      theme(legend.position="none")
  }
  return(suppressWarnings(graph))
})
#' @title plot_distributions_difference
#' @description \code{plot_distributions_difference} a visualization of the difference between the distribution of the first group and the second group or between multiple groups.
#' @param object reaction_time_class object.
#' @param ... fit2 - a second reaction_time_class object, fits - a list of reaction_time_class objects, rope - region of practical equivalence, bins - number of bins in the histogram.
#' @rdname reaction_time_class-plot_distributions_difference
#' @aliases plot_distributions_difference_reaction_time
#' @return A ggplot visualization or an error if something went wrong.
#'
#' @examples
#' # to use the function you first have to prepare the data and fit the model
#' # see class documentation for an example of the whole process
#' # along with an example of how to use this function
#' ?reaction_time_class
#'
setMethod(f="plot_distributions_difference", signature(object="reaction_time_class"), definition=function(object, ...) {
  # Visualizes the difference between the fitted distributions of two or more
  # groups, using Monte Carlo draws (emg::remg at posterior-mean parameters)
  # as the per-group samples fed into the shared plot_difference() helper.
  # init local variables for CRAN check (referenced inside ggplot2 aes())
  value <- NULL
  arguments <- list(...)
  # error message reused by every invalid-input branch below
  wrong_arguments <- "The provided arguments for the plot_distributions_difference function are invalid, plot_distributions_difference(reaction_time_class, fit2=reaction_time_class) or plot_distributions_difference(reaction_time_class, fits=list) is required! You can also provide the rope and bins (number of bins in the histogram) parameters, e.g. plot_distributions_difference(reaction_time_class, fit2=reaction_time_class, rope=numeric, bins=numeric)."
  if (length(arguments) == 0) {
    stop(wrong_arguments)
  }
  # prepare rope (region of practical equivalence); NULL when not provided
  rope <- NULL
  if (!is.null(arguments$rope)) {
    rope <- arguments$rope
  }
  rope <- prepare_rope(rope)
  # first group data
  y <- list()
  # n is the number of Monte Carlo draws here; note it is reassigned to the
  # number of groups in the multi-fit branch below
  n <- 100000
  y[[1]] <- emg::remg(n,
                      mu=mean(object@extract$mu_m),
                      sigma=mean(object@extract$mu_s),
                      lambda=mean(object@extract$mu_l))
  # limits
  x_max <- max(y[[1]])
  # second group data
  if (!is.null(arguments$fit2) || class(arguments[[1]])[1] == "reaction_time_class") {
    # provided another fit (named fit2 or passed as first positional argument)
    if (!is.null(arguments$fit2)) {
      fit2 <- arguments$fit2
    } else {
      fit2 <- arguments[[1]]
    }
    y[[2]] <- emg::remg(n,
                        mu=mean(fit2@extract$mu_m),
                        sigma=mean(fit2@extract$mu_s),
                        lambda=mean(fit2@extract$mu_l))
  } else if (!is.null(arguments$fits)) {
    # provided a list of fits; groups are numbered from 2 upwards
    i <- 2
    for (fit in arguments$fits) {
      if (!("reaction_time_class" %in% class(fit))) {
        stop("One of the fits in the fits list is not a valid reaction_time_class object.")
      }
      y[[i]] <- emg::remg(n,
                          mu=mean(fit@extract$mu_m),
                          sigma=mean(fit@extract$mu_s),
                          lambda=mean(fit@extract$mu_l))
      # limits: track the overall maximum for a shared x axis
      x_max <- max(x_max, y[[i]])
      i <- i + 1
    }
  } else {
    stop(wrong_arguments)
  }
  # bins in the histogram (default 30, overridable via the bins argument)
  bins <- 30
  if (!is.null(arguments$bins)) {
    bins <- arguments$bins
  }
  # if no list is provided
  if (is.null(arguments$fits)) {
    # call plot difference shared function
    graph <- plot_difference(y1=y[[1]], y2=y[[2]], rope=rope, bins=bins)
    return(graph)
  } else {
    x_max <- x_max + 0.1*x_max
    graphs <- list()
    n <- length(y)
    # n x n grid: diagonal = per-group sample densities, off-diagonal =
    # pairwise differences in both orderings, stored in row-major order
    for (i in 1:n) {
      for (j in i:n) {
        # if both are equal plot samples, else plot difference
        if (i == j) {
          df <- data.frame(value=y[[i]])
          index <- (i-1)*n + i
          graphs[[index]] <- ggplot() +
            geom_density(data=df, aes(x=value), fill="#3182bd", color=NA, alpha=0.4) +
            xlab("reaction time") +
            xlim(0, x_max)
        } else {
          index1 <- (i-1)*n + j
          graphs[[index1]] <- plot_difference(y1=y[[i]], y2=y[[j]], rope=rope, bins=bins, nrow=n)
          index2 <- (j-1)*n + i
          graphs[[index2]] <- plot_difference(y1=y[[j]], y2=y[[i]], rope=rope, bins=bins, nrow=n)
        }
      }
    }
    # cowplot
    graph <- suppressWarnings(cowplot::plot_grid(plotlist=graphs, nrow=n, ncol=n, scale=0.9))
    return(graph)
  }
})
|
#' Break Down the Walls of 'HTML' Tags into Usable Text
#'
#' Structured 'HTML' content can be useful when you need to parse data tables or
#' other tagged data from within a document. However, it is also useful to obtain
#' "just the text" from a document free from the walls of tags that surround it.
#' Tools are provided that wrap methods in the 'Jericho HTML Parser' Java library
#' by Martin Jericho <http://jericho.htmlparser.net/docs/index.html>. Martin's
#' library is used in many at-scale projects, including 'The Internet Archive'.
#'
#' @md
#' @name jericho
#' @docType package
#' @author Bob Rudis (bob@@rud.is)
#' @import rJava jerichojars
NULL
| /R/jericho-package.R | permissive | hrbrmstr/jericho | R | false | false | 662 | r | #' Break Down the Walls of 'HTML' Tags into Usable Text
#'
#' Structured 'HTML' content can be useful when you need to parse data tables or
#' other tagged data from within a document. However, it is also useful to obtain
#' "just the text" from a document free from the walls of tags that surround it.
#' Tools are provided that wrap methods in the 'Jericho HTML Parser' Java library
#' by Martin Jericho <http://jericho.htmlparser.net/docs/index.html>. Martin's
#' library is used in many at-scale projects, including 'The Internet Archive'.
#'
#' @md
#' @name jericho
#' @docType package
#' @author Bob Rudis (bob@@rud.is)
#' @import rJava jerichojars
NULL
|
# Placeholder: load the raw data set (not yet implemented).
load_data<-function(){}
# Placeholder: clean/prepare the loaded data (not yet implemented).
clean_data<-function(){}
# Write `data` to "<name>.csv" via write.csv (row names included by default).
#
# Args:
#   data: object to persist (anything write.csv accepts, e.g. a data frame).
#   name: file name stem; ".csv" is appended.
#
# Fix: the original passed the undefined global `x` to write.csv instead of
# the `data` argument; paste0() replaces paste(..., sep='').
store_data<-function(data,name){write.csv(data,paste0(name,'.csv'))}
# Placeholder: run the full ML pipeline end to end (not yet implemented).
run_ml_pipeline<-function(){}
| /ml.R | no_license | renorosgon/pds-2019 | R | false | false | 151 | r | load_data<-function(){}
# Placeholder: clean/prepare the loaded data (not yet implemented).
clean_data<-function(){}
# Write `data` to "<name>.csv" via write.csv (row names included by default).
#
# Args:
#   data: object to persist (anything write.csv accepts, e.g. a data frame).
#   name: file name stem; ".csv" is appended.
#
# Fix: the original passed the undefined global `x` to write.csv instead of
# the `data` argument; paste0() replaces paste(..., sep='').
store_data<-function(data,name){write.csv(data,paste0(name,'.csv'))}
# Placeholder: run the full ML pipeline end to end (not yet implemented).
run_ml_pipeline<-function(){}
|
## Peer-graded Assignment: Course Project 1 ##
# 1. read and organize data
# Fixes over the original:
#  - na.strings = "?": the raw file marks missing values with "?"; without it
#    the measurement columns come in as text/factors and as.numeric() on a
#    factor returns level codes instead of the measurements
#  - stringsAsFactors = FALSE keeps character columns as character
data <- read.table("household_power_consumption.txt", skip = 1, sep = ";",
                   na.strings = "?", stringsAsFactors = FALSE)
names(data) <- c("Date","Time","Global_active_power","Global_reactive_power",
                 "Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
head(data)
# 2. Subset data to the two days of interest (1-2 Feb 2007)
data_2 <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
head(data_2)
# 3. First plot: histogram of global active power over the two-day subset
# (the original histogrammed the full `data`; the subset matches the
# assignment's plot 1 and the rest of this script)
hist(as.numeric(data_2$Global_active_power), col = "purple", main = "Global Active Power",
     ylab = "Frequency", xlab = "Global Active Power(kilowatts)")
# 4. Transformation date and time: rebuild full timestamps for both days
# (rows 1-1440 are day one, rows 1441-2880 are day two, one row per minute)
data_2$Date <- as.Date(data_2$Date, format="%d/%m/%Y")
data_2$Time <- strptime(data_2$Time, format="%H:%M:%S")
data_2[1:1440,"Time"] <- format(data_2[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data_2[1441:2880,"Time"] <- format(data_2[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# 5. second plot: global active power over time
plot(data_2$Time, as.numeric(data_2$Global_active_power),
     type="l", main = "Global Active Power Vs Time", xlab="",ylab="Global Active Power (kilowatts)")
# 6. Third plot: the three sub-metering series overlaid
plot(data_2$Time, data_2$Sub_metering_1, type = "n",
     main="Energy sub-metering", xlab = "", ylab = "Energy sub metering")
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# 7. Fourth plot: 2x2 panel layout for the next four base plots
par(mfrow=c(2,2))
with(data_2,{
plot(data_2$Time, as.numeric(data_2$Global_active_power), type = "l", xlab = "", ylab = "Global Active Power")
plot(data_2$Time, as.numeric(data_2$Voltage), type = "l", xlab = "datetime", ylab = "Voltage")
plot(data_2$Time, data_2$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_1))))
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_2)), col = "red"))
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_3)), col = "blue"))
legend("topright", lty = 1,
col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
cex = 0.6)
plot(data_2$Time, as.numeric(as.character(data_2$Global_reactive_power)),
type = "l", xlab = "datetime", ylab = "Global_reactive_power")
}) | /Exploratory Data Analysis.R | no_license | soleyepes12/ExData | R | false | false | 2,486 | r | ## Peer-graded Assignment: Course Project 1 ##
# 1. read and organize data
# NOTE(review): the raw file marks missing values with "?"; without
# na.strings = "?" the measurement columns come in as text/factors, which is
# why the later plots coerce with as.numeric(as.character(...)) — confirm.
data <- read.table("household_power_consumption.txt", skip = 1, sep =";")
names(data) <- c("Date","Time","Global_active_power","Global_reactive_power",
                 "Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
head (data)
# 2. Subset data: keep only the two days of interest (1-2 Feb 2007)
data_2 <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
head(data_2)
# 3. First plot
# NOTE(review): this histogram uses the full `data`, not the two-day subset
# `data_2`; and as.numeric() on a factor column returns level codes — verify.
hist(as.numeric(data$Global_active_power), col = "purple", main = "Global Active Power",
     ylab = "Frequency", xlab = "Global Active Power(kilowatts)")
#4.Transformation date and time
# rebuild full timestamps: rows 1-1440 are day one, rows 1441-2880 are day two
data_2$Date <- as.Date(data_2$Date, format="%d/%m/%Y")
data_2$Time <- strptime(data_2$Time, format="%H:%M:%S")
data_2[1:1440,"Time"] <- format(data_2[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data_2[1441:2880,"Time"] <- format(data_2[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# 4. second plot: global active power over time
plot(data_2$Time, as.numeric(data_2$Global_active_power),
     type="l", main = "Global Active Power Vs Time", xlab="",ylab="Global Active Power (kilowatts)")
# 5. Third plot: the three sub-metering series overlaid
plot(data_2$Time, data_2$Sub_metering_1, type = "n",
     main="Energy sub-metering", xlab = "", ylab = "Energy sub metering")
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(data_2,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# 6.Fourth plot: 2x2 panel layout for the next four base plots
par(mfrow=c(2,2))
with(data_2,{
plot(data_2$Time, as.numeric(data_2$Global_active_power), type = "l", xlab = "", ylab = "Global Active Power")
plot(data_2$Time, as.numeric(data_2$Voltage), type = "l", xlab = "datetime", ylab = "Voltage")
plot(data_2$Time, data_2$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_1))))
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_2)), col = "red"))
with(data_2, lines(Time,as.numeric(as.character(Sub_metering_3)), col = "blue"))
legend("topright", lty = 1,
col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
cex = 0.6)
plot(data_2$Time, as.numeric(as.character(data_2$Global_reactive_power)),
type = "l", xlab = "datetime", ylab = "Global_reactive_power")
}) |
\name{ evaluate_expected.polynomial}
\alias{ evaluate_expected.polynomial}
\title{ Evaluate the expected value of a multivariate polynomial }
\description{Evaluate the expected value of a multivariate polynomial
assuming a specified non-central multivariate distribution.}
\usage{ evaluate_expected.polynomial(poly,mu,sigma, envir='symmoments') }
\arguments{
\item{ poly }{ either an object of class 'mpoly' or 'multipol', or a list with components for coefficients and powers.}
\item{ mu }{ a vector of real numbers representing the mean of the multivariate distribution }
  \item{ sigma }{ a vector giving an upper-triangular matrix representing the covariance matrix of the multivariate distribution }
\item{ envir }{a character variable specifying the environment
containing the central moments needed for the calculation }
}
\details{ This function looks in the environment specified in the envir
argument for the central moments needed in the calculation. The default
is the symmoments environment. The computation stops with an error
message if a required moment is not found in envir.}
\value{expected value of the multivariate polynomial at the specified multivariate normal mean and covariance matrix}
\references{ K Phillips, Symbolic Computation of the Central Moments of the Multivariate Normal Distribution, Journal of Statistical Software, 2010.
}
\author{Kem Phillips <kemphillips@comcast.net>}
\seealso{ See the evaluate_noncentral and make.all.moments functions.}
\examples{
# define a mpoly object for a multivariate polynomial and determine
# its expected value at specified mean and covariance matrix:
# note that all moments up to c(2,3,2) must exist in the symmoments
# environment. Use make.all.moments(c(2,3,2)) if necessary.
# use library(mpoly) for first statement below.
# t0 <- mpoly(list(c(coef=3,x1=2),c(coef=2,x1=1,x2=3),c(coef=-4,z=2),c(coef=1,x1=1,x2=2,z=1)))
# evaluate_expected.polynomial(t0,c(1,2,3),c(1,0,0,1,0,1))
}
| /man/evaluate_expected.polynomial.Rd | no_license | cran/symmoments | R | false | false | 2,021 | rd | \name{ evaluate_expected.polynomial}
\alias{ evaluate_expected.polynomial}
\title{ Evaluate the expected value of a multivariate polynomial }
\description{Evaluate the expected value of a multivariate polynomial
assuming a specified non-central multivariate distribution.}
\usage{ evaluate_expected.polynomial(poly,mu,sigma, envir='symmoments') }
\arguments{
\item{ poly }{ either an object of class 'mpoly' or 'multipol', or a list with components for coefficients and powers.}
\item{ mu }{ a vector of real numbers representing the mean of the multivariate distribution }
  \item{ sigma }{ a vector giving an upper-triangular matrix representing the covariance matrix of the multivariate distribution }
\item{ envir }{a character variable specifying the environment
containing the central moments needed for the calculation }
}
\details{ This function looks in the environment specified in the envir
argument for the central moments needed in the calculation. The default
is the symmoments environment. The computation stops with an error
message if a required moment is not found in envir.}
\value{expected value of the multivariate polynomial at the specified multivariate normal mean and covariance matrix}
\references{ K Phillips, Symbolic Computation of the Central Moments of the Multivariate Normal Distribution, Journal of Statistical Software, 2010.
}
\author{Kem Phillips <kemphillips@comcast.net>}
\seealso{ See the evaluate_noncentral and make.all.moments functions.}
\examples{
# define a mpoly object for a multivariate polynomial and determine
# its expected value at specified mean and covariance matrix:
# note that all moments up to c(2,3,2) must exist in the symmoments
# environment. Use make.all.moments(c(2,3,2)) if necessary.
# use library(mpoly) for first statement below.
# t0 <- mpoly(list(c(coef=3,x1=2),c(coef=2,x1=1,x2=3),c(coef=-4,z=2),c(coef=1,x1=1,x2=2,z=1)))
# evaluate_expected.polynomial(t0,c(1,2,3),c(1,0,0,1,0,1))
}
|
library(dplyr)
library(tidyr)
library(ggplot2)
source("R/community_cluster_diagnostics_funs.R")
# load allocations calculated in community_cluster_fit-k-means.R
allocations <- readRDS("Outputs/cluster_allocations/bcr_kmeans_allocations.rds")
# load the localities that were used for clustering
localities_list <- lapply(allocations, function(x) x[["localities"]])
# inspect the sum of squares ----------------------------------------------
# flatten to one data frame; .id keeps each BCR's name as a column
allocations_diagnostics <- bind_rows(lapply(allocations, flatten_list), .id = "BCR") # this is where the BCR unlisting is done
# add information criteria (AIC/BIC built from total within-cluster SS with
# a penalty proportional to the number of fitted parameters, n_vars * k)
allocations_diagnostics_summary <- allocations_diagnostics %>%
  mutate(AIC = total_within_ss + (2 * n_vars * k),
         BIC = total_within_ss + (log(n_obs) * n_vars * k))
# go long for plotting
allocations_diagnostics_plot <- allocations_diagnostics_summary %>%
  select(BCR, k, total_within_ss, AIC, BIC) %>%
  gather(metric, value, total_within_ss:BIC)
# quick look: all three metrics vs k, one panel per BCR
ggplot(allocations_diagnostics_plot, aes(x = k, group = metric)) +
  geom_line(aes(y = value, colour = metric)) +
  facet_wrap(~BCR, scales = "free")
# publication figure: AIC profile per BCR; the gsub() calls restore the
# punctuation that was lost in the underscored file names
allocations_diagnostics_plot %>%
  filter(metric == "AIC") %>%
  mutate(BCR = gsub("_", " ", .$BCR)) %>%
  mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
  mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
  mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
  mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR)) %>%
  ggplot(., aes(x = k, group = metric)) +
  geom_line(aes(y = value, colour = metric)) +
  facet_wrap(~BCR, scales = "free", labeller = labeller(BCR = label_wrap_gen(18)))+
  xlab("k")+
  ylab("value")+
  guides(color=FALSE)+
  theme_bw()+
  theme(strip.text.x = element_text(size = 6, colour = "black"))
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/cluster_aics.png",
       width=9.5, height=8, units="in", dpi=300)
# total within-cluster SS alone, for comparison with the AIC curves
ggplot(filter(allocations_diagnostics_plot, metric == "total_within_ss"), aes(x = k, group = metric)) +
  geom_line(aes(y = value, colour = metric)) +
  facet_wrap(~BCR, scales = "free")
### looks like AIC is more informative at this point
### will try re-run with a distance metric on the data?
# choose best clusters ----------------------------------------------------
# for each BCR keep the k with the lowest AIC
min_AIC_data <- allocations_diagnostics_summary %>%
  group_by(BCR) %>%
  filter(AIC == min(AIC))
min_AIC_clusters <- purrr::map2(allocations, min_AIC_data$k, retreive_k_clusters)
# compare communities to land cover classification ------------------------
# join the data together (join_lc_data is sourced from the _funs.R file)
clusters_landcover <- lapply(names(min_AIC_clusters), join_lc_data, min_AIC_clusters, localities_list)
names(clusters_landcover) <- names(min_AIC_clusters)
# entropy values: examine distribution of assemblages
# low entropy indicates less even distribution among groups (often indicates groups with low or no membership)
# urban_diff values: examine whether (proportionally) more of the communities are in non-urban areas
# positive values will mean more proportional membership of sites to the non-urban component
#### urban / non-urban comparison
urban_entropy <- gather(bind_rows(lapply(clusters_landcover, calculate_entropy, type = "urban"),
                                  .id = "BCR"), "zone", "value", -BCR)
# paired view: each line connects the two zone values within one BCR
ggplot(urban_entropy, aes(y = value, x = zone)) +
  geom_boxplot() +
  geom_point(aes(colour = BCR)) +
  geom_line(aes(group = BCR))
# mixed model: zone effect on entropy with BCR as a random intercept
summary(nlme::lme(fixed = value ~ zone, random = ~ 1 | BCR, data = urban_entropy))
urban_urbandiff <- bind_rows(lapply(clusters_landcover, calculate_urban_diff, type = "urban"),
                             .id = "BCR")
# points above the red zero line indicate more membership in non-urban areas
ggplot(urban_urbandiff, aes(y = urban_diff, x = BCR)) +
  #geom_violin() +
  geom_point(alpha = 0.5, size = 3) +
  geom_hline(yintercept = 0, colour = "red") +
  theme_classic()
#### land cover comparison
# entropy values - examine distriubtion of assembleges
landcover_entropy <- gather(bind_rows(lapply(clusters_landcover, calculate_entropy, type = "landcover"),
.id = "BCR"),
"zone", "value", -BCR) %>%
filter(zone %in% c("Natural.Green.Area", "Urban.Green.Area")) %>%
mutate(zone = gsub("Natural.Green.Area", "Natural Green Area", .$zone)) %>%
mutate(zone = gsub("Urban.Green.Area", "Urban Green Area", .$zone)) %>%
mutate(BCR = gsub("_", " ", .$BCR)) %>%
mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR))
ggplot(landcover_entropy, aes(y = value, x = zone)) +
geom_violin()+
stat_summary(fun.y=mean, geom="point", size=6, shape=15, alpha=0.7, color="red", aes(color=BCR))+
theme_classic()+
ylab("Shannon entropy of cluster assignments")+
xlab("")+
coord_flip()+
theme(axis.text.x=element_text(size=16))+
theme(axis.text.y=element_text(size=16))+
theme(axis.title.x = element_text(size=14))+
theme(axis.title.y=element_text(size=14))
ggsave(filename="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Figures/cluster_assignments.png",
height=4, width=6, units="in")
## stats to report for paper
landcover_entropy %>%
group_by(zone) %>%
summarise(mean=mean(value),
sd=sd(value))
urban <- landcover_entropy %>%
filter(zone == "Urban Green Area") %>%
.$value
natural <- landcover_entropy %>%
filter(zone == "Natural Green Area") %>%
.$value
var.test(urban, natural)
t.test(urban, natural, var.equal=FALSE, paired=FALSE)
summary(nlme::lme(fixed = value ~ zone, random = ~ 1 | BCR, data = landcover_entropy))
# urban_diff values - examine whether (proportionally) more of the communities are in non-urban areas
landcover_urbandiff <- bind_rows(lapply(clusters_landcover, calculate_urban_diff, type = "landcover"),
.id = "BCR")
library(forcats)
landcover_urbandiff %>%
group_by(BCR) %>%
summarise(mean=mean(urban_diff)) %>%
inner_join(., landcover_urbandiff, by="BCR") %>%
arrange(mean) %>%
mutate(BCR = gsub("_", " ", .$BCR)) %>%
mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR)) %>%
ggplot(., aes(y = urban_diff, x = fct_inorder(BCR))) +
geom_point(alpha = 0.5, size = 3) +
geom_hline(yintercept = 0, colour = "red") +
theme_classic()+
coord_flip()+
ylab("Proportional difference")+
xlab("BCR")
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/proportional_difference.png",
width=8, height=5, units="in", dpi=300)
# calculate per-cluster species metrics (i.e. community metrics) ------------------------
clusters_metrics <- lapply(names(min_AIC_clusters), calculate_cluster_metrics,
min_AIC_clusters, localities_list)
names(clusters_metrics) <- names(min_AIC_clusters)
cluster_metrics_df <- bind_rows(clusters_metrics, .id = "BCR") %>%
mutate(cluster = as.integer(cluster))
# join with other data...
cluster_metrics_urbandiff <- inner_join(urban_urbandiff, cluster_metrics_df)
cluster_metrics_lcdiff <- inner_join(landcover_urbandiff, cluster_metrics_df)
# check for trends...
plot(urban_diff~richness, data = cluster_metrics_urbandiff)
plot(urban_diff~richness, data = cluster_metrics_lcdiff)
plot(urban_diff~diversity, data = cluster_metrics_urbandiff)
plot(urban_diff~diversity, data = cluster_metrics_lcdiff)
## Make two plots for appendix for paper
ggplot(cluster_metrics_lcdiff, aes(x=urban_diff, y=richness))+
geom_point()+
theme_classic()+
xlab("Proportional difference")+
ylab("Total richness")
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/richness_proportional.png",
width=7, height=6, units="in", dpi=300)
ggplot(cluster_metrics_lcdiff, aes(x=urban_diff, y=diversity))+
geom_point()+
theme_classic()+
xlab("Proportional difference")+
ylab("Total diversity")
| /R/community_cluster_diagnostics.R | no_license | coreytcallaghan/LAND-D-19-00128R1 | R | false | false | 8,950 | r | library(dplyr)
library(tidyr)
library(ggplot2)
source("R/community_cluster_diagnostics_funs.R")
# load allocations calculate in community_cluster_fit-k-means.R
allocations <- readRDS("Outputs/cluster_allocations/bcr_kmeans_allocations.rds")
# load the localities that were used for clustering
localities_list <- lapply(allocations, function(x) x[["localities"]])
# inspect the sum of squares ----------------------------------------------
# flatten
allocations_diagnostics <- bind_rows(lapply(allocations, flatten_list), .id = "BCR") # this is where the BCR unlisting is done
# add information criterion
allocations_diagnostics_summary <- allocations_diagnostics %>%
mutate(AIC = total_within_ss + (2 * n_vars * k),
BIC = total_within_ss + (log(n_obs) * n_vars * k))
# go long for plotting
allocations_diagnostics_plot <- allocations_diagnostics_summary %>%
select(BCR, k, total_within_ss, AIC, BIC) %>%
gather(metric, value, total_within_ss:BIC)
ggplot(allocations_diagnostics_plot, aes(x = k, group = metric)) +
geom_line(aes(y = value, colour = metric)) +
facet_wrap(~BCR, scales = "free")
allocations_diagnostics_plot %>%
filter(metric == "AIC") %>%
mutate(BCR = gsub("_", " ", .$BCR)) %>%
mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR)) %>%
ggplot(., aes(x = k, group = metric)) +
geom_line(aes(y = value, colour = metric)) +
facet_wrap(~BCR, scales = "free", labeller = labeller(BCR = label_wrap_gen(18)))+
xlab("k")+
ylab("value")+
guides(color=FALSE)+
theme_bw()+
theme(strip.text.x = element_text(size = 6, colour = "black"))
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/cluster_aics.png",
width=9.5, height=8, units="in", dpi=300)
ggplot(filter(allocations_diagnostics_plot, metric == "total_within_ss"), aes(x = k, group = metric)) +
geom_line(aes(y = value, colour = metric)) +
facet_wrap(~BCR, scales = "free")
### looks like AIC is more informative at this point
### will try re-run with a distance metric on the data?
# choose best clusters ----------------------------------------------------
min_AIC_data <- allocations_diagnostics_summary %>%
group_by(BCR) %>%
filter(AIC == min(AIC))
min_AIC_clusters <- purrr::map2(allocations, min_AIC_data$k, retreive_k_clusters)
# compare communities to land cover classification ------------------------
# join the data together
clusters_landcover <- lapply(names(min_AIC_clusters), join_lc_data, min_AIC_clusters, localities_list)
names(clusters_landcover) <- names(min_AIC_clusters)
# entropy values: examine distriubtion of assembleges
# low entropy indicates less even distribution among groups (often indicates gruops with low or no membership)
# urban_diff values: examine whether (proportionally) more of the communities are in non-urban areas
# positive values will mean more proportional membership of sites to the non-urban component
#### urban / non-urban comparison
urban_entropy <- gather(bind_rows(lapply(clusters_landcover, calculate_entropy, type = "urban"),
.id = "BCR"), "zone", "value", -BCR)
ggplot(urban_entropy, aes(y = value, x = zone)) +
geom_boxplot() +
geom_point(aes(colour = BCR)) +
geom_line(aes(group = BCR))
summary(nlme::lme(fixed = value ~ zone, random = ~ 1 | BCR, data = urban_entropy))
urban_urbandiff <- bind_rows(lapply(clusters_landcover, calculate_urban_diff, type = "urban"),
.id = "BCR")
ggplot(urban_urbandiff, aes(y = urban_diff, x = BCR)) +
#geom_violin() +
geom_point(alpha = 0.5, size = 3) +
geom_hline(yintercept = 0, colour = "red") +
theme_classic()
#### land cover comparison
# entropy values - examine distriubtion of assembleges
landcover_entropy <- gather(bind_rows(lapply(clusters_landcover, calculate_entropy, type = "landcover"),
.id = "BCR"),
"zone", "value", -BCR) %>%
filter(zone %in% c("Natural.Green.Area", "Urban.Green.Area")) %>%
mutate(zone = gsub("Natural.Green.Area", "Natural Green Area", .$zone)) %>%
mutate(zone = gsub("Urban.Green.Area", "Urban Green Area", .$zone)) %>%
mutate(BCR = gsub("_", " ", .$BCR)) %>%
mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR))
ggplot(landcover_entropy, aes(y = value, x = zone)) +
geom_violin()+
stat_summary(fun.y=mean, geom="point", size=6, shape=15, alpha=0.7, color="red", aes(color=BCR))+
theme_classic()+
ylab("Shannon entropy of cluster assignments")+
xlab("")+
coord_flip()+
theme(axis.text.x=element_text(size=16))+
theme(axis.text.y=element_text(size=16))+
theme(axis.title.x = element_text(size=14))+
theme(axis.title.y=element_text(size=14))
ggsave(filename="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Figures/cluster_assignments.png",
height=4, width=6, units="in")
## stats to report for paper
landcover_entropy %>%
group_by(zone) %>%
summarise(mean=mean(value),
sd=sd(value))
urban <- landcover_entropy %>%
filter(zone == "Urban Green Area") %>%
.$value
natural <- landcover_entropy %>%
filter(zone == "Natural Green Area") %>%
.$value
var.test(urban, natural)
t.test(urban, natural, var.equal=FALSE, paired=FALSE)
summary(nlme::lme(fixed = value ~ zone, random = ~ 1 | BCR, data = landcover_entropy))
# urban_diff values - examine whether (proportionally) more of the communities are in non-urban areas
landcover_urbandiff <- bind_rows(lapply(clusters_landcover, calculate_urban_diff, type = "landcover"),
.id = "BCR")
library(forcats)
landcover_urbandiff %>%
group_by(BCR) %>%
summarise(mean=mean(urban_diff)) %>%
inner_join(., landcover_urbandiff, by="BCR") %>%
arrange(mean) %>%
mutate(BCR = gsub("_", " ", .$BCR)) %>%
mutate(BCR = gsub("WEST GULF COASTAL PLAIN OUACHITAS", "WEST GULF COASTAL PLAIN/OUACHITAS", .$BCR)) %>%
mutate(BCR = gsub("SOUTHERN ROCKIES COLORADO PLATEAU", "SOUTHERN ROCKIES/COLORADO PLATEAU", .$BCR)) %>%
mutate(BCR = gsub("LOWER GREAT LAKES ST LAWRENCE PLAIN", "LOWER GREAT LAKES/ST. LAWRENCE PLAIN", .$BCR)) %>%
mutate(BCR = gsub("NEW ENGLAND MID ATLANTIC COAST", "NEW ENGLAND/MID-ATLANTIC COAST", .$BCR)) %>%
ggplot(., aes(y = urban_diff, x = fct_inorder(BCR))) +
geom_point(alpha = 0.5, size = 3) +
geom_hline(yintercept = 0, colour = "red") +
theme_classic()+
coord_flip()+
ylab("Proportional difference")+
xlab("BCR")
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/proportional_difference.png",
width=8, height=5, units="in", dpi=300)
# calculate per-cluster species metrics (i.e. community metrics) ------------------------
clusters_metrics <- lapply(names(min_AIC_clusters), calculate_cluster_metrics,
min_AIC_clusters, localities_list)
names(clusters_metrics) <- names(min_AIC_clusters)
cluster_metrics_df <- bind_rows(clusters_metrics, .id = "BCR") %>%
mutate(cluster = as.integer(cluster))
# join with other data...
cluster_metrics_urbandiff <- inner_join(urban_urbandiff, cluster_metrics_df)
cluster_metrics_lcdiff <- inner_join(landcover_urbandiff, cluster_metrics_df)
# check for trends...
plot(urban_diff~richness, data = cluster_metrics_urbandiff)
plot(urban_diff~richness, data = cluster_metrics_lcdiff)
plot(urban_diff~diversity, data = cluster_metrics_urbandiff)
plot(urban_diff~diversity, data = cluster_metrics_lcdiff)
## Make two plots for appendix for paper
ggplot(cluster_metrics_lcdiff, aes(x=urban_diff, y=richness))+
geom_point()+
theme_classic()+
xlab("Proportional difference")+
ylab("Total richness")
ggsave(file="H:/Dissertation/Dissertation Chapters/Data Chapters/United States Urban Bird Patterns/Submissions/Landscape Ecology/Appendix S8/richness_proportional.png",
width=7, height=6, units="in", dpi=300)
ggplot(cluster_metrics_lcdiff, aes(x=urban_diff, y=diversity))+
geom_point()+
theme_classic()+
xlab("Proportional difference")+
ylab("Total diversity")
|
library(shiny)
# Define server
shinyServer(function(input, output) {

  # Lazily (re)run the simulation whenever tmax or N changes in the UI.
  # Both outputs below share this one reactive, so dosim() runs once per
  # input change rather than once per output.
  sim <- reactive({
    dosim(
      maxsimtime = as.numeric(input$tmax),
      modules    = list("habitat", "caribou"),
      params     = list(Ncaribou = as.numeric(input$N)),
      path       = ".."
    )
  })

  # Disabled static-map rendering, kept for reference:
  # output$mapPlot <- renderPlot({
  #   map <- readRDS("../data/habitat.rds")
  #   plot(map)
  #   caribou <- readRDS("../data/caribou_0.rds")
  #   points(caribou, pch = 19, cex = 0.1)
  #   for (i in 1:input$tmax) {
  #     caribou <- readRDS(paste("../data/caribou_", i, ".rds", sep = ""))
  #     points(caribou[[1]], pch = 19, cex = 0.1)
  #     points(caribou[[2]]$x, caribou[[2]]$y, col = caribou[[2]]$ids, pch = 19, cex = 0.1)
  #   }
  # })

  # Main simulation plot.
  output$simPlot <- renderPlot({
    sim()
  })

  # Debug only: dump the raw simulation result as text.
  output$debug <- renderText({
    sim()
  })
})
| /SpaDES-master/SAMPLE/shiny_caribou/server.R | no_license | B-Ron12/RCodeSK | R | false | false | 837 | r | library(shiny)
# Define server
shinyServer(function(input, output) {
sim <- reactive({
dosim(maxsimtime=as.numeric(input$tmax), modules=list("habitat", "caribou"),
params=list(Ncaribou=as.numeric(input$N)), path="..")
})
# output$mapPlot <- renderPlot({
# map <- readRDS("../data/habitat.rds")
# plot(map)
#
# caribou <- readRDS("../data/caribou_0.rds")
# points(caribou, pch=19, cex=0.1)
#
# for (i in 1:input$tmax) {
# caribou <- readRDS(paste("../data/caribou_", i, ".rds", sep=""))
# points(caribou[[1]], pch=19, cex=0.1)
# points(caribou[[2]]$x, caribou[[2]]$y, col=caribou[[2]]$ids, pch=19, cex=0.1)
# }
# })
output$simPlot <- renderPlot({ sim() })
# debug only
output$debug <- renderText({ sim() })
})
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Read Data
AllData<-mtcars
# Define server logic required to draw a plot
shinyServer(function(input, output) {

  # Reactive subset of mtcars: the user-selected predictor column(s)
  # plus the mpg response column.
  myFinalData <- reactive({
    selected <- subset(AllData, select = input$var)
    selected$mpg <- AllData$mpg
    selected
  })

  # "Summary" tab: printed summary() of the selected data.
  output$sumry <- renderPrint({
    summary(myFinalData())
  })

  # "Structure" tab: printed str() of the selected data.
  output$struct <- renderPrint({
    str(myFinalData())
  })

  # "Data" tab: the selected data itself.
  output$displayData <- renderTable({
    myFinalData()
  })

  # "Plot" tab: mpg against the selected variable.
  output$mygraph <- renderPlot({
    plot(as.formula(paste("mpg ~", input$var)),
         data = myFinalData())
  })
})
| /server.R | no_license | XiaoJenJen/data-products | R | false | false | 1,334 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Read Data
AllData<-mtcars
# Define server logic required to draw a plot
shinyServer(function(input, output) {
myFinalData<-reactive({
#get data for selected variables
myData1<-subset(AllData, select = input$var)
myData1$mpg<-AllData$mpg
myData1
})
# Prepare Summary Tab
rendersumry <- reactive({ summary(myFinalData())})
output$sumry <- renderPrint({
rendersumry()
})
# Prepare Structure Tab
renderstr <- reactive({ str(myFinalData())})
output$struct <- renderPrint({
renderstr()
})
# Prepare "Data tab"
output$displayData <- renderTable({
myFinalData()
})
# Prepare Plot Tab
output$mygraph <- renderPlot({
formulaText <- reactive({
paste("mpg ~", input$var)
})
plotdata <- myFinalData()
plot(as.formula(formulaText()),
data=plotdata)
})
})
|
## posteriorSuccess: Bayesian update of the parameter grid for stop-signal trials.
##
## PSIobject is assumed to be a list carrying at least (names inferred from
## usage here -- confirm against the code that builds PSIobject):
##   thresholds, slopes, error_rates, SSDs - the parameter / delay grids
##   Stop_trial                            - index into the first dim of p_prior
##   p_prior          - prior over (trial type, threshold, slope, error rate)
##   p_inhibit_signal - P(inhibit | params, SSD), dims (threshold, slope, error rate, SSD)
##
## Returns a (thresholds x slopes x error_rates x SSDs) array: for every SSD the
## 3-D slice is prior * likelihood, normalised so it sums to 1 (Bayes rule).
posteriorSuccess <- function( PSIobject ) {
# compute posteriors, for all possible SSDs, for success and error trials
# Grid sizes taken from the PSI object.
thresh <- length(PSIobject$thresholds)
slopes <- length(PSIobject$slopes)
errs <- length(PSIobject$error_rates)
numssds <- length(PSIobject$SSDs)
# Pre-allocate the 4-D posterior array (one 3-D slice per SSD).
p_posterior_success <- array( 0, dim = c(thresh, slopes, errs, numssds) )
for (del in 1:length(PSIobject$SSDs)) { # compute posteriors for all possible trials
# Posterior slice for SSD 'del': prior (at the current trial type) times the
# inhibition likelihood, divided by its total so the slice is a distribution.
# NOTE(review): the prior slice is recomputed every iteration and the product
# is evaluated twice (numerator and inside sum()); both are loop-invariant-ish
# costs that could be hoisted.
p_posterior_success[,,,del] = ( PSIobject$p_prior[PSIobject$Stop_trial,,,] * PSIobject$p_inhibit_signal[,,,del] ) / sum(( PSIobject$p_prior[PSIobject$Stop_trial,,,] * PSIobject$p_inhibit_signal[,,,del] ))
}
return( p_posterior_success)
} | /R/posteriorSuccess.R | no_license | lorweiuk/PSIadjusted | R | false | false | 736 | r | posteriorSuccess <- function( PSIobject ) {
# compute posteriors, for all possible SSDs, for success and error trials
# Grid sizes taken from the PSI object (assumed list structure; see usage below).
thresh <- length(PSIobject$thresholds)
slopes <- length(PSIobject$slopes)
errs <- length(PSIobject$error_rates)
numssds <- length(PSIobject$SSDs)
# The prior slice at the current trial type is loop-invariant: compute it once.
prior_slice <- PSIobject$p_prior[PSIobject$Stop_trial, , , ]
# Pre-allocate the (thresholds x slopes x error_rates x SSDs) posterior array.
p_posterior_success <- array(0, dim = c(thresh, slopes, errs, numssds))
# Bayes rule per SSD: posterior = prior * P(inhibit | params, SSD), normalised
# so each 3-D slice sums to 1.  seq_len() (unlike 1:length(...)) iterates zero
# times when there are no SSDs instead of looping over c(1, 0); the joint
# product is also computed once per SSD instead of twice.
for (del in seq_len(numssds)) {
  joint <- prior_slice * PSIobject$p_inhibit_signal[, , , del]
  p_posterior_success[, , , del] <- joint / sum(joint)
}
return(p_posterior_success)
} |
## Preparatory moosilauke data work
## NOTE: using estimated heights and bole volumes from boles project
## "~/work/boles/"
## eht = height indicator column, 0 means not predicted
source("~/work/data/data-prep/read-moose.R") ## calls data 'pp'
source("~/work/functions/functions-datatrans.R")
## Check to see that we have the necessary columns
cols <- c("DBH","HT","EHT","BV")
yrs <- c(86, 87, 98, 10)
checkCols(pp, cols, yrs) ## returns columns that arent present
## rename columns that have been changed upstream: HTTCR -> HT, ebv -> BV
changed <- c("HTTCR", "ebv")
replacements <- c("HT", "BV")
## seq_along() is safe if 'changed' is ever emptied; 1:length(changed) would
## iterate over c(1, 0) and index with 0.
for (i in seq_along(changed))
  names(pp) <- gsub(changed[i], replacements[i], names(pp))
checkCols(pp, cols, yrs) ## recheck cols -- returns any columns still missing
## remake the ba and growth columns so they are annualized and named
## with yr suffixes for long transformation
pp$BA86 <- 0.00007854*pp$DBH86*pp$DBH86 ### Basal area column, m^2
pp$BA87 <- 0.00007854*pp$DBH87*pp$DBH87
pp$BA98 <- 0.00007854*pp$DBH98*pp$DBH98
pp$BA10 <- 0.00007854*pp$DBH10*pp$DBH10
pp$BAGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$BA86),]$BAGROWTH98 <- (pp[!is.na(pp$BA86),]$BA98-
pp[!is.na(pp$BA86),]$BA86)/12
pp[!is.na(pp$BA87),]$BAGROWTH98 <- (pp[!is.na(pp$BA87),]$BA98-
pp[!is.na(pp$BA87),]$BA87)/11
pp$BAGROWTH10 <- (pp$BA10-pp$BA98)/12
pp$BVGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$BV86),]$BVGROWTH98 <- (pp[!is.na(pp$BV86),]$BV98-
pp[!is.na(pp$BV86),]$BV86)/12
pp[!is.na(pp$BV87),]$BVGROWTH98 <- (pp[!is.na(pp$BV87),]$BV98-
pp[!is.na(pp$BV87),]$BV87)/11
pp$BVGROWTH10 <- (pp$BV10-pp$BV98)/12
pp$DBHGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$DBH86),]$DBHGROWTH98 <- (pp[!is.na(pp$DBH86),]$DBH98-
pp[!is.na(pp$DBH86),]$DBH86)/12
pp[!is.na(pp$DBH87),]$DBHGROWTH98 <- (pp[!is.na(pp$DBH87),]$DBH98-
pp[!is.na(pp$DBH87),]$DBH87)/11
pp$DBHGROWTH10 <-(pp$DBH10-pp$DBH98)/12
pp$HTGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$HT86),]$HTGROWTH98 <- (pp[!is.na(pp$HT86),]$HT98-
pp[!is.na(pp$HT86),]$HT86)/12
pp[!is.na(pp$HT87),]$HTGROWTH98 <- (pp[!is.na(pp$HT87),]$HT98-
pp[!is.na(pp$HT87),]$HT87)/11
pp$HTGROWTH10 <-(pp$HT10-pp$HT98)/12
# Make prior columns for ht, dbh, ba, bv
# For each measure V in {DBH, BA, BV, HT} the original repeated the same four
# statements by hand (32 assignments); this loop is the identical logic once:
#   * PRIORV86 / PRIORV87 are created but never filled (all NA, as before),
#   * PRIORV98 holds the 1986 value where present, then the 1987 value
#     (1987 overwrites 1986 if a row somehow had both, matching the original
#     assignment order),
#   * PRIORV10 holds the 1998 value where present.
# Columns are created in the same order as before, so the final layout of pp
# is unchanged.
for (v in c("DBH", "BA", "BV", "HT")) {
  for (yr in c("86", "87", "98", "10")) {
    pp[[paste0("PRIOR", v, yr)]] <- rep(NA, nrow(pp))
  }
  has86 <- !is.na(pp[[paste0(v, "86")]])
  pp[[paste0("PRIOR", v, "98")]][has86] <- pp[[paste0(v, "86")]][has86]
  has87 <- !is.na(pp[[paste0(v, "87")]])
  pp[[paste0("PRIOR", v, "98")]][has87] <- pp[[paste0(v, "87")]][has87]
  has98 <- !is.na(pp[[paste0(v, "98")]])
  pp[[paste0("PRIOR", v, "10")]][has98] <- pp[[paste0(v, "98")]][has98]
}
## make columns that identify direction for dbh and ht from last sampling period
## period 1
## pp$p98dbh <- rep(NA, nrow(pp))
## pp[pp$PPLOT<16,]$p98dbh <- pp[pp$PPLOT<16,]$DBH98 > pp[pp$PPLOT<16,]$DBH86
## pp[pp$PPLOT>=16,]$p98dbh <- pp[pp$PPLOT>=16,]$DBH98 > pp[pp$PPLOT>=16,]$DBH87
## pp$p98ht <- rep(NA, nrow(pp))
## pp[pp$PPLOT<16,]$p98ht <- pp[pp$PPLOT<16,]$HT98 > pp[pp$PPLOT<16,]$HT86
## pp[pp$PPLOT>=16,]$p98ht <- pp[pp$PPLOT>=16,]$HT98 > pp[pp$PPLOT>=16,]$HT87
## # period 2
## pp$p10dbh <- pp$DBH10 > pp$DBH98
## pp$p10ht <- pp$HT10 > pp$HT98
## colnames to lower case and drop unwanted columns
names(pp) <-tolower(names(pp))
## separate cht8687 into cht86 and cht87:
## plots 1-12 have cht86 and 13-24 have cht87
pp$cht86 <- ifelse(pp$pplot < 16, pp$cht8687, NA)
pp$cht87 <- ifelse(pp$pplot > 15, pp$cht8687, NA)
yrs <- c(86, 87, 98, 10)
toKeep <- c("pplot","splot","tag","spec","yrmort","elev","elevcl","asp","aspcl",
"bqudx","bqudy","soilcl","slopcl","slope8687", paste0("cht",yrs),
paste0("stat",yrs), paste0("dbh",yrs), paste0("bv",yrs), paste0("ba",yrs),
paste0("ht",yrs), paste0("eht",yrs), paste0("decm",yrs), paste0("cpos",yrs),
paste0("dbhgrowth",yrs), paste0("htgrowth",yrs), paste0("priordbh",yrs),
paste0("priorba",yrs), paste0("bagrowth",yrs), paste0("priorht",yrs), paste0("priorbv",yrs),
paste0("bvgrowth",yrs))
pp <- pp[,names(pp) %in% toKeep]
write.csv(pp, "~/work/data/data/moose-wide.csv", row.names = FALSE)
| /data-prep/clean-moose.R | no_license | nverno/data | R | false | false | 5,194 | r | ## Preparatory moosilauke data work
## NOTE: using estimated heights and bole volumes from boles project
## "~/work/boles/"
## eht = height indicator column, 0 means not predicted
source("~/work/data/data-prep/read-moose.R") ## calls data 'pp'
source("~/work/functions/functions-datatrans.R")
## Check to see that we have the necessary columns
cols <- c("DBH","HT","EHT","BV")
yrs <- c(86, 87, 98, 10)
checkCols(pp, cols, yrs) ## returns columns that arent present
## rename columns that have been changed
changed <- c("HTTCR", "ebv")
replacements <- c("HT", "BV")
for (i in 1:length(changed))
names(pp) <- gsub(changed[i], replacements[i], names(pp))
checkCols(pp, cols, yrs) ## recheck cols
## remake the ba and growth columns so they are annualized and named
## with yr suffixes for long transformation
pp$BA86 <- 0.00007854*pp$DBH86*pp$DBH86 ### Basal area column, m^2
pp$BA87 <- 0.00007854*pp$DBH87*pp$DBH87
pp$BA98 <- 0.00007854*pp$DBH98*pp$DBH98
pp$BA10 <- 0.00007854*pp$DBH10*pp$DBH10
pp$BAGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$BA86),]$BAGROWTH98 <- (pp[!is.na(pp$BA86),]$BA98-
pp[!is.na(pp$BA86),]$BA86)/12
pp[!is.na(pp$BA87),]$BAGROWTH98 <- (pp[!is.na(pp$BA87),]$BA98-
pp[!is.na(pp$BA87),]$BA87)/11
pp$BAGROWTH10 <- (pp$BA10-pp$BA98)/12
pp$BVGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$BV86),]$BVGROWTH98 <- (pp[!is.na(pp$BV86),]$BV98-
pp[!is.na(pp$BV86),]$BV86)/12
pp[!is.na(pp$BV87),]$BVGROWTH98 <- (pp[!is.na(pp$BV87),]$BV98-
pp[!is.na(pp$BV87),]$BV87)/11
pp$BVGROWTH10 <- (pp$BV10-pp$BV98)/12
pp$DBHGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$DBH86),]$DBHGROWTH98 <- (pp[!is.na(pp$DBH86),]$DBH98-
pp[!is.na(pp$DBH86),]$DBH86)/12
pp[!is.na(pp$DBH87),]$DBHGROWTH98 <- (pp[!is.na(pp$DBH87),]$DBH98-
pp[!is.na(pp$DBH87),]$DBH87)/11
pp$DBHGROWTH10 <-(pp$DBH10-pp$DBH98)/12
pp$HTGROWTH98 <- rep(NA, nrow(pp))
pp[!is.na(pp$HT86),]$HTGROWTH98 <- (pp[!is.na(pp$HT86),]$HT98-
pp[!is.na(pp$HT86),]$HT86)/12
pp[!is.na(pp$HT87),]$HTGROWTH98 <- (pp[!is.na(pp$HT87),]$HT98-
pp[!is.na(pp$HT87),]$HT87)/11
pp$HTGROWTH10 <-(pp$HT10-pp$HT98)/12
# Make prior columns for ht, dbh, ba, bv
pp$PRIORDBH86 <- rep(NA, nrow(pp))
pp$PRIORDBH87 <- rep(NA, nrow(pp))
pp$PRIORDBH98 <- rep(NA, nrow(pp))
pp$PRIORDBH10 <- rep(NA, nrow(pp))
pp[!is.na(pp$DBH86),]$PRIORDBH98 <- pp[!is.na(pp$DBH86),]$DBH86
pp[!is.na(pp$DBH87),]$PRIORDBH98 <- pp[!is.na(pp$DBH87),]$DBH87
pp[!is.na(pp$DBH98),]$PRIORDBH10 <- pp[!is.na(pp$DBH98),]$DBH98
pp$PRIORBA86 <- rep(NA, nrow(pp))
pp$PRIORBA87 <- rep(NA, nrow(pp))
pp$PRIORBA98 <- rep(NA, nrow(pp))
pp$PRIORBA10 <- rep(NA, nrow(pp))
pp[!is.na(pp$BA86),]$PRIORBA98 <- pp[!is.na(pp$BA86),]$BA86
pp[!is.na(pp$BA87),]$PRIORBA98 <- pp[!is.na(pp$BA87),]$BA87
pp[!is.na(pp$BA98),]$PRIORBA10 <- pp[!is.na(pp$BA98),]$BA98
pp$PRIORBV86 <- rep(NA, nrow(pp))
pp$PRIORBV87 <- rep(NA, nrow(pp))
pp$PRIORBV98 <- rep(NA, nrow(pp))
pp$PRIORBV10 <- rep(NA, nrow(pp))
pp[!is.na(pp$BV86),]$PRIORBV98 <- pp[!is.na(pp$BV86),]$BV86
pp[!is.na(pp$BV87),]$PRIORBV98 <- pp[!is.na(pp$BV87),]$BV87
pp[!is.na(pp$BV98),]$PRIORBV10 <- pp[!is.na(pp$BV98),]$BV98
pp$PRIORHT86 <- rep(NA, nrow(pp))
pp$PRIORHT87 <- rep(NA, nrow(pp))
pp$PRIORHT98 <- rep(NA, nrow(pp))
pp$PRIORHT10 <- rep(NA, nrow(pp))
pp[!is.na(pp$HT86),]$PRIORHT98 <- pp[!is.na(pp$HT86),]$HT86
pp[!is.na(pp$HT87),]$PRIORHT98 <- pp[!is.na(pp$HT87),]$HT87
pp[!is.na(pp$HT98),]$PRIORHT10 <- pp[!is.na(pp$HT98),]$HT98
## make columns that identify direction for dbh and ht from last sampling period
## period 1
## pp$p98dbh <- rep(NA, nrow(pp))
## pp[pp$PPLOT<16,]$p98dbh <- pp[pp$PPLOT<16,]$DBH98 > pp[pp$PPLOT<16,]$DBH86
## pp[pp$PPLOT>=16,]$p98dbh <- pp[pp$PPLOT>=16,]$DBH98 > pp[pp$PPLOT>=16,]$DBH87
## pp$p98ht <- rep(NA, nrow(pp))
## pp[pp$PPLOT<16,]$p98ht <- pp[pp$PPLOT<16,]$HT98 > pp[pp$PPLOT<16,]$HT86
## pp[pp$PPLOT>=16,]$p98ht <- pp[pp$PPLOT>=16,]$HT98 > pp[pp$PPLOT>=16,]$HT87
## # period 2
## pp$p10dbh <- pp$DBH10 > pp$DBH98
## pp$p10ht <- pp$HT10 > pp$HT98
## colnames to lower case and drop unwanted columns
names(pp) <-tolower(names(pp))
## separate cht8687 into cht86 and cht87:
## plots 1-12 have cht86 and 13-24 have cht87
pp$cht86 <- ifelse(pp$pplot < 16, pp$cht8687, NA)
pp$cht87 <- ifelse(pp$pplot > 15, pp$cht8687, NA)
yrs <- c(86, 87, 98, 10)
toKeep <- c("pplot","splot","tag","spec","yrmort","elev","elevcl","asp","aspcl",
"bqudx","bqudy","soilcl","slopcl","slope8687", paste0("cht",yrs),
paste0("stat",yrs), paste0("dbh",yrs), paste0("bv",yrs), paste0("ba",yrs),
paste0("ht",yrs), paste0("eht",yrs), paste0("decm",yrs), paste0("cpos",yrs),
paste0("dbhgrowth",yrs), paste0("htgrowth",yrs), paste0("priordbh",yrs),
paste0("priorba",yrs), paste0("bagrowth",yrs), paste0("priorht",yrs), paste0("priorbv",yrs),
paste0("bvgrowth",yrs))
pp <- pp[,names(pp) %in% toKeep]
write.csv(pp, "~/work/data/data/moose-wide.csv", row.names = FALSE)
|
####################################################
## Uganda prepare data for activity data analysis ##
####################################################
## This script reprojects the map data and combines
## the maps
## contact : yelena.finegold@fao.org
## created : 09 April 2019
## modified: 24 April 2019
####################################################
### load the parameters
source('~/uga_activity_data/scripts/get_parameters.R')
### load data
## reference data for changes
cefile <- paste0(ref_dir,'TOTAL_collectedData_earthuri_ce_changes1517_on_080319_151929_CSV.csv')
## 2015 land cover map
lc2015 <- paste0(lc15_dir,'sieved_LC_2015.tif')
## 2017 land cover map
lc2017 <- paste0(lc17_dir,'LC_2017_18012019.tif')
## forest management areas
mgmt <- paste0(mgmt_dir,'Protected_Areas_UTMWGS84_dslv.shp')
## Latlong projection used to reproject data
proj <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
### assign names to data that will be created in this script
lc2015.proj <- paste0(lc15_dir,"sieved_LC_2015_proj.tif")
lc2017.proj <- paste0(lc17_dir,"LC_2017_18012019_proj.tif")
mgmt.proj <- paste0(mgmt_dir,'Protected_Areas_WGS84_dslv.shp')
mgmt.proj.tif <- paste0(mgmt_dir,"Protected_Areas_WGS84_dslv.tif")
mgmt.tif <- paste0(mgmt_dir,"Protected_Areas_UTM_dslv.tif")
lc2017.aligned <- paste0(lc17_dir,'LC_2017_18012019_aligned.tif')
change <- paste0(ad_dir,"change_2015_2017.tif")
change.sieved <- paste0(ad_dir,"change_2015_2017_sieve.tif")
###############################################################################
################### REPROJECT IN latlong PROJECTION
###############################################################################
# 2015 LC map
if(!file.exists(lc2015.proj)){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
lc2015,
lc2015.proj
))
}
# 2017 LC map
if(!file.exists(lc2017.proj)){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
lc2017,
lc2017.proj
))
}
##### reproject mgmt data into latlong
mgmt.data <- readOGR(mgmt)
#download province boundaries
adm <- getData ('GADM', country= countrycode, level=1)
#match the coordinate systems for the sample points and the boundaries
mgmt.data.proj <- spTransform(mgmt.data,crs(adm))
writeOGR(mgmt.data.proj,mgmt_dir,'Protected_Areas_WGS84_dslv',driver = 'ESRI Shapefile')
##### rasterize mgmt map
##### latlong forest management map
if(!file.exists(mgmt.proj.tif)){
system(sprintf("python %soft-rasterize_attr.py -v %s -i %s -o %s -a %s",
scriptdir,
mgmt.proj,
lc2015.proj,
mgmt.proj.tif,
"code"
))
}
##### utm forest management map
if(!file.exists(mgmt.tif)){
system(sprintf("python %s/oft-rasterize_attr.py -v %s -i %s -o %s -a %s",
scriptdir,
mgmt,
lc2015,
mgmt.tif,
"code"
))
}
## match the extent of the 2 LC maps -- using the extent of 2015
bb<- extent(raster(lc2015))
if(!file.exists(lc2017.aligned)){
system(sprintf("gdal_translate -ot Byte -projwin %s %s %s %s -co COMPRESS=LZW %s %s",
floor(bb@xmin),
ceiling(bb@ymax),
ceiling(bb@xmax),
floor(bb@ymin),
lc2017,
lc2017.aligned
))
}
###############################################################################
################### create change map
###############################################################################
#################### COMBINATION INTO NATIONAL AD SCALE MAP
# 1 = stable forest plantation to plantation
# 2 = stable forest THF to plantation
# 3 = stable forest THF to THF
# 4 = stable forest THF to WL
# 5 = stable forest WL to plantation
# 6 = stable forest WL to WL
# 7 = forest loss plantations
# 8 = forest loss THF
# 9 = forest loss woodlands
# 10 = forest gain plantations
# 11 = forest gain THF
# 12 = forest gain woodlands
# 13 = stable non-forest
if(!file.exists(change)){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --type=Byte --outfile=%s --calc=\"%s\"",
lc2015,
lc2017.aligned,
change,
paste0("(A>0)*(A<3) * (B>0)*(B<3) * 1+", ### stable forest plantation to plantation
"(A>2)*(A<5) * (B>0)*(B<3) * 2+", ### stable forest THF to plantation
"(A>0)*(A<5) * (B>2)*(B<5) * 3+", ### stable forest THF to THF
"(A>2)*(A<5) * (B==5) * 4+", ### stable forest THF to woodlands
"(A==5) * (B==5) * 5+", ### stable forest woodlands to woodlands
"(A==5) * (B>0)*(B<3) * 6+", ### stable forest woodlands to plantation
"(A>0)*(A<3) * (B>5) * 7+", ### forest loss plantation
"(A>2)*(A<5) * (B>5) * 8+", ### forest loss THF
"(A==5) * (B>5) * 9+", ### forest loss woodlands
"(A>5) * (B>0)*(B<3) * 10+",### forest gain plantation
"(A>5) * (B>2)*(B<5) * 11+",### forest gain THF
"(A>5) * (B==5) * 12+",### forest gain woodlands
"(A>5) * (B>5) * 13" ### stable non-forest
)
))
}
################### SIEVE TO THE MMU
if(!file.exists(change.sieved)){
system(sprintf("gdal_sieve.py -st %s %s %s ",
mmu,
change,
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif")
))
################### COMPRESS
system(sprintf("gdal_translate -ot Byte -co COMPRESS=LZW %s %s",
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif"),
change.sieved
))
################### REMOVE UNCOMPRESSED FILE
system(sprintf("rm %s ",
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif")
))
}
################### project to latlong
if(!file.exists(paste0(ad_dir,"change_2015_2017_sieve_wgs84.tif"))){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
change.sieved,
paste0(ad_dir,"change_2015_2017_sieve_wgs84.tif")
))
}
###############################################################################
################### INCLUDE FOREST MANAGEMENT INFORMATION
###############################################################################
################### CREATE PRIVATE LANDS MASK
if(!file.exists(paste0(mgmt_dir,"private_lands_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"private_lands_UTM.tif"),
paste0("(A==0)*1"
)
))
}
################### CREATE UWA LANDS MASK
if(!file.exists(paste0(mgmt_dir,"UWA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"UWA_UTM.tif"),
paste0("(A==10)*1"
)
))
}
################### CREATE NFA LANDS MASK
if(!file.exists(paste0(mgmt_dir,"NFA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"NFA_UTM.tif"),
paste0("(A==100)*1"
)
))
}
################### CHANGE MAP ON PRIVATE LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_private_lands_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"private_lands_UTM.tif"),
paste0(ad_dir,"change_2015_2017_private_lands_UTM.tif"),
paste0("(A*B)"
)
))
}
################### CHANGE MAP ON UWA LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_UWA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"UWA_UTM.tif"),
paste0(ad_dir,"change_2015_2017_UWA_UTM.tif"),
paste0("(A*B)"
)
))
}
################### CHANGE MAP ON NFA LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_NFA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"NFA_UTM.tif"),
paste0(ad_dir,"change_2015_2017_NFA_UTM.tif"),
paste0("(A*B)"
)
))
} | /scripts/land_cover_maps/ad1_prepare_maps.R | no_license | mutyabajoekk/uga_activity_data | R | false | false | 8,766 | r | ####################################################
## Uganda prepare data for activity data analysis ##
####################################################
## This script reprojects the map data and combines
## the maps
## contact : yelena.finegold@fao.org
## created : 09 April 2019
## modified: 24 April 2019
####################################################
### load the parameters
source('~/uga_activity_data/scripts/get_parameters.R')
### load data
## reference data for changes
cefile <- paste0(ref_dir,'TOTAL_collectedData_earthuri_ce_changes1517_on_080319_151929_CSV.csv')
## 2015 land cover map
lc2015 <- paste0(lc15_dir,'sieved_LC_2015.tif')
## 2017 land cover map
lc2017 <- paste0(lc17_dir,'LC_2017_18012019.tif')
## forest management areas
mgmt <- paste0(mgmt_dir,'Protected_Areas_UTMWGS84_dslv.shp')
## Latlong projection used to reproject data
proj <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
### assign names to data that will be created in this script
lc2015.proj <- paste0(lc15_dir,"sieved_LC_2015_proj.tif")
lc2017.proj <- paste0(lc17_dir,"LC_2017_18012019_proj.tif")
mgmt.proj <- paste0(mgmt_dir,'Protected_Areas_WGS84_dslv.shp')
mgmt.proj.tif <- paste0(mgmt_dir,"Protected_Areas_WGS84_dslv.tif")
mgmt.tif <- paste0(mgmt_dir,"Protected_Areas_UTM_dslv.tif")
lc2017.aligned <- paste0(lc17_dir,'LC_2017_18012019_aligned.tif')
change <- paste0(ad_dir,"change_2015_2017.tif")
change.sieved <- paste0(ad_dir,"change_2015_2017_sieve.tif")
###############################################################################
################### REPROJECT IN latlong PROJECTION
###############################################################################
# 2015 LC map
if(!file.exists(lc2015.proj)){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
lc2015,
lc2015.proj
))
}
# 2017 LC map
if(!file.exists(lc2017.proj)){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
lc2017,
lc2017.proj
))
}
##### reproject mgmt data into latlong
mgmt.data <- readOGR(mgmt)
#download province boundaries
adm <- getData ('GADM', country= countrycode, level=1)
#match the coordinate systems for the sample points and the boundaries
mgmt.data.proj <- spTransform(mgmt.data,crs(adm))
writeOGR(mgmt.data.proj,mgmt_dir,'Protected_Areas_WGS84_dslv',driver = 'ESRI Shapefile')
##### rasterize mgmt map
##### latlong forest management map
if(!file.exists(mgmt.proj.tif)){
system(sprintf("python %soft-rasterize_attr.py -v %s -i %s -o %s -a %s",
scriptdir,
mgmt.proj,
lc2015.proj,
mgmt.proj.tif,
"code"
))
}
##### utm forest management map
if(!file.exists(mgmt.tif)){
system(sprintf("python %s/oft-rasterize_attr.py -v %s -i %s -o %s -a %s",
scriptdir,
mgmt,
lc2015,
mgmt.tif,
"code"
))
}
## match the extent of the 2 LC maps -- using the extent of 2015
bb<- extent(raster(lc2015))
if(!file.exists(lc2017.aligned)){
system(sprintf("gdal_translate -ot Byte -projwin %s %s %s %s -co COMPRESS=LZW %s %s",
floor(bb@xmin),
ceiling(bb@ymax),
ceiling(bb@xmax),
floor(bb@ymin),
lc2017,
lc2017.aligned
))
}
###############################################################################
################### create change map
###############################################################################
#################### COMBINATION INTO NATIONAL AD SCALE MAP
# 1 = stable forest plantation to plantation
# 2 = stable forest THF to plantation
# 3 = stable forest THF to THF
# 4 = stable forest THF to WL
# 5 = stable forest WL to plantation
# 6 = stable forest WL to WL
# 7 = forest loss plantations
# 8 = forest loss THF
# 9 = forest loss woodlands
# 10 = forest gain plantations
# 11 = forest gain THF
# 12 = forest gain woodlands
# 13 = stable non-forest
if(!file.exists(change)){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --type=Byte --outfile=%s --calc=\"%s\"",
lc2015,
lc2017.aligned,
change,
paste0("(A>0)*(A<3) * (B>0)*(B<3) * 1+", ### stable forest plantation to plantation
"(A>2)*(A<5) * (B>0)*(B<3) * 2+", ### stable forest THF to plantation
"(A>0)*(A<5) * (B>2)*(B<5) * 3+", ### stable forest THF to THF
"(A>2)*(A<5) * (B==5) * 4+", ### stable forest THF to woodlands
"(A==5) * (B==5) * 5+", ### stable forest woodlands to woodlands
"(A==5) * (B>0)*(B<3) * 6+", ### stable forest woodlands to plantation
"(A>0)*(A<3) * (B>5) * 7+", ### forest loss plantation
"(A>2)*(A<5) * (B>5) * 8+", ### forest loss THF
"(A==5) * (B>5) * 9+", ### forest loss woodlands
"(A>5) * (B>0)*(B<3) * 10+",### forest gain plantation
"(A>5) * (B>2)*(B<5) * 11+",### forest gain THF
"(A>5) * (B==5) * 12+",### forest gain woodlands
"(A>5) * (B>5) * 13" ### stable non-forest
)
))
}
################### SIEVE TO THE MMU
if(!file.exists(change.sieved)){
system(sprintf("gdal_sieve.py -st %s %s %s ",
mmu,
change,
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif")
))
################### COMPRESS
system(sprintf("gdal_translate -ot Byte -co COMPRESS=LZW %s %s",
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif"),
change.sieved
))
################### REMOVE UNCOMPRESSED FILE
system(sprintf("rm %s ",
paste0(ad_dir,"tmp_change_2015_2017_sieve.tif")
))
}
################### project to latlong
if(!file.exists(paste0(ad_dir,"change_2015_2017_sieve_wgs84.tif"))){
system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -multi -co COMPRESS=LZW %s %s",
proj,
change.sieved,
paste0(ad_dir,"change_2015_2017_sieve_wgs84.tif")
))
}
###############################################################################
################### INCLUDE FOREST MANAGEMENT INFORMATION
###############################################################################
################### CREATE PRIVATE LANDS MASK
if(!file.exists(paste0(mgmt_dir,"private_lands_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"private_lands_UTM.tif"),
paste0("(A==0)*1"
)
))
}
################### CREATE UWA LANDS MASK
if(!file.exists(paste0(mgmt_dir,"UWA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"UWA_UTM.tif"),
paste0("(A==10)*1"
)
))
}
################### CREATE NFA LANDS MASK
if(!file.exists(paste0(mgmt_dir,"NFA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
mgmt.tif,
paste0(mgmt_dir,"NFA_UTM.tif"),
paste0("(A==100)*1"
)
))
}
################### CHANGE MAP ON PRIVATE LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_private_lands_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"private_lands_UTM.tif"),
paste0(ad_dir,"change_2015_2017_private_lands_UTM.tif"),
paste0("(A*B)"
)
))
}
################### CHANGE MAP ON UWA LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_UWA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"UWA_UTM.tif"),
paste0(ad_dir,"change_2015_2017_UWA_UTM.tif"),
paste0("(A*B)"
)
))
}
################### CHANGE MAP ON NFA LANDS
if(!file.exists(paste0(ad_dir,"change_2015_2017_NFA_UTM.tif"))){
system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
change.sieved,
paste0(mgmt_dir,"NFA_UTM.tif"),
paste0(ad_dir,"change_2015_2017_NFA_UTM.tif"),
paste0("(A*B)"
)
))
} |
\name{event_data}
\Rdversion{1.2.5}
\alias{event_data}
\docType{data}
\title{
Neurodevelopmental event timing data consisting of 106 known and unknown events across 10 mammals (8 non-primates and 2 primates).
}
\description{
Neurodevelopmental event timing data consisting of 106 known and unknown events across 10 mammals (i.e. 8 non-primates and 2 primates). Unknown events are represented by zeros. The ordering of the columns is such that the event timings of the non-primate species precede those of primate species. The structure of the \emph{event_data} is shown below.
}
\usage{data(event_data)}
\format{
Variables in event_data follow the order below.
\describe{
\item{\code{Event}}{Name of the neurodevelopmental event}
\item{\code{Hamster}}{Hamster neurodevelopmental event timing}
\item{\code{Mouse}}{Mouse neurodevelopmental event timing}
\item{\code{Rat}}{Rat neurodevelopmental event timing}
\item{\code{Rabbit}}{Rabbit neurodevelopmental event timing}
\item{\code{Spiny.Mouse}}{Spiny-Mouse neurodevelopmental event timing}
\item{\code{Guinea.Pig}}{Guinea Pig neurodevelopmental event timing}
\item{\code{Ferret}}{Ferret neurodevelopmental event timing}
\item{\code{Cat}}{Cat neurodevelopmental event timing}
\item{\code{Macaque}}{Macaque neurodevelopmental event timing}
\item{\code{Human}}{Human neurodevelopmental event timing}
\item{\code{Cortical}}{Cortical neurodevelopmental event (1 yes, 0 no) }
\item{\code{Limbic}}{Limbic neurodevelopmental event (1 yes, 0 no) }
\item{\code{Reference}}{Bibiographic Reference for the corresponding event}
}
}
\examples{
\dontrun{
#Neurodevelopmental event timing data consisting of 106 known and unknown
#events across 10 species. The columns are arranged in the order described
#above
library(ttime);
data(event_data);
}
}
\references{
Nagarajan R, Darlington RB, Finlay BL, Clancy B. (2010). \emph{ttime: an R package for translating the timing of brain development across mammalian species.} Neuroinformatics, 8(3), 201-205.\cr
Finlay, B.L., & Darlington, R.B. (1995). \emph{Linked regularities in the development and evolution of mammalian brains.} Science, 268,1578-1584.\cr
Clancy, B., Darlington, R.B., & Finlay, B.L. (2000). \emph{The course of human events: predicting the timing of primate neural development.} Developmental Science, 3, 57-66.\cr
}
| /man/event_data.Rd | no_license | cran/ttime | R | false | false | 2,441 | rd | \name{event_data}
\Rdversion{1.2.5}
\alias{event_data}
\docType{data}
\title{
Neurodevelopmental event timing data consisting of 106 known and unknown events across 10 mammals (8 non-primates and 2 primates).
}
\description{
Neurodevelopmental event timing data consisting of 106 known and unknown events across 10 mammals (i.e. 8 non-primates and 2 primates). Unknown events are represented by zeros. The ordering of the columns is such that the event timings of the non-primate species precede those of primate species. The structure of the \emph{event_data} is shown below.
}
\usage{data(event_data)}
\format{
Variables in event_data follow the order below.
\describe{
\item{\code{Event}}{Name of the neurodevelopmental event}
\item{\code{Hamster}}{Hamster neurodevelopmental event timing}
\item{\code{Mouse}}{Mouse neurodevelopmental event timing}
\item{\code{Rat}}{Rat neurodevelopmental event timing}
\item{\code{Rabbit}}{Rabbit neurodevelopmental event timing}
\item{\code{Spiny.Mouse}}{Spiny-Mouse neurodevelopmental event timing}
\item{\code{Guinea.Pig}}{Guinea Pig neurodevelopmental event timing}
\item{\code{Ferret}}{Ferret neurodevelopmental event timing}
\item{\code{Cat}}{Cat neurodevelopmental event timing}
\item{\code{Macaque}}{Macaque neurodevelopmental event timing}
\item{\code{Human}}{Human neurodevelopmental event timing}
\item{\code{Cortical}}{Cortical neurodevelopmental event (1 yes, 0 no) }
\item{\code{Limbic}}{Limbic neurodevelopmental event (1 yes, 0 no) }
\item{\code{Reference}}{Bibiographic Reference for the corresponding event}
}
}
\examples{
\dontrun{
#Neurodevelopmental event timing data consisting of 106 known and unknown
#events across 10 species. The columns are arranged in the order described
#above
library(ttime);
data(event_data);
}
}
\references{
Nagarajan R, Darlington RB, Finlay BL, Clancy B. (2010). \emph{ttime: an R package for translating the timing of brain development across mammalian species.} Neuroinformatics, 8(3), 201-205.\cr
Finlay, B.L., & Darlington, R.B. (1995). \emph{Linked regularities in the development and evolution of mammalian brains.} Science, 268,1578-1584.\cr
Clancy, B., Darlington, R.B., & Finlay, B.L. (2000). \emph{The course of human events: predicting the timing of primate neural development.} Developmental Science, 3, 57-66.\cr
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Reserves_Util.R
\name{get_Aries_Enertia_Links_by_Enertia_Code}
\alias{get_Aries_Enertia_Links_by_Enertia_Code}
\title{Retrieve Aries-Enertia links as a data frame for a user defined vector of enertia codes.}
\usage{
get_Aries_Enertia_Links_by_Enertia_Code(enertia_codes, export = FALSE)
}
\arguments{
\item{enertia_codes}{a character vector.}
\item{export}{a boolean value}
}
\value{
A dataframe containing the Aries-Enertia links for a specified vector of enertia codes
}
\description{
The following functin will make a connection to the Enertia database located on the Enertia01B Server.
It will then take the user defined 'enertia_codes' vector which contains one or more enertia codes to query.
The function will then return a dataframe object with all the current aries_enertia links that exists at the time of the query.
the export variable can be set equal to TRUE to export the output to the users desktop.
}
\examples{
get_Aries_Enertia_Links_by_Enertia_Code("49.1018.0010.00", export = TRUE)
}
| /man/get_Aries_Enertia_Links_by_Enertia_Code.Rd | no_license | Hilcorp-Reserves/Reserves.Util | R | false | true | 1,084 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Reserves_Util.R
\name{get_Aries_Enertia_Links_by_Enertia_Code}
\alias{get_Aries_Enertia_Links_by_Enertia_Code}
\title{Retrieve Aries-Enertia links as a data frame for a user defined vector of enertia codes.}
\usage{
get_Aries_Enertia_Links_by_Enertia_Code(enertia_codes, export = FALSE)
}
\arguments{
\item{enertia_codes}{a character vector.}
\item{export}{a boolean value}
}
\value{
A dataframe containing the Aries-Enertia links for a specified vector of enertia codes
}
\description{
The following functin will make a connection to the Enertia database located on the Enertia01B Server.
It will then take the user defined 'enertia_codes' vector which contains one or more enertia codes to query.
The function will then return a dataframe object with all the current aries_enertia links that exists at the time of the query.
the export variable can be set equal to TRUE to export the output to the users desktop.
}
\examples{
get_Aries_Enertia_Links_by_Enertia_Code("49.1018.0010.00", export = TRUE)
}
|
library(plyr)
library(dplyr)
library(brms)
library(ggplot2)
library(tidybayes)
library(modelr)
library(ggdist)
library(bridgesampling)
library(loo)
# load('fits/bayesian_hierarchical_changepoint_loos.Rda')
# load('fits/bayesian_hierarchical_changepoint_models.Rda')
### load and visualize data ###
data <- read.csv('datasets/CP.csv')
ggplot(data, aes(x=Numerosity, y=RT, group=Subj)) +
geom_point()
bform <- brms::bf( # explicit brms dependencies (bf is also a function of bridgesampling)
RT ~ b0 + b1 * (Numerosity - 5) * step(5 - Numerosity) + b2 * (Numerosity - 5) * step(Numerosity - 5),
b0 + b1 + b2 ~ 1 + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3)
data$pred_m0 <- fitted(m)
ggplot(data, aes(x=Numerosity, y=pred_m0[,'Estimate'], group=Subj)) +
geom_line()
data %>%
add_predicted_draws(m, re_formula = NA) %>%
ggplot(aes(x = Numerosity, y = RT)) +
stat_lineribbon(aes(y = .prediction), .width = c(.99, .95, .90, .8, .5), color = "blue") +
geom_point(data = data, size = 2, alpha=0.5) +
scale_fill_brewer() +
theme_bw()
### Group difference analysis ###
ggplot(data, aes(x=Numerosity, y=RT, group=Subj, color=Group)) +
facet_grid(~Group, labeller = label_both) +
geom_point()
### Model with changepoint at Numerosity=3 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 3) * step(3 - Numerosity) + b2 * (Numerosity - 3) * step(Numerosity - 3),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp3 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### Model with changepoint at Numerosity=4 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 4) * step(4 - Numerosity) + b2 * (Numerosity - 4) * step(Numerosity - 4),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp4 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### Model with changepoint at Numerosity=6 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 6) * step(6 - Numerosity) + b2 * (Numerosity - 6) * step(Numerosity - 6),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp6 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
# leave-one-out
loo_cp3 <- loo(m_cp3) # reloo = TRUE
loo_cp4 <- loo(m_cp4)
loo_cp6 <- loo(m_cp6)
# Inspect comparison
loo_compare(loo_cp3, loo_cp4, loo_cp6)
#####################################
### analyze m_cp4, the best model ###
#####################################
### posterior predictive check ###
pp_check(m_cp4)
pp_check(m_cp4, type='stat', stat='mean')
post_pred <- add_predicted_draws(data, m_cp4) %>%
ddply(.(Group, Subj, Numerosity), summarize, post_mean_RT=mean(.prediction))
ggplot(post_pred, aes(x=Numerosity, y=post_mean_RT, group=Subj, color=Group)) +
facet_grid(~ Group, labeller = label_both) +
geom_point(alpha=0.8) +
geom_point(data=data, mapping=aes(x=Numerosity, y=RT, group=Subj), color='black', alpha=0.3)
### contrast analysis ###
# set the hypotheses #
contr <- c('pre-slope G vs pre-slope A' = 'b1_GroupG = 0',
'post-slope G vs post-slope A' = 'b2_GroupG = 0',
'pre-slope A vs post-slope A' = 'b2_Intercept - b1_Intercept = 0',
'pre-slope G vs post-slope G' = '(b2_Intercept + b2_GroupG) - (b1_Intercept + b1_GroupG) = 0')
h <- hypothesis(m_cp4, contr)
plot(h)[[1]] + geom_vline(xintercept = 0, col='red', linetype='dashed')
#####################################
### analyze m_cp4, the best model ###
#####################################
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 4) * step(4 - Numerosity) + b2 * (Numerosity - 4) * step(Numerosity - 4),
b0 + b1 + b2 ~ 1 + Group + (1|gr(Subj, by=Group)),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp4_cond_cov <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### model selection (parsimony) ###
bridge_0 <- bridge_sampler(m_cp4)
bridge_1 <- bridge_sampler(m_cp4_cond_cov)
bf(bridge_0, bridge_1)
| /bayesian_hiearchical_changepoint.R | no_license | kincsesbalint/BayesWorkshop | R | false | false | 5,252 | r | library(plyr)
library(dplyr)
library(brms)
library(ggplot2)
library(tidybayes)
library(modelr)
library(ggdist)
library(bridgesampling)
library(loo)
# load('fits/bayesian_hierarchical_changepoint_loos.Rda')
# load('fits/bayesian_hierarchical_changepoint_models.Rda')
### load and visualize data ###
data <- read.csv('datasets/CP.csv')
ggplot(data, aes(x=Numerosity, y=RT, group=Subj)) +
geom_point()
bform <- brms::bf( # explicit brms dependencies (bf is also a function of bridgesampling)
RT ~ b0 + b1 * (Numerosity - 5) * step(5 - Numerosity) + b2 * (Numerosity - 5) * step(Numerosity - 5),
b0 + b1 + b2 ~ 1 + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3)
data$pred_m0 <- fitted(m)
ggplot(data, aes(x=Numerosity, y=pred_m0[,'Estimate'], group=Subj)) +
geom_line()
data %>%
add_predicted_draws(m, re_formula = NA) %>%
ggplot(aes(x = Numerosity, y = RT)) +
stat_lineribbon(aes(y = .prediction), .width = c(.99, .95, .90, .8, .5), color = "blue") +
geom_point(data = data, size = 2, alpha=0.5) +
scale_fill_brewer() +
theme_bw()
### Group difference analysis ###
ggplot(data, aes(x=Numerosity, y=RT, group=Subj, color=Group)) +
facet_grid(~Group, labeller = label_both) +
geom_point()
### Model with changepoint at Numerosity=3 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 3) * step(3 - Numerosity) + b2 * (Numerosity - 3) * step(Numerosity - 3),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp3 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### Model with changepoint at Numerosity=4 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 4) * step(4 - Numerosity) + b2 * (Numerosity - 4) * step(Numerosity - 4),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp4 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### Model with changepoint at Numerosity=6 ###
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 6) * step(6 - Numerosity) + b2 * (Numerosity - 6) * step(Numerosity - 6),
b0 + b1 + b2 ~ 1 + Group + (1|Subj),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp6 <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
# leave-one-out
loo_cp3 <- loo(m_cp3) # reloo = TRUE
loo_cp4 <- loo(m_cp4)
loo_cp6 <- loo(m_cp6)
# Inspect comparison
loo_compare(loo_cp3, loo_cp4, loo_cp6)
#####################################
### analyze m_cp4, the best model ###
#####################################
### posterior predictive check ###
pp_check(m_cp4)
pp_check(m_cp4, type='stat', stat='mean')
post_pred <- add_predicted_draws(data, m_cp4) %>%
ddply(.(Group, Subj, Numerosity), summarize, post_mean_RT=mean(.prediction))
ggplot(post_pred, aes(x=Numerosity, y=post_mean_RT, group=Subj, color=Group)) +
facet_grid(~ Group, labeller = label_both) +
geom_point(alpha=0.8) +
geom_point(data=data, mapping=aes(x=Numerosity, y=RT, group=Subj), color='black', alpha=0.3)
### contrast analysis ###
# set the hypotheses #
contr <- c('pre-slope G vs pre-slope A' = 'b1_GroupG = 0',
'post-slope G vs post-slope A' = 'b2_GroupG = 0',
'pre-slope A vs post-slope A' = 'b2_Intercept - b1_Intercept = 0',
'pre-slope G vs post-slope G' = '(b2_Intercept + b2_GroupG) - (b1_Intercept + b1_GroupG) = 0')
h <- hypothesis(m_cp4, contr)
plot(h)[[1]] + geom_vline(xintercept = 0, col='red', linetype='dashed')
#####################################
### analyze m_cp4, the best model ###
#####################################
bform <- brms::bf(
RT ~ b0 + b1 * (Numerosity - 4) * step(4 - Numerosity) + b2 * (Numerosity - 4) * step(Numerosity - 4),
b0 + b1 + b2 ~ 1 + Group + (1|gr(Subj, by=Group)),
nl = TRUE
)
bprior <- set_prior("normal(0, 100)", lb = 0, nlpar = 'b0') +
prior(normal(0, 100), nlpar = "b1") +
prior(normal(0, 100), nlpar = "b2")
m_cp4_cond_cov <- brm(formula = bform, data = data, prior = bprior, iter = 4000, warmup = 500, chains = 3, cores = 3,
save_pars=save_pars(all = TRUE), sample_prior = TRUE)
### model selection (parsimony) ###
bridge_0 <- bridge_sampler(m_cp4)
bridge_1 <- bridge_sampler(m_cp4_cond_cov)
bf(bridge_0, bridge_1)
|
p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point()
p <-
p + annotate("pointrange", x = 3.5, y = 20, ymin = 12, ymax = 28,
colour = "red", size = 1.5)
| /ggplot2/Layers/Annotations/annotate/example5.R | no_license | plotly/ssim_baselines | R | false | false | 163 | r | p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point()
p <-
p + annotate("pointrange", x = 3.5, y = 20, ymin = 12, ymax = 28,
colour = "red", size = 1.5)
|
c("et",
"il",
"le",
"estre1",
"de",
"que4",
"si",
"a3",
"qui",
"avoir",
"en1",
#"saint",
"je",
"son4",
#"dire",
"ne1",
"ce1",
"tot",
#"faire",
"tu",
"un",
"por2",
"cel",
#"seignor",
"de+le",
"cant1",
"par",
"que2",
"nostre",
#"grant",
"soi1",
"come1",
"mout",
"a3+le",
#"vëoir",
#"venir",
#"ome",
"en2",
#"pöoir",
"nos1",
"vos",
"ne2",
"car",
#"savoir",
#"aler",
"lor2",
"cest",
"se",
"mais1",
"mon1",
"mie",
"i2",
#"chose",
#"voloir",
"où",
"bien1",
"devant",
"nul",
"là",
"tant",
"autre",
"dont",
#"metre2",
#"comencier",
#"jor",
#"prendre",
#"doner",
"ton4",
"lors",
"en1+le",
#"cors1",
#"comander",
#"dieu",
#"respondre1",
#"öir",
#"devoir",
"plus",
#"apostle",
#"morir",
"aussi",
#"dïable",
#"nom",
#"bel1",
#"benëir",
#"priier",
#"vie1",
#"cité",
"on",
"ainsi",
#"naistre",
"ce2",
#"deus",
"tel",
"quel1",
#"croire",
#"laissier",
#"terre",
"sor2",
"après",
#"lieu",
#"haut",
#"parole",
#"pere",
#"maniere",
#"gent1",
#"bon",
#"roi2",
#"parler",
#"fil2",
"puis",
"que4+il",
#"apeler",
#"ensemble",
#"rendre",
"ja",
#"feme",
"mëisme",
#"ciel",
#"frere",
#"trover",
"o3",
#"tenir1",
"sans",
#"main2",
#"pueple",
"pas",
#"sofrir",
#"mener",
#"recevoir",
"o4",
"or4",
#"tens",
#"conter",
#"mal1",
"avuec",
#"evesque",
#"miracle",
#"segm",
"encore",
"ainz",
"onque",
#"entrer",
#"mort",
#"cuidier1",
#"torment",
#"oir",
#"maison",
#"entendre",
"plusor",
"donc",
"fors1",
#"avenir",
#"dame",
#"enfant",
#"virge",
#"piere",
#"siecle",
#"pié",
#"lever",
#"plorer",
"vostre",
#"angele",
"coment1",
#"crestiien",
#"grace",
#"demander",
#"garder",
#"porter",
#"chëoir",
#"plein",
"cui",
#"voie",
#"verai",
#"emperëor",
"non",
"poi",
#"envoiier",
#"issir",
#"voiz",
#"uevre",
#"chevalier",
#"aorer2",
#"joie",
#"nuit",
#"foi",
#"laver",
"tantost",
#"conoistre",
#"peine",
"très",
#"pëor",
#"volenté",
#"contree",
#"cuer2",
#"loi3",
#"assembler",
#"crïer2",
#"vertu",
#"jesir",
"ci",
#"trois",
#"enseignier",
#"löer1",
#"orison",
"si+il",
#"amener",
"dedenz",
#"mere",
#"departir",
#"crëance",
#"croiz",
#"mangier",
#"prestre",
"adonc",
#"feu",
"que3",
#"serjant",
#"sol1",
#"vers2",
#"mer",
"maint",
#"comandement",
#"sembler",
#"ocire",
#"vivre1",
#"servir",
#"maistre",
#"revenir",
#"lonc",
"avant",
#"aporter",
#"covenir",
#"nature",
"encontre1",
#"monde1",
#"priiere",
#"mauvais",
#"petit",
"tresque1",
#"cri",
#"jhesu",
#"chanter",
#"droit",
#"moine1",
#"regarder",
#"corir",
#"querre",
#"povre",
#"ame",
#"deguerpir"
"aucun",
#"compagnon",
#"trespasser",
#"demorer",
#"gloire",
#"compagnie",
"chascun",
#"pechié",
#"dolor",
#"vent",
#"ardre",
#"reconter",
#"force2",
#"prëechier",
"près",
#"pucele",
#"bien2",
#"penser",
#"repairier",
"tost1",
#"douz",
#"navoir",
#"perdre",
#"resusciter",
#"eglise",
#"ueil",
#"destruire",
#"remanoir",
#"rien",
"enz",
#"ore3",
#"chief1",
#"jeter",
#"amer1",
#"traire",
#"ariere",
#"esperit",
"coi2",
#"movoir",
#"sacrefiier",
#"desciple",
#"taire",
#"batisier",
#"beste",
##"porte1",
#"temple1",
"maintenant"#,
#"martire2",
#"torner",
#"mostrer"
)
| /functionLemmas.R | no_license | Jean-Baptiste-Camps/Wauchier_stylo | R | false | false | 3,004 | r | c("et",
"il",
"le",
"estre1",
"de",
"que4",
"si",
"a3",
"qui",
"avoir",
"en1",
#"saint",
"je",
"son4",
#"dire",
"ne1",
"ce1",
"tot",
#"faire",
"tu",
"un",
"por2",
"cel",
#"seignor",
"de+le",
"cant1",
"par",
"que2",
"nostre",
#"grant",
"soi1",
"come1",
"mout",
"a3+le",
#"vëoir",
#"venir",
#"ome",
"en2",
#"pöoir",
"nos1",
"vos",
"ne2",
"car",
#"savoir",
#"aler",
"lor2",
"cest",
"se",
"mais1",
"mon1",
"mie",
"i2",
#"chose",
#"voloir",
"où",
"bien1",
"devant",
"nul",
"là",
"tant",
"autre",
"dont",
#"metre2",
#"comencier",
#"jor",
#"prendre",
#"doner",
"ton4",
"lors",
"en1+le",
#"cors1",
#"comander",
#"dieu",
#"respondre1",
#"öir",
#"devoir",
"plus",
#"apostle",
#"morir",
"aussi",
#"dïable",
#"nom",
#"bel1",
#"benëir",
#"priier",
#"vie1",
#"cité",
"on",
"ainsi",
#"naistre",
"ce2",
#"deus",
"tel",
"quel1",
#"croire",
#"laissier",
#"terre",
"sor2",
"après",
#"lieu",
#"haut",
#"parole",
#"pere",
#"maniere",
#"gent1",
#"bon",
#"roi2",
#"parler",
#"fil2",
"puis",
"que4+il",
#"apeler",
#"ensemble",
#"rendre",
"ja",
#"feme",
"mëisme",
#"ciel",
#"frere",
#"trover",
"o3",
#"tenir1",
"sans",
#"main2",
#"pueple",
"pas",
#"sofrir",
#"mener",
#"recevoir",
"o4",
"or4",
#"tens",
#"conter",
#"mal1",
"avuec",
#"evesque",
#"miracle",
#"segm",
"encore",
"ainz",
"onque",
#"entrer",
#"mort",
#"cuidier1",
#"torment",
#"oir",
#"maison",
#"entendre",
"plusor",
"donc",
"fors1",
#"avenir",
#"dame",
#"enfant",
#"virge",
#"piere",
#"siecle",
#"pié",
#"lever",
#"plorer",
"vostre",
#"angele",
"coment1",
#"crestiien",
#"grace",
#"demander",
#"garder",
#"porter",
#"chëoir",
#"plein",
"cui",
#"voie",
#"verai",
#"emperëor",
"non",
"poi",
#"envoiier",
#"issir",
#"voiz",
#"uevre",
#"chevalier",
#"aorer2",
#"joie",
#"nuit",
#"foi",
#"laver",
"tantost",
#"conoistre",
#"peine",
"très",
#"pëor",
#"volenté",
#"contree",
#"cuer2",
#"loi3",
#"assembler",
#"crïer2",
#"vertu",
#"jesir",
"ci",
#"trois",
#"enseignier",
#"löer1",
#"orison",
"si+il",
#"amener",
"dedenz",
#"mere",
#"departir",
#"crëance",
#"croiz",
#"mangier",
#"prestre",
"adonc",
#"feu",
"que3",
#"serjant",
#"sol1",
#"vers2",
#"mer",
"maint",
#"comandement",
#"sembler",
#"ocire",
#"vivre1",
#"servir",
#"maistre",
#"revenir",
#"lonc",
"avant",
#"aporter",
#"covenir",
#"nature",
"encontre1",
#"monde1",
#"priiere",
#"mauvais",
#"petit",
"tresque1",
#"cri",
#"jhesu",
#"chanter",
#"droit",
#"moine1",
#"regarder",
#"corir",
#"querre",
#"povre",
#"ame",
#"deguerpir"
"aucun",
#"compagnon",
#"trespasser",
#"demorer",
#"gloire",
#"compagnie",
"chascun",
#"pechié",
#"dolor",
#"vent",
#"ardre",
#"reconter",
#"force2",
#"prëechier",
"près",
#"pucele",
#"bien2",
#"penser",
#"repairier",
"tost1",
#"douz",
#"navoir",
#"perdre",
#"resusciter",
#"eglise",
#"ueil",
#"destruire",
#"remanoir",
#"rien",
"enz",
#"ore3",
#"chief1",
#"jeter",
#"amer1",
#"traire",
#"ariere",
#"esperit",
"coi2",
#"movoir",
#"sacrefiier",
#"desciple",
#"taire",
#"batisier",
#"beste",
##"porte1",
#"temple1",
"maintenant"#,
#"martire2",
#"torner",
#"mostrer"
)
|
\name{ji.ds.complete.cases}
\alias{ji.ds.complete.cases}
\title{Returns a logical vector indicating which cases are complete, i.e., have no missing values.}
\usage{
ji.ds.complete.cases(datasources = NULL, x = NULL,
newobj = NULL)
}
\arguments{
\item{datasources}{a list of opal object(s) obtained
after login in to opal servers; these objects hold also
the data assign to R, as \code{dataframe}, from opal
datasources.}
\item{x}{a list of objects to be checked for
completeness.}
\item{newobj}{the name of the new vector. If this argument
is set to \code{NULL}, the name of the new variable is
the name of the input variable with the suffix
'_complete' (e.g. 'D_complete', if input variable's name
is 'D')}
}
\value{
a message is displayed when the action is completed.
}
\description{
This function is similar to R function
\code{complete.cases}.
}
\examples{
{
# load the file that contains the login details
data(logindata)
# login and assign specific variable(s)
opals <- datashield.login(logins=logindata,assign=TRUE)
# Create a vector with indices for complete observations (rows)
ji.ds.complete.cases(datasources=opals, x=quote(D))
# Create a vector with indices for complete observations for LAB_TSC and PM_BMI_CONTINUOUS variables
input = list(quote(D$LAB_TSC),quote(D$PM_BMI_CONTINUOUS))
ji.ds.complete.cases(datasources=opals, x=input, newobj='TSC_BMI_complete')
}
}
\author{
Gaye, A. (amadou.gaye)
}
| /man/ji.ds.complete.cases.Rd | no_license | datashield/ji.dev.cl | R | false | false | 1,450 | rd | \name{ji.ds.complete.cases}
\alias{ji.ds.complete.cases}
\title{Returns a logical vector indicating which cases are complete, i.e., have no missing values.}
\usage{
ji.ds.complete.cases(datasources = NULL, x = NULL,
newobj = NULL)
}
\arguments{
\item{datasources}{a list of opal object(s) obtained
after login in to opal servers; these objects hold also
the data assign to R, as \code{dataframe}, from opal
datasources.}
\item{x}{a list of objects to be checked for
completeness.}
\item{newobj}{the name of the new vector. If this argument
is set to \code{NULL}, the name of the new variable is
the name of the input variable with the suffix
'_complete' (e.g. 'D_complete', if input variable's name
is 'D')}
}
\value{
a message is displayed when the action is completed.
}
\description{
This function is similar to R function
\code{complete.cases}.
}
\examples{
{
# load the file that contains the login details
data(logindata)
# login and assign specific variable(s)
opals <- datashield.login(logins=logindata,assign=TRUE)
# Create a vector with indices for complete observations (rows)
ji.ds.complete.cases(datasources=opals, x=quote(D))
# Create a vector with indices for complete observations for LAB_TSC and PM_BMI_CONTINUOUS variables
input = list(quote(D$LAB_TSC),quote(D$PM_BMI_CONTINUOUS))
ji.ds.complete.cases(datasources=opals, x=input, newobj='TSC_BMI_complete')
}
}
\author{
Gaye, A. (amadou.gaye)
}
|
# load models for testing:
load_test_models <- function() {
seed <- 1
data <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data.csv"))
data_ar <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_ar.csv"))
data_car1 <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_car1.csv"))
fit <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test"),
seed,
brms::bf(y | cens(ycens, y2 = y2) ~ 1),
data,
brms::prior(normal(0, 1), class = Intercept),
car1 = FALSE,
save_warmup = FALSE,
chains = 3
)
form_ar <- brms::bf(y ~ ar(time = date, gr = series), sigma ~ series)
prior_ar <- brms::prior(normal(0, 1), class = Intercept)
fit_ar <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_ar"),
seed,
form_ar,
data_ar,
prior_ar,
save_warmup = FALSE,
chains = 2
)
form_car1 <- brms::bf(y ~ ar(time = x))
phi_car1 <- .45
fit_car1 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_car1"),
seed,
form_car1,
data_car1,
prior_ar,
save_warmup = FALSE,
chains = 2
)
list(
# data:
data = data,
data_ar = data_ar,
data_car1 = data_car1,
# models:
fit = fit,
fit_ar = fit_ar,
fit_car1 = fit_car1,
# other inputs:
form_ar = form_ar,
form_car1 = form_car1,
prior_ar = prior_ar,
phi_car1 = phi_car1
)
}
# Fit (or load from cache) the three GAM test models used by the
# local_slope() tests, plus their data, returned as a named list.
load_test_gams <- function() {
# fixed seed so the (cached) fits are reproducible
seed <- 1
# for local_slope.R
data_gam <- read.csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_gam.csv"))
data_gam2 <- read.csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_gam2.csv"))
# smooths of x0..x3, single group
fit_gam <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam"),
seed,
brms::bf(y ~ s(x0) + s(x1) + s(x2) + s(x3)),
data_gam,
car1 = FALSE,
chains = 2
)
# by-group smooths of x0..x3
fit_gam2 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam2"),
seed,
brms::bf(y ~ s(x0, by = g) + s(x1, by = g) + s(x2, by = g) + s(x3, by = g)),
data_gam2,
save_warmup = FALSE,
car1 = FALSE,
chains = 2
)
# by-group smooths plus an AR term
# NOTE(review): this reuses the "/test_gam2" cache path even though the
# formula differs (added ar(time = x1, gr = g)) -- if fit_stan_model()
# loads from cache, fit_gam3 will silently be the fit_gam2 model; confirm
# whether the path should be "/test_gam3"
fit_gam3 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam2"),
seed,
brms::bf(y ~ s(x0, by = g) + s(x1, by = g) + s(x2, by = g) + s(x3, by = g) + ar(time = x1, gr = g)),
data_gam2,
save_warmup = FALSE,
car1 = FALSE,
chains = 2
)
list(
# data:
data_gam = data_gam,
data_gam2 = data_gam2,
# models:
fit_gam = fit_gam,
fit_gam2 = fit_gam2,
fit_gam3 = fit_gam3
)
}
# test-add_car1_err.R
# Build fixtures for testing add_car1_err(): simulate CAR(1) noise on a
# fully-crossed, evenly spaced grid and on a randomly thinned (irregularly
# spaced) subset, then fit AR(1) models to the simulated series so tests can
# compare the recovered coefficient against the known `phi`.
help_add_car1_err <- function() {
  phi <- .7
  # 2 indices x 2 locations x 200 equally spaced reps, with constant
  # CAR(1) parameters and a zero conditional mean.
  car1_input <- tidyr::crossing(.index = 1:2, location = letters[1:2], rep = 1:200) |>
    dplyr::mutate(
      `ar[1]` = phi,
      nu = 1e3,
      sigma = 1,
      .epred = 0
    )
  # Untouched copy so tests can check the input is not modified in place.
  car1_input_test <- car1_input
  # Evenly spaced series (d_x = 1): simulate, keep one index/location.
  data_car1 <- withr::with_seed(32567, {
    car1_input |>
      dplyr::mutate(d_x = 1) |>
      add_car1_err(gr_vars = c(".index", "location")) |>
      dplyr::filter(.index == 1, location == "a")
  })
  fit <- arima(data_car1$.prediction, order = c(1, 0, 0))
  # Irregularly spaced series: thin to 60% of rows, then recompute the gap
  # (d_x) between consecutive surviving reps.
  # FIX: qualify replace_na/lag with their namespaces. In particular, bare
  # `lag` resolves to stats::lag when dplyr is not attached, which does not
  # shift a plain vector and would silently produce d_x = 0 everywhere.
  sub <- withr::with_seed(219, {
    car1_input |>
      dplyr::group_by(.index, location) |>
      dplyr::slice_sample(prop = .6) |>
      dplyr::ungroup() |>
      dplyr::arrange(.index, location, rep) |>
      dplyr::mutate(d_x = tidyr::replace_na(rep - dplyr::lag(rep), 0))
  })
  data2_car1 <- withr::with_seed(32567, {
    sub |>
      add_car1_err(gr_vars = c(".index", "location")) |>
      # FIX: dplyr::filter, consistent with the rest of this file.
      dplyr::filter(.index == 1, location == "a")
  })
  fit2 <- arima(data2_car1$.prediction, order = c(1, 0, 0))
  list(
    phi = phi,
    fit = fit,
    fit2 = fit2,
    car1_input = car1_input,
    car1_input_test = car1_input_test
  )
}
# test-add_car1.R
# Build fixtures for testing add_car1(): compute residual autocorrelation on
# an evenly spaced series (compared against an arima() AR(1) fit) and
# lag-1 correlations on an irregularly spaced subset.
# inputs: the list returned by load_test_models().
help_add_car1 <- function(inputs) {
# constant AR coefficient and zero conditional mean for the test series
add_car1_input <- inputs$data_ar |>
dplyr::mutate(.index = 1, `ar[1]` = .7, .epred = 0)
# residuals after adding the CAR(1) mean structure
car1 <- add_car1(add_car1_input, "y", gr_vars = c(".index", "series")) |>
dplyr::mutate(r = y - .epred)
autocorr <- extract_acf(car1$r)
# reference: residual ACF from a classical AR(1) fit to the same series
autocorr2 <- arima(add_car1_input$y, order = c(1, 0, 0)) |>
residuals() |>
extract_acf()
# irregular spacing: keep a random 70% of rows and recompute the time gap
# (d_x) between consecutive observations within each series
add_car1_sub <- withr::with_seed(3526, {
dplyr::slice_sample(add_car1_input, prop = .7) |>
dplyr::arrange(series, date) |>
dplyr::group_by(series) |>
dplyr::mutate(
d_x = date - dplyr::lag(date),
d_x = tidyr::replace_na(d_x, 0),
d_x = as.numeric(d_x)
) |>
dplyr::ungroup()
})
car1 <- add_car1(add_car1_sub, "y", gr_vars = c(".index", "series")) |>
dplyr::mutate(r = y - .epred)
# lag-1 correlations of residuals and raw y, restricted to unit gaps
# (d_x == 1) so the comparison is between truly adjacent observations
car1_cor <- car1 |>
dplyr::group_by(series) |>
dplyr::mutate(
r_lag = dplyr::lag(r),
y_lag = dplyr::lag(y)
) |>
dplyr::ungroup() |>
dplyr::filter(d_x == 1) |>
dplyr::summarize(
cor_r = cor(r_lag, r),
cor_y = cor(y_lag, y)
)
list(
autocorr = autocorr,
autocorr2 = autocorr2,
car1_cor = car1_cor
)
}
# Compute the autocorrelation function of `x` and return the correlation
# values as a plain numeric vector (lag 0 first), with no plot and no
# array attributes.
extract_acf <- function(x) {
  acf_obj <- acf(x, plot = FALSE)
  as.numeric(acf_obj$acf)
}
# test-add_pred_draws_car1.R
# Recover the residual-SD posterior draws from a brms fit for test
# comparisons.
#
# For a distributional model (a `sigma ~ group` formula) the sigma
# coefficients are on the log scale and non-intercept terms are offsets from
# the intercept, so the draws are re-assembled per group level and
# exponentiated. Otherwise the raw `sigma` draws are returned unchanged.
#
# x: a brmsfit object. Returns a tibble of posterior draws.
calc_sigma <- function(x) {
  varnames <- extract_resp(x)
  # Does the model formula include a linear predictor for sigma?
  # FIX: stringr::str_detect -- the bare call fails when stringr is not
  # attached, and the rest of this function already uses stringr:: forms.
  distributional <- as.character(x$formula)[2] |>
    stringr::str_detect("sigma ~ ")
  if (distributional) {
    groups <- unique(x$data[, varnames$gr_sigma])
  }
  draws <- x |>
    brms::as_draws_df("sigma", regex = TRUE) |>
    tibble::as_tibble()
  if (distributional) {
    draws <- draws |>
      tidyr::pivot_longer(c(
        tidyselect::starts_with("b_sigma_"),
        -tidyselect::matches("^b_sigma_Intercept$")
      )) |>
      # Non-intercept coefficients are offsets from the intercept.
      dplyr::mutate(value = b_sigma_Intercept + value) |>
      tidyr::pivot_wider(names_from = name, values_from = value) |>
      # FIX: tidyselect::starts_with, consistent with the call above.
      tidyr::pivot_longer(tidyselect::starts_with("b_sigma_"), values_to = "sigma") |>
      dplyr::select(c(name, sigma, tidyselect::starts_with("."))) |>
      dplyr::mutate(
        # Coefficients are on the log scale; back-transform to SDs.
        sigma = exp(sigma),
        # Derive the group level from the coefficient name; the intercept
        # column corresponds to the first (reference) level.
        group = stringr::str_remove(name, varnames$gr_sigma) |>
          stringr::str_extract("(?<=b_sigma_).+$") |>
          stringr::str_replace("Intercept", groups[1])
      )
  }
  draws
}
# Reduce a prediction frame to the columns used for comparisons (plus any
# extra columns passed through `...`), order rows by draw, and drop any
# grouping so downstream comparisons see an ungrouped result.
compare_preds <- function(x, ...) {
  selected <- dplyr::select(x, date, series, y, d_x, .row, .epred, ...)
  ordered <- dplyr::arrange(selected, .draw)
  dplyr::ungroup(ordered)
}
# test-calc_acf.R
# Build fixtures for testing calc_acf(): small residual frames with and
# without an outlier/offset, with and without a filter expression, and a
# censored data set exercising the cen_var/cens_lagged machinery.
help_calc_acf <- function() {
# one group, nine in-sequence residuals plus one large outlier
data_acf1 <- tibble::tibble(
.draw = 1,
.residual = c(1:9, -1e2),
location = "a"
)
# untouched copy so tests can check the input is not modified in place
data_acf_test1 <- data_acf1
# full data vs. filtered (outlier removed via `.residual > 0`)
c1 <- calc_acf(data_acf1, gr_vars = c(".draw", "location"))
c2 <- calc_acf(data_acf1, .residual > 0, gr_vars = c(".draw", "location"))
# two locations with an offset between them, to exercise grouping
data_acf2 <- tidyr::crossing(
location = letters[1:2],
.residual = as.numeric(1:10)
) |>
dplyr::mutate(
.residual = dplyr::if_else(location == "b", .residual - 100, .residual),
.draw = 1
)
data_acf_test2 <- data_acf2
c3 <- calc_acf(data_acf2, gr_vars = c(".draw", "location"))
# censored residuals: a fixed-seed random censoring indicator, with
# censored values pinned to -10
data_acf3 <- tidyr::crossing(
series = letters[1:3],
.draw = 1:3,
.residual = as.numeric(1:10)
) |>
dplyr::mutate(
censoring = withr::with_seed(1245, {
sample(c(-1, 1), length(series), replace = TRUE)
}
),
.residual = dplyr::if_else(censoring == -1, -10, .residual)
)
# keep only pairs where both the current and lagged value are uncensored
acf3 <- calc_acf(
data_acf3,
censoring == 1 & cens_lagged == 1,
cen_var = "censoring",
gr_vars = c(".draw", "series")
)
list(
data_acf1 = data_acf1,
data_acf_test1 = data_acf_test1,
c1 = c1,
c2 = c2,
c3 = c3,
data_acf2 = data_acf2,
data_acf_test2 = data_acf_test2,
data_acf3 = data_acf3,
acf3 = acf3
)
}
# test-calc_ll.R
# Build log-likelihood fixtures for testing calc_ll(): log-likelihood
# matrices from brms::log_lik() and from calc_ll() (via
# add_pred_draws_car1()) for both the censored intercept-only fit and the
# AR fit, shaped identically (draws x observations, no column names) so
# tests can compare them directly.
# inputs: the list returned by load_test_models().
help_calc_ll_preds <- function(inputs) {
  # fit:
  ll_brm1 <- brms::log_lik(inputs$fit)
  ll_myfn_in1 <- add_pred_draws_car1(inputs$data, inputs$fit, draw_ids = 1:3000, car1 = FALSE) |>
    dplyr::ungroup()
  ll_myfn1 <- ll_myfn_in1 |>
    calc_ll("y", censored = "ycens", upper = "y2") |>
    tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
    dplyr::select(tidyselect::matches("^\\d")) |>
    as.matrix()
  colnames(ll_myfn1) <- NULL
  # fit_ar
  ll_brm2 <- brms::log_lik(inputs$fit_ar)
  ll_myfn_in2 <- add_pred_draws_car1(inputs$data_ar, inputs$fit_ar, draw_ids = 1:2000) |>
    dplyr::ungroup()
  ll_myfn_in_test <- ll_myfn_in2
  ll_myfn2 <- ll_myfn_in2 |>
    calc_ll("y", cens = FALSE) |>
    tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
    dplyr::select(tidyselect::matches("^\\d")) |>
    as.matrix()
  colnames(ll_myfn2) <- NULL
  # BUG FIX: the original function ended on the `colnames<-` assignment, so
  # it invisibly returned NULL and every computed fixture was discarded.
  # Return them, mirroring help_calc_ll().
  list(
    ll_brm1 = ll_brm1,
    ll_brm2 = ll_brm2,
    ll_myfn1 = ll_myfn1,
    ll_myfn2 = ll_myfn2,
    ll_myfn_in1 = ll_myfn_in1,
    ll_myfn_in2 = ll_myfn_in2,
    ll_myfn_in_test = ll_myfn_in_test
  )
}
# Build log-likelihood fixtures for testing calc_ll(): log-likelihood
# matrices from brms::log_lik() and from calc_ll() (via
# add_pred_draws_car1()) for both the censored intercept-only fit and the
# AR fit, shaped identically (draws x observations, no column names) so
# tests can compare them directly.
# inputs: the list returned by load_test_models().
help_calc_ll <- function(inputs) {
# fit:
ll_brm1 <- brms::log_lik(inputs$fit)
ll_myfn_in1 <- add_pred_draws_car1(inputs$data, inputs$fit, draw_ids = 1:3000, car1 = FALSE) |>
dplyr::ungroup()
# reshape calc_ll() output to a draws x observations matrix to match
# the brms::log_lik() layout
ll_myfn1 <- ll_myfn_in1 |>
calc_ll("y", censored = "ycens", upper = "y2") |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn1) <- NULL
# fit_ar
ll_brm2 <- brms::log_lik(inputs$fit_ar)
ll_myfn_in2 <- add_pred_draws_car1(inputs$data_ar, inputs$fit_ar, draw_ids = 1:2000) |>
dplyr::ungroup()
# untouched copy so tests can check the input is not modified in place
ll_myfn_in_test <- ll_myfn_in2
ll_myfn2 <- ll_myfn_in2 |>
calc_ll("y", cens = FALSE) |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn2) <- NULL
list(
ll_brm1 = ll_brm1,
ll_brm2 = ll_brm2,
ll_myfn1 = ll_myfn1,
ll_myfn2 = ll_myfn2,
ll_myfn_in2 = ll_myfn_in2,
ll_myfn_in_test = ll_myfn_in_test
)
}
# test-local_slope.R
# Build fixtures for testing local_slope(): slopes of the s(x2) smooth for a
# single-group GAM, a by-group GAM (via g_var and via add_vars), and a
# by-group GAM with an AR term that needs an explicit time variable.
# inputs: the list returned by load_test_gams().
help_local_slope <- function(inputs) {
# single-group smooth
slopes <- local_slope(inputs$data_gam, inputs$fit_gam, "x2", smooth = "s(x2)", pts = 498)
# by-group smooth, groups supplied through g_var
slopes2 <- local_slope(inputs$data_gam2, inputs$fit_gam2, "x2", smooth = "s(x2, by = g)", g_var = "g", pts = 459)
# by-group smooth, a single group pinned through add_vars instead
slopes3 <- local_slope(
inputs$data_gam2, inputs$fit_gam2, "x2", smooth = "s(x2, by = g)",
add_vars = list(g = "b"), pts = 459
)
# AR model: also supply the time variable x1 through add_vars
slopes4 <- local_slope(
inputs$data_gam2, inputs$fit_gam3, "x2", smooth = "s(x2, by = g)",
g_var = "g", add_vars = list(x1 = 1:(459 * 3)), pts = 459
)
list(
slopes = slopes,
slopes2 = slopes2,
slopes3 = slopes3,
slopes4 = slopes4
)
}
# test-summarize_preds.R
# Build fixtures for testing summarize_preds(): a grouped prediction frame
# whose .epred is an invertible transform of y, so a correct implementation
# back-transforms .epred exactly onto y and each filtered row count is 0.
# FIX: namespace-qualify the bare dplyr calls (ungroup, slice_sample,
# group_by, filter, near) so the helper works without dplyr attached,
# consistent with the rest of this file.
help_summarize_preds <- function() {
  # .epred is the scaled log of y, so retransformation is exact.
  data_sumpred <- tidyr::crossing(
    g = letters[1:5],
    y = 1:10
  ) |>
    dplyr::mutate(.epred = scale(log(y))[,1]) |>
    dplyr::group_by(g, y)
  # Untouched copy so tests can check the input is not modified in place.
  data_sumpred_test <- data_sumpred
  # Rows where the retransformed prediction disagrees with y (expect 0).
  x_sum <- summarize_preds(data_sumpred, y_var = y) |>
    dplyr::filter(!dplyr::near(y, .epred_retrans)) |>
    nrow()
  # Random 75% subset, regrouped, to exercise incomplete groups.
  x_sum2 <- withr::with_seed(1246, {
    data_sumpred |>
      dplyr::ungroup() |>
      dplyr::slice_sample(prop = .75) |>
      dplyr::group_by(g, y)
  })
  s1 <- x_sum2 |>
    summarize_preds(y_var = y) |>
    dplyr::filter(!dplyr::near(y, .epred_retrans)) |>
    nrow()
  # Same check, passing y_var as an external vector rather than a column.
  s2 <- x_sum2 |>
    summarize_preds(y_var = data_sumpred$y) |>
    dplyr::filter(!dplyr::near(y, .epred_retrans)) |>
    nrow()
  list(
    data_sumpred = data_sumpred,
    data_sumpred_test = data_sumpred_test,
    x_sum = x_sum,
    s1 = s1,
    s2 = s2
  )
}
| /tests/testthat/helpers.R | permissive | bentrueman/bgamcar1 | R | false | false | 11,384 | r |
# load models for testing:
load_test_models <- function() {
seed <- 1
data <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data.csv"))
data_ar <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_ar.csv"))
data_car1 <- readr::read_csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_car1.csv"))
fit <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test"),
seed,
brms::bf(y | cens(ycens, y2 = y2) ~ 1),
data,
brms::prior(normal(0, 1), class = Intercept),
car1 = FALSE,
save_warmup = FALSE,
chains = 3
)
form_ar <- brms::bf(y ~ ar(time = date, gr = series), sigma ~ series)
prior_ar <- brms::prior(normal(0, 1), class = Intercept)
fit_ar <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_ar"),
seed,
form_ar,
data_ar,
prior_ar,
save_warmup = FALSE,
chains = 2
)
form_car1 <- brms::bf(y ~ ar(time = x))
phi_car1 <- .45
fit_car1 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_car1"),
seed,
form_car1,
data_car1,
prior_ar,
save_warmup = FALSE,
chains = 2
)
list(
# data:
data = data,
data_ar = data_ar,
data_car1 = data_car1,
# models:
fit = fit,
fit_ar = fit_ar,
fit_car1 = fit_car1,
# other inputs:
form_ar = form_ar,
form_car1 = form_car1,
prior_ar = prior_ar,
phi_car1 = phi_car1
)
}
load_test_gams <- function() {
seed <- 1
# for local_slope.R
data_gam <- read.csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_gam.csv"))
data_gam2 <- read.csv(paste0(system.file("extdata", package = "bgamcar1"), "/data_gam2.csv"))
fit_gam <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam"),
seed,
brms::bf(y ~ s(x0) + s(x1) + s(x2) + s(x3)),
data_gam,
car1 = FALSE,
chains = 2
)
fit_gam2 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam2"),
seed,
brms::bf(y ~ s(x0, by = g) + s(x1, by = g) + s(x2, by = g) + s(x3, by = g)),
data_gam2,
save_warmup = FALSE,
car1 = FALSE,
chains = 2
)
fit_gam3 <- fit_stan_model(
paste0(system.file("extdata", package = "bgamcar1"), "/test_gam2"),
seed,
brms::bf(y ~ s(x0, by = g) + s(x1, by = g) + s(x2, by = g) + s(x3, by = g) + ar(time = x1, gr = g)),
data_gam2,
save_warmup = FALSE,
car1 = FALSE,
chains = 2
)
list(
# data:
data_gam = data_gam,
data_gam2 = data_gam2,
# models:
fit_gam = fit_gam,
fit_gam2 = fit_gam2,
fit_gam3 = fit_gam3
)
}
# test-add_car1_err.R
help_add_car1_err <- function() {
phi <- .7
car1_input <- tidyr::crossing(.index = 1:2, location = letters[1:2], rep = 1:200) |>
dplyr::mutate(
`ar[1]` = phi,
nu = 1e3,
sigma = 1,
.epred = 0
)
car1_input_test <- car1_input
data_car1 <- withr::with_seed(32567, {
car1_input |>
dplyr::mutate(d_x = 1) |>
add_car1_err(gr_vars = c(".index", "location")) |>
dplyr::filter(.index == 1, location == "a")
})
fit <- arima(data_car1$.prediction, order = c(1, 0, 0))
sub <- withr::with_seed(219, {
car1_input |>
dplyr::group_by(.index, location) |>
dplyr::slice_sample(prop = .6) |>
dplyr::ungroup() |>
dplyr::arrange(.index, location, rep) |>
dplyr::mutate(d_x = replace_na(rep - lag(rep), 0))
})
data2_car1 <- withr::with_seed(32567, {
sub |>
add_car1_err(gr_vars = c(".index", "location")) |>
filter(.index == 1, location == "a")
})
fit2 <- arima(data2_car1$.prediction, order = c(1, 0, 0))
list(
phi = phi,
fit = fit,
fit2 = fit2,
car1_input = car1_input,
car1_input_test = car1_input_test
)
}
# test-add_car1.R
help_add_car1 <- function(inputs) {
add_car1_input <- inputs$data_ar |>
dplyr::mutate(.index = 1, `ar[1]` = .7, .epred = 0)
car1 <- add_car1(add_car1_input, "y", gr_vars = c(".index", "series")) |>
dplyr::mutate(r = y - .epred)
autocorr <- extract_acf(car1$r)
autocorr2 <- arima(add_car1_input$y, order = c(1, 0, 0)) |>
residuals() |>
extract_acf()
add_car1_sub <- withr::with_seed(3526, {
dplyr::slice_sample(add_car1_input, prop = .7) |>
dplyr::arrange(series, date) |>
dplyr::group_by(series) |>
dplyr::mutate(
d_x = date - dplyr::lag(date),
d_x = tidyr::replace_na(d_x, 0),
d_x = as.numeric(d_x)
) |>
dplyr::ungroup()
})
car1 <- add_car1(add_car1_sub, "y", gr_vars = c(".index", "series")) |>
dplyr::mutate(r = y - .epred)
car1_cor <- car1 |>
dplyr::group_by(series) |>
dplyr::mutate(
r_lag = dplyr::lag(r),
y_lag = dplyr::lag(y)
) |>
dplyr::ungroup() |>
dplyr::filter(d_x == 1) |>
dplyr::summarize(
cor_r = cor(r_lag, r),
cor_y = cor(y_lag, y)
)
list(
autocorr = autocorr,
autocorr2 = autocorr2,
car1_cor = car1_cor
)
}
extract_acf <- function(x) {
x |>
acf(plot = FALSE) |>
with(acf) |>
as.numeric()
}
# test-add_pred_draws_car1.R
calc_sigma <- function(x) {
varnames <- extract_resp(x)
distributional <- as.character(x$formula)[2] |>
str_detect("sigma ~ ")
if (distributional) {
groups <- unique(x$data[, varnames$gr_sigma])
}
draws <- x |>
brms::as_draws_df("sigma", regex = TRUE) |>
tibble::as_tibble()
if (distributional) {
draws <- draws |>
tidyr::pivot_longer(c(
tidyselect::starts_with("b_sigma_"),
-tidyselect::matches("^b_sigma_Intercept$")
)) |>
dplyr::mutate(value = b_sigma_Intercept + value) |>
tidyr::pivot_wider(names_from = name, values_from = value) |>
tidyr::pivot_longer(starts_with("b_sigma_"), values_to = "sigma") |>
dplyr::select(c(name, sigma, tidyselect::starts_with("."))) |>
dplyr::mutate(
sigma = exp(sigma),
group = stringr::str_remove(name, varnames$gr_sigma) |>
stringr::str_extract("(?<=b_sigma_).+$") |>
stringr::str_replace("Intercept", groups[1])
)
}
draws
}
compare_preds <- function(x, ...) {
x |>
dplyr::select(date, series, y, d_x, .row, .epred, ...) |>
dplyr::arrange(.draw) |>
dplyr::ungroup()
}
# test-calc_acf.R
help_calc_acf <- function() {
data_acf1 <- tibble::tibble(
.draw = 1,
.residual = c(1:9, -1e2),
location = "a"
)
data_acf_test1 <- data_acf1
c1 <- calc_acf(data_acf1, gr_vars = c(".draw", "location"))
c2 <- calc_acf(data_acf1, .residual > 0, gr_vars = c(".draw", "location"))
data_acf2 <- tidyr::crossing(
location = letters[1:2],
.residual = as.numeric(1:10)
) |>
dplyr::mutate(
.residual = dplyr::if_else(location == "b", .residual - 100, .residual),
.draw = 1
)
data_acf_test2 <- data_acf2
c3 <- calc_acf(data_acf2, gr_vars = c(".draw", "location"))
data_acf3 <- tidyr::crossing(
series = letters[1:3],
.draw = 1:3,
.residual = as.numeric(1:10)
) |>
dplyr::mutate(
censoring = withr::with_seed(1245, {
sample(c(-1, 1), length(series), replace = TRUE)
}
),
.residual = dplyr::if_else(censoring == -1, -10, .residual)
)
acf3 <- calc_acf(
data_acf3,
censoring == 1 & cens_lagged == 1,
cen_var = "censoring",
gr_vars = c(".draw", "series")
)
list(
data_acf1 = data_acf1,
data_acf_test1 = data_acf_test1,
c1 = c1,
c2 = c2,
c3 = c3,
data_acf2 = data_acf2,
data_acf_test2 = data_acf_test2,
data_acf3 = data_acf3,
acf3 = acf3
)
}
# test-calc_ll.R
help_calc_ll_preds <- function(inputs) {
# fit:
ll_brm1 <- brms::log_lik(inputs$fit)
ll_myfn_in1 <- add_pred_draws_car1(inputs$data, inputs$fit, draw_ids = 1:3000, car1 = FALSE) |>
dplyr::ungroup()
ll_myfn1 <- ll_myfn_in1 |>
calc_ll("y", censored = "ycens", upper = "y2") |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn1) <- NULL
# fit_ar
ll_brm2 <- brms::log_lik(inputs$fit_ar)
ll_myfn_in2 <- add_pred_draws_car1(inputs$data_ar, inputs$fit_ar, draw_ids = 1:2000) |>
dplyr::ungroup()
ll_myfn_in_test <- ll_myfn_in2
ll_myfn2 <- ll_myfn_in2 |>
calc_ll("y", cens = FALSE) |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn2) <- NULL
}
help_calc_ll <- function(inputs) {
# fit:
ll_brm1 <- brms::log_lik(inputs$fit)
ll_myfn_in1 <- add_pred_draws_car1(inputs$data, inputs$fit, draw_ids = 1:3000, car1 = FALSE) |>
dplyr::ungroup()
ll_myfn1 <- ll_myfn_in1 |>
calc_ll("y", censored = "ycens", upper = "y2") |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn1) <- NULL
# fit_ar
ll_brm2 <- brms::log_lik(inputs$fit_ar)
ll_myfn_in2 <- add_pred_draws_car1(inputs$data_ar, inputs$fit_ar, draw_ids = 1:2000) |>
dplyr::ungroup()
ll_myfn_in_test <- ll_myfn_in2
ll_myfn2 <- ll_myfn_in2 |>
calc_ll("y", cens = FALSE) |>
tidyr::pivot_wider(id_cols = c(.draw, .chain, .iteration), names_from = .row, values_from = log_lik) |>
dplyr::select(tidyselect::matches("^\\d")) |>
as.matrix()
colnames(ll_myfn2) <- NULL
list(
ll_brm1 = ll_brm1,
ll_brm2 = ll_brm2,
ll_myfn1 = ll_myfn1,
ll_myfn2 = ll_myfn2,
ll_myfn_in2 = ll_myfn_in2,
ll_myfn_in_test = ll_myfn_in_test
)
}
# test-local_slope.R
help_local_slope <- function(inputs) {
slopes <- local_slope(inputs$data_gam, inputs$fit_gam, "x2", smooth = "s(x2)", pts = 498)
slopes2 <- local_slope(inputs$data_gam2, inputs$fit_gam2, "x2", smooth = "s(x2, by = g)", g_var = "g", pts = 459)
slopes3 <- local_slope(
inputs$data_gam2, inputs$fit_gam2, "x2", smooth = "s(x2, by = g)",
add_vars = list(g = "b"), pts = 459
)
slopes4 <- local_slope(
inputs$data_gam2, inputs$fit_gam3, "x2", smooth = "s(x2, by = g)",
g_var = "g", add_vars = list(x1 = 1:(459 * 3)), pts = 459
)
list(
slopes = slopes,
slopes2 = slopes2,
slopes3 = slopes3,
slopes4 = slopes4
)
}
# test-summarize_preds.R
help_summarize_preds <- function() {
data_sumpred <- tidyr::crossing(
g = letters[1:5],
y = 1:10
) |>
dplyr::mutate(.epred = scale(log(y))[,1]) |>
dplyr::group_by(g, y)
data_sumpred_test <- data_sumpred
x_sum <- summarize_preds(data_sumpred, y_var = y) |>
dplyr::filter(!dplyr::near(y, .epred_retrans)) |>
nrow()
x_sum2 <- withr::with_seed(1246, {
data_sumpred |>
ungroup() |>
slice_sample(prop = .75) |>
group_by(g, y)
})
s1 <- x_sum2 |>
summarize_preds(y_var = y) |>
filter(!near(y, .epred_retrans)) |>
nrow()
s2 <- x_sum2|>
summarize_preds(y_var = data_sumpred$y) |>
filter(!near(y, .epred_retrans)) |>
nrow()
list(
data_sumpred = data_sumpred,
data_sumpred_test = data_sumpred_test,
x_sum = x_sum,
s1 = s1,
s2 = s2
)
}
|
\name{pd_labelattr}
\alias{pd_labelattr}
\title{
Function for obtaining party and issue labels when these are listed as attributes.
}
\description{
Function for obtaining party and issue labels when these are listed as attributes.
}
\usage{
pd_labelattr(data,
party_var,
issue_var,
coordmat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{Matrix with rescaled and rotated coordinates. Usually the result of pd_rescale and pd_mdsrot.
}
\item{party_var}{Name of the party variable that contains labels as attributes.
}
\item{issue_var}{Name of the issue variable that contains labels as attributes.
}
\item{coordmat}{Matrix with rescaled and rotated coordinates. Usually the result of pd_rescale and pd_mdsrot.
}
}
\author{
Ioana-Elena Oana
}
| /man/pd_labelattr.Rd | no_license | nenaoana/MDScosa | R | false | false | 784 | rd | \name{pd_labelattr}
\alias{pd_labelattr}
\title{
Function for obtaining party and issue labels when these are listed as attributes.
}
\description{
Function for obtaining party and issue labels when these are listed as attributes.
}
\usage{
pd_labelattr(data,
party_var,
issue_var,
coordmat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{Matrix with rescaled and rotated coordinates. Usually the result of pd_rescale and pd_mdsrot.
}
\item{party_var}{Name of the party variable that contains labels as attributes.
}
\item{issue_var}{Name of the issue variable that contains labels as attributes.
}
\item{coordmat}{Matrix with rescaled and rotated coordinates. Usually the result of pd_rescale and pd_mdsrot.
}
}
\author{
Ioana-Elena Oana
}
|
######################################
###This script runs a model ##########
###to look at drivers in ##########
###variation of N factors ##########
### with vegetation data. ##########
### However, the lack of ##########
### consistent vegetation ##########
### data means this is a ##########
### very reduced number of ##########
### N factors for comparision#########
######################################
library(plyr)
library(rjags)
library(xtable)
library(ggmcmc)
##this model version starts with just
##looking at the amount of variation
##resulting from the distance between
##sensors, location, and year
#read in winter N factor data
datWN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\WinterNvege.csv")
#summer N factor data
datSN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\SummerNvege.csv")
#read in EVI data
datEVI<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\Site_EVI_out.csv")
#there is a site in the summer that clearly has some numbers
#that are not correct for soil temperature. There is likely
#a large number used for a NA. This needs to be fixed, but
#until that can happen need filter out
datSN<-datSN[datSN$T<3000,]
#now filter to only focus on 0-10cm
datWN<-datWN[datWN$depth<=10,]
datSN<-datSN[datSN$depth<=10,]
#join the region id
#read in data from region generated in GIS
datR<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\region_siteinfo.csv")
reg.to.join<-data.frame(siteid=datR$site_id,regionid=datR$region_id,region.name=datR$region_nam)
#now add the EVI data to the data frame
datSNiii<-join(datSN, datEVI, by=c("siteid"), type="left")
datWNiii<-join(datWN, datEVI, by=c("siteid"), type="left")
#now join region
datSNiv<-join(datSNiii,reg.to.join, by="siteid", type="left")
datWNiv<-join(datWNiii,reg.to.join, by="siteid", type="left")
#look at how many sites actually have observations for vegetation
##summer
#now see what data loss is like for
#comb1SN<-na.omit(data.frame(datSNiv[1:11],vegez=datSNiv$Snew_vA, olt=datSNiv$olt, shrubC=datSNiv$shrub.pc, regionid=datSNiv$regionid,EVI=datSNiv$EVI,))
comb3SN<-na.omit(data.frame(datSNiv[1:11],vegez=datSNiv$Snew_vA, olt=datSNiv$olt,shrubC=datSNiv$shrub.pc,
mossC=datSNiv$moss.pc,EVI=datSNiv$EVI, regionid=datSNiv$regionid))
#dim(na.omit(comb1SN))[1]
dim(na.omit(comb3SN))[1]
##winter
#now see what data loss is like for
#comb1WN<-na.omit(data.frame(datWNiv[1:11],vegez=datWNiv$new_vA, olt=datWNiv$olt, shrubC=datWNiv$shrub.pc, regionid=datWNiv$regionid,EVI=datWNiv$EVI))
comb3WN<-na.omit(data.frame(datWNiv[1:11],vegez=datWNiv$new_vA, olt=datWNiv$olt, shrubC=datWNiv$shrub.pc,
mossC=datWNiv$moss.pc,regionid=datWNiv$regionid,EVI=datWNiv$EVI))
#dim(na.omit(comb1WN))[1]
dim(na.omit(comb3WN))[1]
#just stick with comb3 for now
#need to create year and regionIDs
#don't include any region with only one site observation
#this is regionid 9
comb3SN<-comb3SN[comb3SN$regionid!=9,]
comb3WN<-comb3WN[comb3WN$regionid!=9,]
#north interior doesn't have a lot of data and appear to be different from south interior
#NOTE(review): the next two lines MERGE north interior (regionid 2) into
#region 1, which seems to contradict "appear to be different" above --
#confirm the merge is intended
comb3SN$regionid<-ifelse(comb3SN$regionid==2,1,comb3SN$regionid)
comb3WN$regionid<-ifelse(comb3WN$regionid==2,1,comb3WN$regionid)
#get unique region id
#build a sequential region index (reg.mod) from the summer region table
RegionS<-data.frame(regionid=sort.int(unique(comb3SN$regionid)))
RegionW<-data.frame(regionid=sort.int(unique(comb3WN$regionid)))
#regions are the same between winter and summer
#so the summer lookup table (RegionS) is joined to both seasons
RegionS$reg.mod<-seq(1,dim(RegionS)[1])
vegeSN<-join(comb3SN,RegionS,by="regionid",type="inner")
vegeWN<-join(comb3WN,RegionS,by="regionid",type="inner")
# Build sequential year IDs for the summer (yearS) and winter (yearW) data and
# attach them to the vegetation tables so the model can index effects by year.
yearS<-data.frame(year=sort.int(unique(vegeSN$year)))
yearS$yearid<-seq(1, dim(yearS)[1])
yearW<-data.frame(wyear=sort.int(unique(vegeWN$wyear)))
# BUG FIX: winter IDs were generated with dim(yearS)[1] (the number of *summer*
# years); if the seasons span different numbers of years this either errors or
# silently mislabels winter years. Use the winter table's own row count.
yearW$yearid<-seq(1, dim(yearW)[1])
vegeSNi<-join(vegeSN,yearS,by="year",type="inner")
vegeWNi<-join(vegeWN,yearW,by="wyear",type="inner")
#need to aggregate covariates for covariate centering
OLTSm<-aggregate(vegeSNi$olt,by=list(vegeSNi$reg.mod), FUN="mean")
shrubSm<-aggregate(vegeSNi$shrubC,by=list(vegeSNi$reg.mod), FUN="mean")
mossSm<-aggregate(vegeSNi$mossC,by=list(vegeSNi$reg.mod), FUN="mean")
EVISm<-aggregate(vegeSNi$EVI,by=list(vegeSNi$reg.mod), FUN="mean")
OLTWm<-aggregate(vegeWNi$olt,by=list(vegeWNi$reg.mod), FUN="mean")
shrubWm<-aggregate(vegeWNi$shrubC,by=list(vegeWNi$reg.mod), FUN="mean")
mossWm<-aggregate(vegeWNi$mossC,by=list(vegeWNi$reg.mod), FUN="mean")
EVIWm<-aggregate(vegeWNi$EVI,by=list(vegeWNi$reg.mod), FUN="mean")
datamodellist<-list(NobsS=dim(vegeSNi)[1],n.factS=vegeSNi$n,
EVIS=vegeSNi$EVI, yearS=vegeSNi$yearid,
regIDS=vegeSNi$reg.mod,OLTS=vegeSNi$olt,
shrubCS=vegeSNi$shrubC,mossCS=vegeSNi$mossC,
NyearS=dim(yearS)[1],
xS=yearS$yearid, yS=rep(1,dim(yearS)[1]),
Nreg=dim(RegionS)[1],
NobsW=dim(vegeWNi)[1], n.factW=vegeWNi$n, EVIW=vegeWNi$EVI,
OLTW=vegeWNi$olt,
shrubCW=vegeWNi$shrubC,mossCW=vegeWNi$mossC,
yearW=vegeWNi$yearid,regIDW=vegeWNi$reg.mod,
NyearW=dim(yearW)[1], xW=yearW$wyear,yW=rep(1,dim(yearW)[1]),
OLTSm=OLTSm$x,shrubSm=shrubSm$x,mossSm=mossSm$x,
OLTWm=OLTWm$x,shrubWm=shrubWm$x, mossWm=mossWm$x, EVISm=EVISm$x,EVIWm=EVIWm$x)
Samplelist<-c("deviance", "nbeta.star1W","nbeta.star1S", "nbeta2S","nbeta3S","nbeta4S","nbeta5S",
"nbeta2W","nbeta3W","nbeta4W","nbeta5W",
"eps.star", "sig.S", "sig.W",
"rep.nS","epsW.star","rep.nW",
"rho.epsW",
"rho.eps")
inits<-list(list(t.eps=1,rho.eps=.99,t.epsW=1,rho.epsW=.99),
list(t.eps=1.5,rho.eps=.89,t.epsW=1.5,rho.epsW=.89),
list(t.eps=.5,rho.eps=.79,t.epsW=.5,rho.epsW=.79))
n.model.init=jags.model(file="c:\\Users\\hkropp\\Documents\\GitHub\\synthesis_database\\n_model\\Nvege_model_code.r",
data=datamodellist,
n.adapt=15000,
n.chains=3,
inits=inits)
n.iter.i=90000
codaobj.init = coda.samples(n.model.init,variable.names=Samplelist,
n.iter=n.iter.i, thin=30)
windows(18)
plot(codaobj.init[,"nbeta2W[3]"], ask=TRUE)
#generate summary
Mod.out<-summary(codaobj.init)
write.table(Mod.out$statistics, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_stats.csv",
sep=",",row.names=TRUE)
write.table(Mod.out$quantiles, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_quant.csv",
sep=",",row.names=TRUE)
write.table(vegeWNi, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegewN_for_model.csv", sep=",",)
write.table(vegeSNi, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegesN_for_model.csv", sep=",",)
| /archive/n_calculation/n_model/Nvege_model_script.r | no_license | kroppheather/synthesis_database | R | false | false | 6,701 | r | ######################################
######################################
### This script fits a model of the drivers of variation in N factors
### using vegetation data. The lack of consistent vegetation data means
### only a much-reduced set of N factors is available for comparison.
######################################
library(plyr)
library(rjags)
library(xtable)
library(ggmcmc)
##this model version starts with just
##looking at the amount of variation
##resulting from the distance between
##sensors, location, and year
#read in winter N factor data
datWN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\WinterNvege.csv")
#summer N factor data
datSN<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\SummerNvege.csv")
#read in EVI data
datEVI<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\Site_EVI_out.csv")
# One summer site has soil temperatures that are clearly wrong (probably a
# large numeric value used as an NA sentinel). Until the source data are
# fixed, drop those rows here.
datSN<-datSN[datSN$T<3000,]
# Keep only near-surface (0-10 cm depth) observations in both seasons.
datWN<-datWN[datWN$depth<=10,]
datSN<-datSN[datSN$depth<=10,]
# Attach region IDs (generated in GIS) and EVI to both seasonal tables.
datR<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\region_siteinfo.csv")
reg.to.join<-data.frame(siteid=datR$site_id,regionid=datR$region_id,region.name=datR$region_nam)
#now add the EVI data to the data frame
datSNiii<-join(datSN, datEVI, by=c("siteid"), type="left")
datWNiii<-join(datWN, datEVI, by=c("siteid"), type="left")
#now join region
datSNiv<-join(datSNiii,reg.to.join, by="siteid", type="left")
datWNiv<-join(datWNiii,reg.to.join, by="siteid", type="left")
# Keep only rows with complete vegetation covariates (olt, shrub, moss, EVI);
# the dim() calls report how many observations survive na.omit().
##summer
#comb1SN<-na.omit(data.frame(datSNiv[1:11],vegez=datSNiv$Snew_vA, olt=datSNiv$olt, shrubC=datSNiv$shrub.pc, regionid=datSNiv$regionid,EVI=datSNiv$EVI,))
comb3SN<-na.omit(data.frame(datSNiv[1:11],vegez=datSNiv$Snew_vA, olt=datSNiv$olt,shrubC=datSNiv$shrub.pc,
			mossC=datSNiv$moss.pc,EVI=datSNiv$EVI, regionid=datSNiv$regionid))
#dim(na.omit(comb1SN))[1]
dim(na.omit(comb3SN))[1]
##winter
#comb1WN<-na.omit(data.frame(datWNiv[1:11],vegez=datWNiv$new_vA, olt=datWNiv$olt, shrubC=datWNiv$shrub.pc, regionid=datWNiv$regionid,EVI=datWNiv$EVI))
comb3WN<-na.omit(data.frame(datWNiv[1:11],vegez=datWNiv$new_vA, olt=datWNiv$olt, shrubC=datWNiv$shrub.pc,
			mossC=datWNiv$moss.pc,regionid=datWNiv$regionid,EVI=datWNiv$EVI))
#dim(na.omit(comb1WN))[1]
dim(na.omit(comb3WN))[1]
# Region clean-up: drop region 9 (only one site observation) and merge the
# sparse north interior (regionid 2) into the south interior (regionid 1).
comb3SN<-comb3SN[comb3SN$regionid!=9,]
comb3WN<-comb3WN[comb3WN$regionid!=9,]
comb3SN$regionid<-ifelse(comb3SN$regionid==2,1,comb3SN$regionid)
comb3WN$regionid<-ifelse(comb3WN$regionid==2,1,comb3WN$regionid)
# Build sequential model region IDs; summer and winter cover the same regions,
# so the summer lookup (RegionS) is joined to both seasons.
RegionS<-data.frame(regionid=sort.int(unique(comb3SN$regionid)))
RegionW<-data.frame(regionid=sort.int(unique(comb3WN$regionid)))
RegionS$reg.mod<-seq(1,dim(RegionS)[1])
vegeSN<-join(comb3SN,RegionS,by="regionid",type="inner")
vegeWN<-join(comb3WN,RegionS,by="regionid",type="inner")
# Build sequential year IDs for the summer (yearS) and winter (yearW) data and
# attach them to the vegetation tables so the model can index effects by year.
yearS<-data.frame(year=sort.int(unique(vegeSN$year)))
yearS$yearid<-seq(1, dim(yearS)[1])
yearW<-data.frame(wyear=sort.int(unique(vegeWN$wyear)))
# BUG FIX: winter IDs were generated with dim(yearS)[1] (the number of *summer*
# years); if the seasons span different numbers of years this either errors or
# silently mislabels winter years. Use the winter table's own row count.
yearW$yearid<-seq(1, dim(yearW)[1])
vegeSNi<-join(vegeSN,yearS,by="year",type="inner")
vegeWNi<-join(vegeWN,yearW,by="wyear",type="inner")
# Region-level means of each covariate (organic layer thickness, shrub cover,
# moss cover, EVI) for both seasons; used for covariate centering in the model.
OLTSm<-aggregate(vegeSNi$olt,by=list(vegeSNi$reg.mod), FUN="mean")
shrubSm<-aggregate(vegeSNi$shrubC,by=list(vegeSNi$reg.mod), FUN="mean")
mossSm<-aggregate(vegeSNi$mossC,by=list(vegeSNi$reg.mod), FUN="mean")
EVISm<-aggregate(vegeSNi$EVI,by=list(vegeSNi$reg.mod), FUN="mean")
OLTWm<-aggregate(vegeWNi$olt,by=list(vegeWNi$reg.mod), FUN="mean")
shrubWm<-aggregate(vegeWNi$shrubC,by=list(vegeWNi$reg.mod), FUN="mean")
mossWm<-aggregate(vegeWNi$mossC,by=list(vegeWNi$reg.mod), FUN="mean")
EVIWm<-aggregate(vegeWNi$EVI,by=list(vegeWNi$reg.mod), FUN="mean")
# Bundle all observations, indices, and centered covariates for JAGS.
# NOTE(review): xS uses the sequential yearS$yearid but xW uses the raw
# calendar yearW$wyear — verify against Nvege_model_code.r that this
# asymmetry is intentional.
datamodellist<-list(NobsS=dim(vegeSNi)[1],n.factS=vegeSNi$n,
                    EVIS=vegeSNi$EVI, yearS=vegeSNi$yearid,
                    regIDS=vegeSNi$reg.mod,OLTS=vegeSNi$olt,
                    shrubCS=vegeSNi$shrubC,mossCS=vegeSNi$mossC,
                    NyearS=dim(yearS)[1],
                    xS=yearS$yearid, yS=rep(1,dim(yearS)[1]),
                    Nreg=dim(RegionS)[1],
                    NobsW=dim(vegeWNi)[1], n.factW=vegeWNi$n, EVIW=vegeWNi$EVI,
                    OLTW=vegeWNi$olt,
                    shrubCW=vegeWNi$shrubC,mossCW=vegeWNi$mossC,
                    yearW=vegeWNi$yearid,regIDW=vegeWNi$reg.mod,
                    NyearW=dim(yearW)[1], xW=yearW$wyear,yW=rep(1,dim(yearW)[1]),
                    OLTSm=OLTSm$x,shrubSm=shrubSm$x,mossSm=mossSm$x,
                    OLTWm=OLTWm$x,shrubWm=shrubWm$x, mossWm=mossWm$x, EVISm=EVISm$x,EVIWm=EVIWm$x)
# Parameters to monitor, including deviance and replicated data for model checking.
Samplelist<-c("deviance", "nbeta.star1W","nbeta.star1S", "nbeta2S","nbeta3S","nbeta4S","nbeta5S",
              "nbeta2W","nbeta3W","nbeta4W","nbeta5W",
              "eps.star", "sig.S", "sig.W",
              "rep.nS","epsW.star","rep.nW",
              "rho.epsW",
              "rho.eps")
# Over-dispersed starting values for the three MCMC chains.
inits<-list(list(t.eps=1,rho.eps=.99,t.epsW=1,rho.epsW=.99),
            list(t.eps=1.5,rho.eps=.89,t.epsW=1.5,rho.epsW=.89),
            list(t.eps=.5,rho.eps=.79,t.epsW=.5,rho.epsW=.79))
# Compile the JAGS model and run a 15k-iteration adaptation phase.
n.model.init=jags.model(file="c:\\Users\\hkropp\\Documents\\GitHub\\synthesis_database\\n_model\\Nvege_model_code.r",
                        data=datamodellist,
                        n.adapt=15000,
                        n.chains=3,
                        inits=inits)
# Draw 90k samples, thinned to every 30th iteration.
n.iter.i=90000
codaobj.init = coda.samples(n.model.init,variable.names=Samplelist,
                            n.iter=n.iter.i, thin=30)
# Quick visual convergence check on one parameter (Windows graphics device).
windows(18)
plot(codaobj.init[,"nbeta2W[3]"], ask=TRUE)
#generate summary
Mod.out<-summary(codaobj.init)
# Write posterior summaries and the organized input tables used in the run.
write.table(Mod.out$statistics, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_stats.csv",
            sep=",",row.names=TRUE)
write.table(Mod.out$quantiles, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\model_nvege_quant.csv",
            sep=",",row.names=TRUE)
write.table(vegeWNi, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegewN_for_model.csv", sep=",",)
write.table(vegeSNi, "c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u6\\organized_vegesN_for_model.csv", sep=",",)
#' guess_type
#'
#' @title guess_type
#'
#' @param x a character vector of values whose type should be guessed
#'
#' @return a character vector the same length as `x` describing the suspected
#'   class of each element: e.g. "10" is an integer, "20.11" is a double,
#'   "text" is character. `NA` inputs yield `NA` outputs.
#'
#' @description guess_type is used in vis_guess to classify each value by the
#'   type readr would parse it as.
#'
#' @export
#'
guess_type <- function(x){
  # readr:::collectorGuess(NA, locale_ = readr::locale()) returns "character",
  # so NA elements are detected up front and reported as NA instead.
  #
  # NOTE(review): readr:::collectorGuess is an *internal* readr function (:::)
  # and may change or disappear between readr releases.
  #
  # vapply over the individual elements is the fastest approach found here;
  # purrr::map needed extra function calls and was about 3x slower. Thanks Miles!
  output <- character(length(x))
  nas <- is.na(x)
  output[!nas] <- vapply(FUN = readr:::collectorGuess,
                         X = x[!nas],
                         FUN.VALUE = character(1),
                         locale_ = readr::locale())
  # Assigning logical NA into a character vector coerces it to NA_character_.
  output[nas] <- NA
  output
}
#'
#' all.equal(guess_df_1(iris), guess_df_2(iris))
#'
#' iris %>%
#' gather %>%
#' guess_vector()
#'
#' messy_df %>%
#'
#'
#'
#' mb_df <-
#' microbenchmark::microbenchmark(
#' guess_df_1(iris),
#' guess_df_2(iris)
#' )
#'
#' # then do this:
#' #
#' # new_function <- function(x) purrr::dmap(x, ~purrr::map_chr(., guess_type))
#' #
#' # or maybe make it an S3 method?
#' #
#' # guess.data.frame
#'
#' # > foo <- c(NA, "10", "10.1", "10/01/2001")
#' # > guess_type(foo)
#' # [1] NA "character" "character"
#' # [4] "character"
#' #
#' # > purrr::map_chr(foo, guess_type)
#' # [1] NA "integer" "double"
#' # [4] "character"
#' #
#' # foo_bar <- dplyr::data_frame(x1 = c(NA, "10", "10.1", "10/01/2001"),
#' # x2 = c(NA, "10.1", NA, "FALSE"))
#' #
#' # purrr:::map(foo_bar, guess_type)
#' #
#' # $x1
#' # [1] NA "character" "character"
#' # [4] "character"
#' #
#' # $x2
#' # [1] NA "character" NA
#' # [4] "character"
#' #
#' # purrr:::map_chr(foo_bar$x1, guess_type)
#' #
#' # [1] NA "integer" "double"
#' # [4] "character"
#'
#'
#'
#' # ------------------ perhaps rename guess_type to do this: purrr::map_chr(messy_vector, guess_type).
#'
| /R/guess_type.R | no_license | MilesMcBain/visdat | R | false | false | 2,298 | r | #' guess_type
#'
#' @title guess_type
#'
#' @param x is a vector of values you want to guess
#'
#' @return a character vector that describes the suspected class. e.g., "10" is an integer, "20.11" is a double, "text" is character, etc.
#'
#' @description guess_type is used in vis_guess, where it
#'
#' @export
#'
guess_type <- function(x){
# since
# readr:::collectorGuess(NA,
# locale_ = readr::locale())
#
# returns "character", use an ifelse to identify NAs
#
# Basically, this is fast way to check individual elements of a vector
# I'd like to use purrr::map for this but I couldn't get it to work without
# writing more function calls, which slowed it down, by a factor of about 3.
# So this is faster, for the moment. Thanks Miles!
#
output <- character(length(x))
nas <- is.na(x)
output[!nas] <- vapply(FUN = readr:::collectorGuess,
X = x[!nas],
FUN.VALUE = character(1),
locale_ = readr::locale())
output[nas] <- NA
output
}
#'
#' all.equal(guess_df_1(iris), guess_df_2(iris))
#'
#' iris %>%
#' gather %>%
#' guess_vector()
#'
#' messy_df %>%
#'
#'
#'
#' mb_df <-
#' microbenchmark::microbenchmark(
#' guess_df_1(iris),
#' guess_df_2(iris)
#' )
#'
#' # then do this:
#' #
#' # new_function <- function(x) purrr::dmap(x, ~purrr::map_chr(., guess_type))
#' #
#' # or maybe make it an S3 method?
#' #
#' # guess.data.frame
#'
#' # > foo <- c(NA, "10", "10.1", "10/01/2001")
#' # > guess_type(foo)
#' # [1] NA "character" "character"
#' # [4] "character"
#' #
#' # > purrr::map_chr(foo, guess_type)
#' # [1] NA "integer" "double"
#' # [4] "character"
#' #
#' # foo_bar <- dplyr::data_frame(x1 = c(NA, "10", "10.1", "10/01/2001"),
#' # x2 = c(NA, "10.1", NA, "FALSE"))
#' #
#' # purrr:::map(foo_bar, guess_type)
#' #
#' # $x1
#' # [1] NA "character" "character"
#' # [4] "character"
#' #
#' # $x2
#' # [1] NA "character" NA
#' # [4] "character"
#' #
#' # purrr:::map_chr(foo_bar$x1, guess_type)
#' #
#' # [1] NA "integer" "double"
#' # [4] "character"
#'
#'
#'
#' # ------------------ perhaps rename guess_type to do this: purrr::map_chr(messy_vector, guess_type).
#'
|
/第1部分 基本运算、函数定义与图形绘制.R | no_license | geekywhisper/DATA-ANALYTICS | R | false | false | 14,065 | r | ||
# Advent of Code 2015, day 5. `input` is assumed to be defined beforehand
# (e.g. the puzzle input already read into the session) — TODO confirm.
ip <- input[[1]]
length(ip)
# First candidate word ("woord" is Dutch for "word") and the vowel set.
woord = ip[1]
vowels <- c("a", "e", "i", "o", "u")
# Day-5 "nice string" rule 1: the word must contain at least three vowels
# (repeats count, e.g. "aaa" qualifies).
#
# The original body called strsplit(woord) without a `split` argument (an
# error) and never counted vowels at all; this implements the intended check.
has3Vowels <- function(woord) {
  # Split into single characters and count how many are vowels.
  chars <- strsplit(woord, split = "")[[1]]
  sum(chars %in% c("a", "e", "i", "o", "u")) >= 3
}
has3Vowels(ip[1]) | /2015/R/day5/solution5.R | no_license | YasmineSillis/adventofcode | R | false | false | 150 | r | ip <- input[[1]]
# `ip` holds the vector of candidate words (defined just above from `input`).
length(ip)
# First candidate word ("woord" is Dutch for "word") and the vowel set.
woord = ip[1]
vowels <- c("a", "e", "i", "o", "u")
# Day-5 "nice string" rule 1: the word must contain at least three vowels
# (repeats count, e.g. "aaa" qualifies).
#
# The original body called strsplit(woord) without a `split` argument (an
# error) and never counted vowels at all; this implements the intended check.
has3Vowels <- function(woord) {
  # Split into single characters and count how many are vowels.
  chars <- strsplit(woord, split = "")[[1]]
  sum(chars %in% c("a", "e", "i", "o", "u")) >= 3
}
has3Vowels(ip[1]) |
# Interactive exploration of the built-in iris data set (lesson transcript).
head(iris)
tail(iris)
# NOTE(review): the next two lines are syntactically invalid leftovers from
# failed attempts (and use the lowercase column name "species"); the file
# cannot be source()d past them.
iris[iris$species > 1 "setosa", ]
iris[iris$species > 1 ]
# The correct, capitalized column name:
iris$Species
# Subset rows for one species, then inspect the logical mask itself.
iris[iris$Species == "setosa", ]
iris$Species == "setosa"
# Combine conditions with the elementwise & operator.
iris[iris$Species == "setosa" & iris$Petal.Length > 1 , ]
iris[iris$Species == "setosa" & iris$Petal.Length > 1.5 , ]
# Count of matching rows (column 2 is extracted as a vector).
length(iris[iris$Species == "setosa" & iris$Petal.Length > 1 , 2 ])
# Prints a Russian greeting and returns it (print() returns its argument).
my_function <- function() {
  greeting <- "Привет"
  return(print(greeting))
}
my_function()
# kvadrat ("square" in Russian): returns x squared.
kvadrat <- function(x) {
  x^2
}
kvadrat(5)
# stepen ("power" in Russian): returns x raised to the power y.
stepen <- function(x, y) x^y
stepen(3, 2)
# kub ("cube" in Russian): returns x cubed. Note the lesson redefines kub()
# further down in terms of stepen().
kub <- function(x) {
  x^3
}
kub(3)
# kub ("cube"): same result as the earlier version, but delegates to the
# stepen() helper defined above in this script.
kub <- function(x) {
  stepen(x, 3)
}
kub(2)
# koren ("root" in Russian): square root via exponent 0.5; no input validation
# in this first version.
koren <- function(x) x^0.5
koren(2)
# koren ("root"), second version: guards against non-positive input by
# printing (and returning) "Дурак" ("fool"); for x > 0 the square root is
# returned invisibly (the last statement is an assignment).
koren <- function(x) {
  if (x <= 0) {
    return(print("Дурак"))
  }
  y <- x^0.5
}
koren(-2)
# Mean of each vector in l. The original grew mn with c(mn, m) inside a for
# loop, which copies the whole vector on every iteration; vapply preallocates
# the result and guarantees one numeric value per list element.
l <- list(-2:8, 3:10, 20:0, 10:15)
mn <- vapply(l, mean, numeric(1))
mn
| /lesson4.R | no_license | KristinaBobyleva/Matmodel | R | false | false | 985 | r | head(iris)
# Duplicate transcription of the lesson script: iris exploration, small
# practice functions, and a loop over a list of vectors.
tail(iris)
# NOTE(review): the next two lines are syntactically invalid leftovers from
# failed attempts (and use the lowercase column name "species"); the file
# cannot be source()d past them.
iris[iris$species > 1 "setosa", ]
iris[iris$species > 1 ]
iris$Species
iris[iris$Species == "setosa", ]
iris$Species == "setosa"
iris[iris$Species == "setosa" & iris$Petal.Length > 1 , ]
iris[iris$Species == "setosa" & iris$Petal.Length > 1.5 , ]
length(iris[iris$Species == "setosa" & iris$Petal.Length > 1 , 2 ])
# Prints and returns a greeting ("Привет" = "Hello" in Russian).
my_function = function(){
  return(print("Привет"))
}
my_function()
# kvadrat ("square"): x squared.
kvadrat = function(x){
  y = x^2
  return(y)
}
kvadrat(5)
# stepen ("power"): x raised to y.
stepen = function(x,y){
  z= x^y
  return(z)
}
stepen(3,2)
# kub ("cube"), first version: direct computation.
kub = function(x){
  p = x^3
  return(p)
}
kub(3)
# kub redefined in terms of the stepen() helper.
kub = function(x){
  p = stepen(x,3)
  return(p)
}
kub(2)
# koren ("root"): square root, no validation.
koren = function(x){
  y=x^0.5
  return(y)
}
koren(2)
# koren redefined with a guard: non-positive input prints "Дурак" ("fool");
# for x > 0 the root is returned invisibly (last statement is an assignment).
koren = function(x){
  if(x>0){
    y = x^0.5
  } else {
    return(print("Дурак"))
  }
}
koren(-2)
# Mean of each vector in the list, accumulated by growing mn with c().
# (Works, but vapply(l, mean, numeric(1)) would avoid repeated copying.)
l = list(-2:8,3:10,20:0,10:15)
mn = c()
for (i in 1:length(l)){
  m = mean(l[[i]])
  mn = c(mn,m)
}
mn
|
## A little simplification would be the first step toward rational living, I think.
## Eleanor Roosevelt
## Lower logging level: LogManager.getLogManager().getLogger("com.amazonaws.request").setLevel(Level.OFF);
## ref: https://forums.aws.amazon.com/thread.jspa?messageID=186655𭤟
##' AWS Support Function: set up credentials
##'
##' Sets up the credentials needed to access AWS and optionally sets
##' environment variables so the credentials can be auto-loaded in the future.
##' @param awsAccessKeyText your AWS Access Key as a string
##' @param awsSecretKeyText your AWS Secret Key as a string
##' @param setEnvironmentVariables T/F whether to set the AWSACCESSKEY and
##' AWSSECRETKEY environment variables so Segue can read them on load
##' @author James "JD" Long
##' @export
setCredentials <- function(awsAccessKeyText, awsSecretKeyText, setEnvironmentVariables = TRUE){
  # Build a Java BasicAWSCredentials object and stash it in the global
  # environment, where the other Segue helpers look for `awsCreds`.
  awsCreds <- new(com.amazonaws.auth.BasicAWSCredentials, awsAccessKeyText, awsSecretKeyText)
  assign("awsCreds", awsCreds, envir = .GlobalEnv)
  if (setEnvironmentVariables == TRUE) {
    Sys.setenv(AWSACCESSKEY = awsAccessKeyText, AWSSECRETKEY = awsSecretKeyText)
  }
}
##' AWS Support Function: Delete an S3 Key (a.k.a file)
##'
##' Deletes a key in a given bucket on S3. Does nothing if the bucket
##' does not exist.
##' @param bucketName name of the bucket
##' @param keyName the key in the bucket
##' @author James "JD" Long
##' @export
deleteS3Key <- function(bucketName, keyName){
  # A TransferManager is created only to obtain the underlying S3 client;
  # `awsCreds` must already exist (see setCredentials()).
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  if (s3$doesBucketExist(bucketName)) {
    s3$deleteObject(bucketName, keyName)
  }
}
##' AWS Support Function: Empty an S3 bucket
##'
##' Deletes all keys in the designated bucket. Does nothing if the bucket
##' does not exist.
##' @param bucketName Name of the bucket to be emptied
##' @author James "JD" Long
##' @export
emptyS3Bucket <- function(bucketName){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  # TODO: need a check to make sure the current user owns the bucket
  # before trying to delete everything in it; there's some risk this
  # might loop forever if they don't own the bucket.
  if (s3$doesBucketExist(bucketName)) {
    # List the first page of object summaries and delete each key.
    lst <- s3$listObjects(bucketName)
    objSums <- lst$getObjectSummaries()
    listJavaObjs <- .jevalArray(objSums$toArray())
    if (length(listJavaObjs)>0){
      for (i in 1:length(listJavaObjs)) {
        deleteS3Key(bucketName, listJavaObjs[[i]]$getKey()[[1]])
      }
    }
    # listObjects() pages its results; recurse until the listing is no
    # longer truncated so every key gets deleted.
    if (lst$isTruncated()){
      #recursion FTW!
      emptyS3Bucket(bucketName)
    }
  }
}
##' AWS Support Function: Delete an S3 Bucket
##'
##' Does nothing if the bucketName does not exist. If the bucket contains
##' keys, all keys are deleted first (S3 only allows deleting empty buckets).
##' @param bucketName the bucket to be deleted
##' @author James "JD" Long
##' @export
deleteS3Bucket <- function(bucketName){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  if (s3$doesBucketExist(bucketName) == TRUE) {
    emptyS3Bucket(bucketName)
    # A fresh client is created after emptying the bucket before the
    # final deleteBucket call.
    tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
    s3 <- tx$getAmazonS3Client()
    s3$deleteBucket(bucketName)
  }
}
##' AWS Support Function: Creates an S3 Bucket
##'
##' Creates an S3 bucket. If a bucket with the same name already exists,
##' no bucket is created and a warning is issued.
##' @param bucketName string of the name of the bucket to be created
##' @author James "JD" Long
##' @export
makeS3Bucket <- function(bucketName){
  #awsCreds <- get("awsCreds", envir = segue.env)
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  #test if the bucket exists; if not, make bucket
  if (s3$doesBucketExist(bucketName) == FALSE) {
    s3$createBucket(bucketName)
  } else {
    warning("Unable to Create Bucket. Bucket with same name already exists.", call. = FALSE)
  }
}
##' AWS Support Function: Uploads a local file to an S3 Bucket
##'
##' Uploads localFile into bucketName, keyed by the file's base name.
##' NOTE(review): despite earlier documentation, this function does NOT
##' create the bucket if it is missing — the putObject call will fail in
##' that case. Create the bucket first with makeS3Bucket().
##' @param bucketName destination bucket
##' @param localFile local file to be uploaded
##' @author James "JD" Long
##' @export
uploadS3File <- function(bucketName, localFile){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  # The S3 key is the file's name only (getName()), not its full local path.
  fileToUpload <- new(File, localFile)
  request <- new(com.amazonaws.services.s3.model.PutObjectRequest, bucketName, fileToUpload$getName(), fileToUpload)
  s3$putObject(request)
}
##' AWS Support Function: Downloads a key from an S3 Bucket into a local file.
##'
##' Pulls a key (file) from a bucket into a localFile. If the keyName = ".all"
##' then all files from the bucket are pulled and localFile should be a
##' directory name. S3 "folder" placeholder keys (ending in "$folder$") are
##' skipped.
##' @param bucketName destination bucket
##' @param keyName key to download. ".all" to pull all keys
##' @param localFile local file name, or a directory path if ".all" is used
##' @author James "JD" Long
##' @export
downloadS3File <- function(bucketName, keyName, localFile){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  if (keyName != ".all") {
    request <- new(com.amazonaws.services.s3.model.GetObjectRequest, bucketName, keyName)
    theObject <- s3$getObject(request, new(java.io.File, localFile))
  } else {
    # Only the first page of listings is fetched, so buckets with many keys
    # will not be fully downloaded.
    #
    # TODO: make it pull multiple pages of files
    # TODO: pull subdirectories too
    system(paste("mkdir", localFile), ignore.stderr = TRUE)
    lst <- s3$listObjects(bucketName)
    objSums <- lst$getObjectSummaries()
    listJavaObjs <- .jevalArray(objSums$toArray())
    if (length(listJavaObjs)>0){
      for (i in 1:length(listJavaObjs)) {
        # Skip "$folder$" placeholder keys; real keys are fetched by
        # recursing into the single-key branch above.
        key <- listJavaObjs[[i]]$getKey()[[1]]
        #if ( length( unlist(strsplit(key, split="/")) ) == 1) {
        if (substring( key, nchar( key ) - 7, nchar( key ) ) != "$folder$") {
          localFullFile <- paste(localFile, "/", listJavaObjs[[i]]$getKey()[[1]], sep="")
          downloadS3File(bucketName, listJavaObjs[[i]]$getKey()[[1]], localFullFile)
        }
        #}
      }
    }
  }
}
##' Creates the configuration object, uploads needed files, and starts
##' a Segue Hadoop cluster on Elastic Map Reduce.
##'
##' The needed files are uploaded to S3 and the EMR nodes are started.
##' @param numInstances number of nodes (EC2 instances)
##' @param cranPackages vector of string names of CRAN packages to load on each cluster node
##' @param customPackages vector of string file names of custom packages to load on each cluster node
##' @param filesOnNodes vector of string names of full path of files to be loaded on each node.
##' Files will be loaded into the local path (i.e. ./file) on each node.
##' @param rObjectsOnNodes a named list of R objects which will be passed to the R
##' session on the worker nodes. Be sure the list has names. The list will be attached
##' on the remote nodes using attach(rObjectsOnNodes). If your list does not have names,
##' this will fail.
##' @param enableDebugging T/F whether EMR debugging should be enabled
##' @param instancesPerNode Number of R instances per node. Default of NULL uses AWS defaults.
##' @param masterInstanceType EC2 instance type for the master node
##' @param slaveInstanceType EC2 instance type for the slave nodes
##' @param location EC2 location name for the cluster
##' @param ec2KeyName EC2 Key used for logging into the main node. Use the user name 'hadoop'
##' @param copy.image T/F whether to copy the entire local environment to the nodes. If this feels
##' fast and loose... you're right! It's nuts. Use it with caution. Very handy when you really need it.
##' @param otherBootstrapActions a list-of-lists of other bootstrap actions to run; child list
##' members are: "name" == unique identifier of this bootstrap action; "localFile" == path to a
##' local script to be uploaded to the temp area in S3; "s3file" == path to an existing script
##' in S3 (won't be uploaded to the temp area); "args" == vector of character arguments.
##' "localFile" and "s3file" are mutually exclusive but one is required; "args" is optional.
##' @param sourcePackagesToInstall vector of full paths to source packages to be installed on each node
##' @param masterBidPrice Bid price for master server
##' @param slaveBidPrice Bid price for slave (task) server
##' @return an emrlapply() cluster object with appropriate fields
##' populated. Keep in mind that this creates the cluster and starts the cluster running.
##' @author James "JD" Long
##' @examples
##' \dontrun{
##'   myCluster <- createCluster(numInstances=2,
##'                              cranPackages=c("Hmisc", "plyr"))
##' }
##' @export
createCluster <- function(numInstances=2,
                          cranPackages=NULL,
                          customPackages=NULL,
                          filesOnNodes=NULL,
                          rObjectsOnNodes=NULL,
                          enableDebugging=FALSE,
                          instancesPerNode=NULL,
                          masterInstanceType="m1.large",
                          slaveInstanceType="m1.large",
                          location = "us-east-1c",
                          ec2KeyName=NULL,
                          copy.image=FALSE ,
                          otherBootstrapActions=NULL,
                          sourcePackagesToInstall=NULL,
                          masterBidPrice=NULL,
                          slaveBidPrice=NULL
                          ){
  ## this used to be an argument but not bootstrapping
  ## caused too many problems
  bootStrapLatestR=TRUE
  # Collect every setting in one list; the duplicated enableDebugging entry
  # the original carried has been removed ($-access always hit the first one).
  clusterObject <- list(numInstances = numInstances,
                        cranPackages = cranPackages,
                        customPackages = customPackages,
                        enableDebugging = enableDebugging,
                        bootStrapLatestR = bootStrapLatestR,
                        filesOnNodes = filesOnNodes,
                        rObjectsOnNodes = rObjectsOnNodes,
                        instancesPerNode = instancesPerNode,
                        masterInstanceType = masterInstanceType,
                        slaveInstanceType = slaveInstanceType,
                        location = location,
                        ec2KeyName = ec2KeyName ,
                        copy.image = copy.image ,
                        otherBootstrapActions = otherBootstrapActions,
                        sourcePackagesToInstall = sourcePackagesToInstall,
                        masterBidPrice = masterBidPrice,
                        slaveBidPrice = slaveBidPrice
                        )
  # Segue requires a 64-bit OS, so m1.small is silently upgraded (with a
  # console warning) to m1.large for both node types.
  if ( tolower(masterInstanceType) == "m1.small") {
    clusterObject$masterInstanceType <- "m1.large"
    print("WARNING: masterInstanceType set to m1.small. Segue requires 64 bit OS so the masterInstanceType is being changed to m1.large. You will be billed by Amazon accordingly.")
  }
  if ( tolower(slaveInstanceType) == "m1.small") {
    clusterObject$slaveInstanceType <- "m1.large"
    print("WARNING: slaveInstanceType set to m1.small. Segue requires 64 bit OS so the slaveInstanceType is being changed to m1.large. You will be billed by Amazon accordingly.")
  }
  # Unique local scratch directory (random 10-char suffix) plus an /out
  # subdirectory for results.
  localTempDir <- paste(tempdir(),
                        paste(sample(c(0:9, letters), 10, rep=TRUE), collapse=""),
                        "-segue",
                        sep="")
  clusterObject$localTempDir <- localTempDir
  clusterObject$localTempDirOut <- paste(localTempDir, "/out", sep="")
  dir.create(localTempDir, showWarnings = TRUE, recursive = TRUE, mode = "0777")
  dir.create(clusterObject$localTempDirOut, showWarnings = TRUE, recursive = TRUE, mode = "0777")
  # The S3 temp bucket name is the lowercased last path element of the local
  # temp dir; any leftover buckets with the same names are removed first.
  s3TempDir <- tolower(unlist(strsplit(localTempDir, "/"))[length(unlist(strsplit(localTempDir, "/")))])
  deleteS3Bucket(s3TempDir)
  clusterObject$s3TempDir <- s3TempDir
  s3TempDirOut <- tolower(paste(s3TempDir , "out", sep=""))
  deleteS3Bucket(s3TempDirOut)
  clusterObject$s3TempDirOut <- s3TempDirOut
  #create the s3 bucket
  ## TODO: error check this
  makeS3Bucket(s3TempDir)
  #upload the bootstrapper to S3
  if (bootStrapLatestR==TRUE) {
    ##TODO: error checking in the uploadS3File function
    uploadS3File(s3TempDir, system.file("bootstrapLatestR.sh", package="segue") )
    uploadS3File(s3TempDir, system.file("update.R", package="segue") )
  }
  clusterObject$bootStrapLatestR <- bootStrapLatestR
  ## if copy.image is TRUE then save an image and use the filesOnNodes
  ## feature to add the saved image to the nodes
  if (copy.image == TRUE) {
    imageFile <- paste( localTempDir, "/local-workspace-image.RData", sep="" )
    save.image( file=imageFile, compress=TRUE )
    clusterObject$filesOnNodes = c(clusterObject$filesOnNodes, imageFile)
  }
  ## if customPackages are present, add them to the filesOnNodes
  if (is.null(customPackages) == FALSE) {
    clusterObject$filesOnNodes = c(clusterObject$filesOnNodes, customPackages)
  }
  # start cluster
  jobFlowId <- startCluster(clusterObject)
  clusterObject$jobFlowId <- jobFlowId
  return(clusterObject)
}
##' AWS Support Function: Checks the status of a given job on EMR
##'
##' Checks the execution state of a previously started job flow.
##' @param jobFlowId the Job Flow Id of the job to check
##' @return the job flow's execution state as a string
##' @author James "JD" Long
##' @export
checkStatus <- function(jobFlowId){
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  request <- new( com.amazonaws.services.elasticmapreduce.model.DescribeJobFlowsRequest )
  # DescribeJobFlows expects a Java list of IDs; query just this one flow.
  detailsList <- new( java.util.ArrayList )
  detailsList$add(jobFlowId)
  request$setJobFlowIds(detailsList)
  descriptions <- as.list(service$describeJobFlows(request)$getJobFlows())
  descriptions[[1]]$getExecutionStatusDetail()$getState()
}
##' AWS Support Function: Checks the status of the last step of a job on EMR
##'
##' Checks the execution state of the most recent step of a previously
##' started job flow.
##' @param jobFlowId the Job Flow Id of the job to check
##' @return the last step's execution state as a string
##' @author James "JD" Long
##' @export
checkLastStepStatus <- function(jobFlowId){
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  request <- new( com.amazonaws.services.elasticmapreduce.model.DescribeJobFlowsRequest )
  # DescribeJobFlows expects a Java list of IDs; query just this one flow.
  detailsList <- new( java.util.ArrayList )
  detailsList$add(jobFlowId)
  request$setJobFlowIds(detailsList)
  descriptions <- as.list(service$describeJobFlows(request)$getJobFlows())
  #descriptions[[1]]$getExecutionStatusDetail()$getState()
  # Steps are returned in order; report the state of the most recent one.
  steps <- as.list(descriptions[[1]]$getSteps())
  step <- steps[[length(steps)]] #grab the last step only
  status <- step$getExecutionStatusDetail()
  status$getState()
}
##' Starts a cluster on Amazon's EMR service
##'
##' After a cluster has been defined with createCluster() this function actually
##' starts the machines running. Currently exported, but soon will be internal only.
##' The sequence is: build the list of bootstrap actions (latest-R install,
##' caller-supplied actions, node files, source packages, instances-per-node
##' tuning), configure the instance groups / counts, submit the RunJobFlow
##' request, then poll until the cluster reaches a terminal or WAITING state.
##'
##' @param clusterObject cluster object to start
##' @return a Job Flow ID
##'
##' @export
startCluster <- function(clusterObject){
  numInstances <- clusterObject$numInstances
  s3TempDir <- clusterObject$s3TempDir
  s3TempDirOut <- clusterObject$s3TempDirOut
  bootStrapLatestR <- clusterObject$bootStrapLatestR
  verbose <- TRUE
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  request <- new( com.amazonaws.services.elasticmapreduce.model.RunJobFlowRequest )
  conf <- new( com.amazonaws.services.elasticmapreduce.model.JobFlowInstancesConfig )
  ## creates the bootstrap list; every branch below appends
  ## BootstrapActionConfig objects to this ArrayList
  bootStrapList <- new( java.util.ArrayList )
  if (bootStrapLatestR == TRUE) {
    ## bootstrapLatestR.sh was uploaded to the temp bucket by createCluster()
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/bootstrapLatestR.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("R-InstallLatest"))
    bootStrapList$add(bootStrapConfig)
    ## update packages via the update.R script, also uploaded by createCluster()
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/update.R", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("R-UpdatePackages"))
    bootStrapList$add(bootStrapConfig)
  }
  ## handle additional bootstrap actions, if requested.
  ## each action is a named list with "name", "localFile" OR "s3file", and
  ## optionally "args" (see createCluster() docs)
  if ( ! is.null(clusterObject$otherBootstrapActions) ){
    ## TODO: more graceful exit here? or would stopifnot() be appropriate, in this case?
    stopifnot( "list" == class(clusterObject$otherBootstrapActions) )
    invisible( sapply( clusterObject$otherBootstrapActions , function( action ){
      scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
      ## are we uploading a local file to run? or will we use a script that already exists in
      ## (a non-temporary) S3 bucket?
      if( ! is.null( action$localFile ) ){
        uploadS3File(clusterObject$s3TempDir , action$localFile)
        scriptBootActionConfig$setPath(paste("s3://", clusterObject$s3TempDir, "/" , basename( action$localFile ) , sep=""))
      }else if( ! is.null( action$s3file ) ){
        scriptBootActionConfig$setPath(action$s3file)
      }
      if( ! is.null( action$args ) ){
        ## TODO: proper quoting around args? or leave that for caller?
        argsAsList <- new( java.util.ArrayList )
        sapply( action$args , function(item){ argsAsList$add(item) } )
        scriptBootActionConfig$withArgs(argsAsList)
      }
      bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
      with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
      with( bootStrapConfig, setName(action$name))
      bootStrapList$add(bootStrapConfig)
    } ) )
  }
  if (is.null(clusterObject$filesOnNodes) == FALSE) { # putting files on each node
    print("INFO: You have selected files to be put on each node. These files are being uploaded to S3.")
    ## build a batch file (bootStrapFiles.sh) that includes each element of
    ## filesOnNodes, then add the batch file as a boot strap action; each node
    ## runs it and pulls the files into /tmp/segue-upload/
    ## open an output file connection
    outfile <- file( paste( clusterObject$localTempDir, "/bootStrapFiles.sh", sep="" ), "w" )
    cat("#!/bin/bash", "", file = outfile, sep = "\n")
    cat("mkdir /tmp/segue-upload/", "", file = outfile, sep = "\n")
    ## for each element in filesOnNodes add a hadoop -fs line
    for ( file in clusterObject$filesOnNodes ){
      remotePath <- paste( "/tmp/segue-upload/", tail(strsplit(file,"/")[[1]], 1), sep="" )
      fileName <- tail(strsplit(file,"/")[[1]], 1)
      s3Path <- paste( "s3://", clusterObject$s3TempDir, "/", fileName, sep="" )
      cat( paste( "hadoop fs -get ", s3Path, remotePath)
          , file = outfile, sep = "\n" )
      cat( "\n", file = outfile )
      # copy each file to S3 so the nodes can fetch it
      uploadS3File( clusterObject$s3TempDir, file )
    }
    close( outfile )
    # copy bootStrapFiles.sh to clusterObject$s3TempDir
    uploadS3File( clusterObject$s3TempDir, paste( clusterObject$localTempDir, "/bootStrapFiles.sh", sep="" ) )
    # add a bootstrap action that runs bootStrapFiles.sh
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/bootStrapFiles.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("RBootStrapFiles"))
    bootStrapList$add(bootStrapConfig)
    print("INFO: Upload of files to S3 is complete.")
  }
  if (is.null(clusterObject$sourcePackagesToInstall) == FALSE) {
    print("INFO: Now building sources packages to install and uploading them based on the sourcePackagesToInstall list.")
    ## build a batch file (installSourcePackages.sh) that fetches each source
    ## package from S3 and runs R CMD INSTALL on it, then add the batch file
    ## as a boot strap action
    ## open an output file connection
    outfile <- file( paste( clusterObject$localTempDir, "/installSourcePackages.sh", sep="" ), "w" )
    cat("#!/bin/bash", "", file = outfile, sep = "\n")
    cat("mkdir /tmp/segue-source-packages/", "", file = outfile, sep = "\n")
    ## for each element in sourcePackagesToInstall add a hadoop -fs line
    for ( file in clusterObject$sourcePackagesToInstall ){
      remotePath <- paste( "/tmp/segue-source-packages/", tail(strsplit(file,"/")[[1]], 1), sep="" )
      fileName <- tail(strsplit(file,"/")[[1]], 1)
      s3Path <- paste( "s3://", clusterObject$s3TempDir, "/", fileName, sep="" )
      cat( paste( "hadoop fs -get ", s3Path, remotePath)
          , file = outfile, sep = "\n" )
      cat( "\n", file = outfile )
      cat( "sudo R CMD INSTALL ", remotePath, "\n", file = outfile, sep = "" )
      # copy each file to S3
      uploadS3File( clusterObject$s3TempDir, file )
    }
    close( outfile )
    # copy installSourcePackages.sh to clusterObject$s3TempDir
    uploadS3File( clusterObject$s3TempDir, paste( clusterObject$localTempDir, "/installSourcePackages.sh", sep="" ) )
    # add a bootstrap action that runs installSourcePackages.sh
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/installSourcePackages.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("RinstallSourcePackages"))
    bootStrapList$add(bootStrapConfig)
    print("INFO: Source packages uploaded.")
  }
  if (is.null(clusterObject$instancesPerNode) == FALSE) { # seriously... test this
    ## uses Amazon's stock configure-hadoop bootstrap action to cap the
    ## number of map/reduce tasks (and therefore R instances) per node
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath("s3://elasticmapreduce/bootstrap-actions/configure-hadoop")
    argList <- new( java.util.ArrayList )
    argList$add( "-s" )
    argList$add( paste( "mapred.tasktracker.map.tasks.maximum=", clusterObject$instancesPerNode, sep="") )
    argList$add( "-s" )
    argList$add( paste( "mapred.tasktracker.reduce.tasks.maximum=", clusterObject$instancesPerNode, sep="") )
    scriptBootActionConfig$setArgs( argList )
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("SetInstancePerNode"))
    bootStrapList$add(bootStrapConfig)
  }
  ## this adds the bootstrap to the request
  request$setBootstrapActions(bootStrapList)
  if ( is.null( clusterObject$ec2KeyName ) != TRUE ) {
    conf$setEc2KeyName(clusterObject$ec2KeyName)
  }
  ## spot-market configuration: if either bid price is set, instance groups
  ## are used; otherwise plain instance count + types are configured below
  instanceGroups <- .jnew("java/util/Vector")
  if (!is.null(clusterObject$masterBidPrice)) {
    masterGroupConf <- new( com.amazonaws.services.elasticmapreduce.model.InstanceGroupConfig )
    masterGroupConf$setInstanceCount(new(Integer, as.character(1)))
    masterGroupConf$setInstanceRole(new(String, as.character("MASTER")))
    masterGroupConf$setInstanceType(new(String, as.character(clusterObject$masterInstanceType)))
    masterGroupConf$setMarket(new(String, as.character("SPOT")))
    masterGroupConf$setBidPrice(new(String, as.character(clusterObject$masterBidPrice)))
    instanceGroups$add(masterGroupConf)
  }
  if (!is.null(clusterObject$slaveBidPrice)) {
    slaveGroupConf <- new( com.amazonaws.services.elasticmapreduce.model.InstanceGroupConfig )
    ## one node is the master, so slaves get numInstances - 1
    slaveGroupConf$setInstanceCount(new(Integer, as.character(numInstances - 1)))
    slaveGroupConf$setInstanceRole(new(String, as.character("CORE")))
    slaveGroupConf$setInstanceType(new(String, as.character(clusterObject$slaveInstanceType)))
    slaveGroupConf$setMarket(new(String, as.character("SPOT")))
    slaveGroupConf$setBidPrice(new(String, as.character(clusterObject$slaveBidPrice)))
    instanceGroups$add(slaveGroupConf)
  }
  if (!is.null(clusterObject$masterBidPrice) || !is.null(clusterObject$slaveBidPrice)) {
    conf$setInstanceGroups(instanceGroups)
  } else {
    # Must configure instances either using instance count,
    # master and slave instance type or instance groups but not both
    conf$setInstanceCount(new(Integer, as.character(numInstances)))
    conf$setMasterInstanceType( clusterObject$masterInstanceType )
    conf$setSlaveInstanceType( clusterObject$slaveInstanceType )
  }
  ## keep the cluster alive after each step so multiple jobs can be submitted
  conf$setKeepJobFlowAliveWhenNoSteps(new(Boolean, TRUE))
  conf$setPlacement(new(com.amazonaws.services.elasticmapreduce.model.PlacementType, clusterObject$location))
  conf$setHadoopVersion("0.20.205")
  request$setInstances(conf)
  ## EMR logs go to a sibling bucket named <s3TempDir>-logs
  request$setLogUri(paste("s3://", s3TempDir, "-logs", sep=""))
  jobFlowName <- paste("RJob-", date(), sep="")
  request$setName(jobFlowName)
  request$setAmiVersion("2.0.4")
  result <- service$runJobFlow(request)
  ## seems like this sleep should not be needed... but otherwise
  ## getJobFlowId() does not get the correct jobflowid
  Sys.sleep(15)
  jobFlowId <- result$getJobFlowId()
  ## poll every 30 seconds until the cluster reaches a terminal state or
  ## WAITING (which means it is up and ready for steps)
  currentStatus <- checkStatus(jobFlowId)
  while (currentStatus %in% c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED") == FALSE) {
    Sys.sleep(30)
    currentStatus <- checkStatus(jobFlowId)
    message(paste(currentStatus, " - ", Sys.time(), sep="" ))
  }
  if (currentStatus == "WAITING") {
    message("Your Amazon EMR Hadoop Cluster is ready for action. \nRemember to terminate your cluster with stopCluster().\nAmazon is billing you!")
  }
  return(jobFlowId)
  ## TODO: need to catch situations where the cluster failed
}
##' Stops a running cluster
##'
##' Stops a running cluster and deletes temporary directories from EC2
##'
##' @return nothing
##' @author James "JD" Long
##' @param clusterObject a cluster object to stop
##' @export
stopCluster <- function(clusterObject){
  flowId <- clusterObject$jobFlowId
  emrClient <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  terminateRequest <- new( com.amazonaws.services.elasticmapreduce.model.TerminateJobFlowsRequest )
  idList <- new( java.util.ArrayList )
  idList$add(flowId)
  terminateRequest$withJobFlowIds(idList)
  emrClient$terminateJobFlows(terminateRequest)
  ## best-effort removal of the three temporary buckets; failures are ignored
  removeTempBuckets <- function() {
    try( deleteS3Bucket( clusterObject$s3TempDir ), silent = TRUE )
    try( deleteS3Bucket( paste( clusterObject$s3TempDir, "-logs", sep = "" ) ), silent = TRUE )
    try( deleteS3Bucket( clusterObject$s3TempDirOut ), silent = TRUE )
  }
  ## I have no idea why AWS needs sleep before
  ## I can delete the temp dirs, but these fail
  ## if I don't have the sleep
  Sys.sleep(10)
  removeTempBuckets()
  ## something weird is going on... a second pass is needed or the
  ## s3TempDir's subdirectory is not fully deleted
  ## will need to give this some attention later
  Sys.sleep(15)
  removeTempBuckets()
}
##' Submits a job to a running cluster
##'
##' After a cluster has been started this function submits jobs to that cluster.
##' If a job is submitted with enableDebugging=TRUE, all jobs submitted to that
##' cluster will also have debugging enabled. To turn debugging off, the cluster
##' must be stopped and restarted.
##'
##' The job is a Hadoop streaming step: the mapper is `cat` and the reducer is
##' the mapper.R script previously uploaded to the cluster's S3 temp bucket.
##' The function blocks, polling every 30 seconds, until the job flow reaches
##' a terminal or WAITING state.
##'
##' @param clusterObject a cluster object to submit to
##' @param stopClusterOnComplete set to true if you want the cluster to be shut down
##'        after job completes
##' @param taskTimeout maximum time a single unit of work can run (in minutes)
##' @return Execution status of this job
##'
##' @export
submitJob <- function(clusterObject, stopClusterOnComplete=FALSE, taskTimeout=10){
  jobFlowId <- clusterObject$jobFlowId
  s3TempDir <- clusterObject$s3TempDir
  s3TempDirOut <- clusterObject$s3TempDirOut
  enableDebugging <- clusterObject$enableDebugging
  ## clear any previous output, both on S3 and locally
  try(deleteS3Bucket(s3TempDirOut), silent=TRUE)
  unlink(clusterObject$localTempDirOut, recursive = TRUE)
  dir.create(clusterObject$localTempDirOut)
  if (enableDebugging==TRUE){
    ## add Amazon's state-pusher step, which enables EMR debugging for the
    ## whole job flow
    service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
    hadoopJarStep <- new(com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig)
    hadoopJarStep$setJar("s3://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar")
    argList <- new( java.util.ArrayList )
    argList$add( "s3://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch" )
    hadoopJarStep$setArgs(argList)
    stepName <- format(Sys.time(), "%Y-%m-%d_%H:%M:%OS5")
    stepConfig <- new(com.amazonaws.services.elasticmapreduce.model.StepConfig, stepName, hadoopJarStep)
    stepConfig$setActionOnFailure("CANCEL_AND_WAIT")
    stepList <- new( java.util.ArrayList )
    stepList$add( stepConfig )
    request <- new( com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsRequest, jobFlowId, stepList)
    service$addJobFlowSteps(request)
  }
  ## build the Hadoop streaming step
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  hadoopJarStep <- new(com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig)
  hadoopJarStep$setJar("/home/hadoop/contrib/streaming/hadoop-streaming.jar")
  argList <- new( java.util.ArrayList )
  # the task timeout is passed to us in minutes, but AWS/EMR expects it in milliseconds
  taskTimeoutMilliseconds <- taskTimeout * 60 * 1000
  argList$add( "-D" )
  argList$add( paste( "mapred.task.timeout=" , taskTimeoutMilliseconds , sep="" ) )
  ## ship emrData.RData to every task via the distributed cache
  argList$add( "-cacheFile" )
  argList$add( paste("s3n://", s3TempDir, "/emrData.RData#emrData.RData", sep=""))
  argList$add( "-input" )
  argList$add( paste("s3n://", s3TempDir, "/stream.txt", sep="") )
  argList$add( "-output" )
  argList$add( paste("s3n://", s3TempDirOut, "/results", sep="") )
  ## identity mapper; the real R work runs in the reducer (mapper.R)
  argList$add( "-mapper" )
  argList$add( "cat" )
  argList$add( "-reducer" )
  argList$add( paste("s3n://", s3TempDir, "/mapper.R", sep="" ) )
  hadoopJarStep$setArgs(argList)
  stepName <- format(Sys.time(), "%Y-%m-%d_%H:%M:%OS5")
  stepConfig <- new(com.amazonaws.services.elasticmapreduce.model.StepConfig, stepName, hadoopJarStep)
  stepConfig$setActionOnFailure("CANCEL_AND_WAIT")
  stepList <- new( java.util.ArrayList )
  stepList$add( stepConfig )
  request <- new( com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsRequest, jobFlowId, stepList)
  ## make sure the output bucket is gone right before the step runs,
  ## otherwise the streaming job fails on an existing output path
  try(deleteS3Bucket(clusterObject$s3TempDirOut), silent=TRUE)
  # submit to EMR happens here
  service$addJobFlowSteps(request)
  Sys.sleep(15)
  ## NOTE: result discarded; kept as a liveness probe of the step API
  checkLastStepStatus(jobFlowId)
  Sys.sleep(15)
  ## poll until the job flow settles in a terminal or WAITING state
  currentStatus <- checkStatus(jobFlowId)
  while (currentStatus %in% c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED") == FALSE) {
    Sys.sleep(30)
    currentStatus <- checkStatus(jobFlowId)
    message(paste(currentStatus, " - ", Sys.time(), sep="" ))
  }
  if (stopClusterOnComplete==TRUE) {
    stopCluster(clusterObject)
  }
  return(currentStatus)
}
| /R/awsFunctions.R | no_license | leeper/segue | R | false | false | 32,174 | r |
## A little simplification would be the first step toward rational living, I think.
## Eleanor Roosevelt
## Lower logging level: LogManager.getLogManager().getLogger("com.amazonaws.request").setLevel(Level.OFF);
## ref: https://forums.aws.amazon.com/thread.jspa?messageID=186655𭤟
##' AWS Support Function: set up credentials
##'
##' Sets up the credentials needed to access AWS and optionally sets environment
##' variables (AWSACCESSKEY / AWSSECRETKEY) for auto loading of credentials in
##' the future.
##' @param awsAccessKeyText your AWS Access Key as a string
##' @param awsSecretKeyText your AWS Secret Key as a string
##' @param setEnvironmentVariables T/F would you like environment variables to be set so
##' Segue will read the credentials on load
##' @author James "JD" Long
##' @export
setCredentials <- function(awsAccessKeyText, awsSecretKeyText, setEnvironmentVariables = TRUE){
  awsCreds <- new(com.amazonaws.auth.BasicAWSCredentials, awsAccessKeyText, awsSecretKeyText)
  ## stash the credential object in the global environment; the other AWS
  ## helpers in this file read `awsCreds` from there
  assign("awsCreds", awsCreds, envir = .GlobalEnv)
  if (setEnvironmentVariables == TRUE) {
    Sys.setenv(AWSACCESSKEY = awsAccessKeyText, AWSSECRETKEY = awsSecretKeyText)
  }
}
##' AWS Support Function: Delete an S3 Key (a.k.a file)
##'
##' Deletes a single key from the given bucket on S3. Does nothing if the
##' bucket does not exist.
##' @param bucketName name of the bucket
##' @param keyName the key in the bucket
##' @author James "JD" Long
##' @export
deleteS3Key <- function(bucketName, keyName){
  transferMgr <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3Client <- transferMgr$getAmazonS3Client()
  if (s3Client$doesBucketExist(bucketName)) {
    s3Client$deleteObject(bucketName, keyName)
  }
}
##' AWS Support Function: Empty an S3 bucket
##'
##' Deletes all keys in the designated bucket. S3 object listings are paged,
##' so the function recurses until the listing is no longer truncated.
##' @param bucketName Name of the bucket to be emptied
##' @author James "JD" Long
##' @export
emptyS3Bucket <- function(bucketName){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  # TODO: need a check to make sure the current user owns the bucket
  # before trying to delete everything in it
  # there's some risk this might loop forever if they don't own the bucket
  if (s3$doesBucketExist(bucketName)) {
    lst <- s3$listObjects(bucketName)
    objSums <- lst$getObjectSummaries()
    listJavaObjs <- .jevalArray(objSums$toArray())
    ## seq_along() iterates zero times on an empty listing, so no explicit
    ## length guard is needed
    for (i in seq_along(listJavaObjs)) {
      deleteS3Key(bucketName, listJavaObjs[[i]]$getKey()[[1]])
    }
    if (lst$isTruncated()){
      ## more pages of keys remain: recursion FTW!
      emptyS3Bucket(bucketName)
    }
  }
}
##' AWS Support Function: Delete an S3 Bucket
##'
##' Does nothing if the bucketName does not exist. If bucket contains Keys,
##' all keys are deleted before the bucket itself is removed.
##' @param bucketName the bucket to be deleted
##' @author James "JD" Long
##' @export
deleteS3Bucket <- function(bucketName){
  transferMgr <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3Client <- transferMgr$getAmazonS3Client()
  if (s3Client$doesBucketExist(bucketName) == TRUE) {
    emptyS3Bucket(bucketName)
    ## build a fresh client after emptying before deleting the bucket itself
    transferMgr <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
    s3Client <- transferMgr$getAmazonS3Client()
    s3Client$deleteBucket(bucketName)
  }
}
##' AWS Support Function: Creates an S3 Bucket
##'
##' Creates an S3 bucket. If a bucket with the same name already exists,
##' nothing is created and a warning is issued.
##' @param bucketName string of the name of the bucket to be created
##' @author James "JD" Long
##' @export
makeS3Bucket <- function(bucketName){
  transferMgr <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3Client <- transferMgr$getAmazonS3Client()
  bucketAlreadyExists <- s3Client$doesBucketExist(bucketName)
  if (bucketAlreadyExists) {
    warning("Unable to Create Bucket. Bucket with same name already exists.", call. = FALSE)
  } else {
    s3Client$createBucket(bucketName)
  }
}
##' AWS Support Function: Uploads a local file to an S3 Bucket
##'
##' Uploads localFile into bucketName; the S3 key is the file's base name.
##' @param bucketName destination bucket
##' @param localFile local file to be uploaded
##' @author James "JD" Long
##' @export
uploadS3File <- function(bucketName, localFile){
  transferMgr <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3Client <- transferMgr$getAmazonS3Client()
  sourceFile <- new(File, localFile)
  putRequest <- new(com.amazonaws.services.s3.model.PutObjectRequest,
                    bucketName, sourceFile$getName(), sourceFile)
  s3Client$putObject(putRequest)
}
##' AWS Support Function: Downloads a key from an S3 Bucket into a local file.
##'
##' Pulls a key (file) from a bucket into a localFile. If the keyName = ".all" then
##' all files from the bucket are pulled and localFile should be a
##' directory name. S3 "subdirectory" placeholder keys (ending in
##' "$folder$") are skipped.
##' @param bucketName destination bucket
##' @param keyName key to download. ".all" to pull all keys
##' @param localFile local file name or path if ".all" is called for keyName
##' @author James "JD" Long
##' @export
downloadS3File <- function(bucketName, keyName, localFile){
  tx <- new(com.amazonaws.services.s3.transfer.TransferManager, awsCreds)
  s3 <- tx$getAmazonS3Client()
  if (keyName != ".all") {
    request <- new(com.amazonaws.services.s3.model.GetObjectRequest, bucketName, keyName)
    theObject <- s3$getObject(request, new(java.io.File, localFile))
  } else {
    # this will only pull the first page of listings
    # so if there are a lot of files it won't grab them all
    #
    # TODO: make it pull multiple pages of files
    # TODO: pull subdirectories too
    ## dir.create() is the portable equivalent of shelling out to mkdir;
    ## showWarnings = FALSE keeps it quiet when the directory already exists
    dir.create(localFile, showWarnings = FALSE)
    lst <- s3$listObjects(bucketName)
    objSums <- lst$getObjectSummaries()
    listJavaObjs <- .jevalArray(objSums$toArray())
    for (i in seq_along(listJavaObjs)) {
      key <- listJavaObjs[[i]]$getKey()[[1]]
      ## filter out S3 "subdirectory" placeholder keys
      if (substring(key, nchar(key) - 7, nchar(key)) != "$folder$") {
        localFullFile <- paste(localFile, "/", key, sep = "")
        downloadS3File(bucketName, key, localFullFile)
      }
    }
  }
}
##' Creates the configuration object, uploads needed files, and starts
##' a Segue Hadoop cluster on Elastic Map Reduce.
##'
##' The the needed files are uploaded to S3 and the EMR nodes are started.
##' @param numInstances number of nodes (EC2 instances)
##' @param cranPackages vector of string names of CRAN packages to load on each cluster node
##' @param customPackages vector of string file names of custom packages to load on each cluster node
##' @param filesOnNodes vector of string names of full path of files to be loaded on each node.
##'   Files will be loaded into the local
##'   path (i.e. ./file) on each node.
##' @param rObjectsOnNodes a named list of R objects which will be passed to the R
##'   session on the worker nodes. Be sure the list has names. The list will be attached
##'   on the remote nodes using attach(rObjectsOnNodes). If you list does not have names,
##'   this will fail.
##' @param enableDebugging T/F whether EMR debugging should be enabled
##' @param instancesPerNode Number of R instances per node. Default of NULL uses AWS defaults.
##' @param masterInstanceType EC2 instance type for the master node
##' @param slaveInstanceType EC2 instance type for the slave nodes
##' @param location EC2 location name for the cluster
##' @param ec2KeyName EC2 Key used for logging into the main node. Use the user name 'hadoop'
##' @param copy.image T/F whether to copy the entire local environment to the nodes. If this feels
##'   fast and loose... you're right! It's nuts. Use it with caution. Very handy when you really need it.
##' @param otherBootstrapActions a list-of-lists of other bootstrap actions to run; child list members
##'   are: "name" == unique identifier of this bootstrap action ; "localFile" == path to local script
##'   to be uploaded to the temp area in S3; "s3file" == path to an existing script in S3 (won't be
##'   uploaded to the temp area); "args" == vector of character arguments. "localFile" and "s3file"
##'   are mutually exclusive but one is required; "args" is optional.
##' @param sourcePackagesToInstall vector of full paths to source packages to be installed on each node
##' @param masterBidPrice Bid price for master server
##' @param slaveBidPrice Bid price for slave (task) server
##' @return an emrlapply() cluster object with appropriate fields
##'   populated. Keep in mind that this creates the cluster and starts the cluster running.
##' @author James "JD" Long
##' @examples
##' \dontrun{
##' myCluster <- createCluster(numInstances=2,
##'                            cranPackages=c("Hmisc", "plyr"))
##' }
##' @export
createCluster <- function(numInstances=2,
                          cranPackages=NULL,
                          customPackages=NULL,
                          filesOnNodes=NULL,
                          rObjectsOnNodes=NULL,
                          enableDebugging=FALSE,
                          instancesPerNode=NULL,
                          masterInstanceType="m1.large",
                          slaveInstanceType="m1.large",
                          location = "us-east-1c",
                          ec2KeyName=NULL,
                          copy.image=FALSE ,
                          otherBootstrapActions=NULL,
                          sourcePackagesToInstall=NULL,
                          masterBidPrice=NULL,
                          slaveBidPrice=NULL
                          ){
  ## this used to be an argument but not bootstrapping
  ## caused too many problems
  bootStrapLatestR <- TRUE
  ## NOTE: the original code listed enableDebugging twice in this list;
  ## the duplicate entry has been removed (list() keeps both, but only the
  ## first is ever returned by $enableDebugging)
  clusterObject <- list(numInstances = numInstances,
                        cranPackages = cranPackages,
                        customPackages = customPackages,
                        enableDebugging = enableDebugging,
                        bootStrapLatestR = bootStrapLatestR,
                        filesOnNodes = filesOnNodes,
                        rObjectsOnNodes = rObjectsOnNodes,
                        instancesPerNode = instancesPerNode,
                        masterInstanceType = masterInstanceType,
                        slaveInstanceType = slaveInstanceType,
                        location = location,
                        ec2KeyName = ec2KeyName,
                        copy.image = copy.image,
                        otherBootstrapActions = otherBootstrapActions,
                        sourcePackagesToInstall = sourcePackagesToInstall,
                        masterBidPrice = masterBidPrice,
                        slaveBidPrice = slaveBidPrice
                        )
  ## Segue requires a 64 bit OS, so m1.small is silently upgraded to m1.large
  if ( tolower(masterInstanceType) == "m1.small") {
    clusterObject$masterInstanceType <- "m1.large"
    print("WARNING: masterInstanceType set to m1.small. Segue requires 64 bit OS so the masterInstanceType is being changed to m1.large. You will be billed by Amazon accordingly.")
  }
  if ( tolower(slaveInstanceType) == "m1.small") {
    clusterObject$slaveInstanceType <- "m1.large"
    print("WARNING: slaveInstanceType set to m1.small. Segue requires 64 bit OS so the slaveInstanceType is being changed to m1.large. You will be billed by Amazon accordingly.")
  }
  ## build a randomly-named local temp dir plus matching S3 buckets:
  ## <name>-segue for inputs/bootstrap and <name>-segueout for results
  localTempDir <- paste(tempdir(),
                        paste(sample(c(0:9, letters), 10, replace = TRUE), collapse=""),
                        "-segue",
                        sep="")
  clusterObject$localTempDir <- localTempDir
  clusterObject$localTempDirOut <- paste(localTempDir, "/out", sep="")
  dir.create(localTempDir, showWarnings = TRUE, recursive = TRUE, mode = "0777")
  dir.create(clusterObject$localTempDirOut, showWarnings = TRUE, recursive = TRUE, mode = "0777")
  ## S3 bucket names are lower case; derive from the last path component
  s3TempDir <- tolower(unlist(strsplit(localTempDir, "/"))[length(unlist(strsplit(localTempDir, "/")))])
  deleteS3Bucket(s3TempDir)
  clusterObject$s3TempDir <- s3TempDir
  s3TempDirOut <- tolower(paste(s3TempDir , "out", sep=""))
  deleteS3Bucket(s3TempDirOut)
  clusterObject$s3TempDirOut <- s3TempDirOut
  # create the s3 bucket
  ## TODO: error check this
  makeS3Bucket(s3TempDir)
  # upload the bootstrapper scripts shipped with the segue package to S3
  if (bootStrapLatestR==TRUE) {
    ## TODO: error checking in the uploadS3File function
    uploadS3File(s3TempDir, system.file("bootstrapLatestR.sh", package="segue") )
    uploadS3File(s3TempDir, system.file("update.R", package="segue") )
  }
  clusterObject$bootStrapLatestR <- bootStrapLatestR
  ## if copy.image is TRUE then save an image and use the filesOnNodes
  ## feature to add the saved image to the nodes
  if (copy.image == TRUE) {
    imageFile <- paste( localTempDir, "/local-workspace-image.RData", sep="" )
    save.image( file=imageFile, compress=TRUE )
    clusterObject$filesOnNodes = c(clusterObject$filesOnNodes, imageFile)
  }
  ## if customPackages are present, add them to the filesOnNodes
  if (is.null(customPackages) == FALSE) {
    clusterObject$filesOnNodes = c(clusterObject$filesOnNodes, customPackages)
  }
  # start cluster
  jobFlowId <- startCluster(clusterObject)
  clusterObject$jobFlowId <- jobFlowId
  return(clusterObject)
}
##' AWS Support Function: Checks the status of a given job on EMR
##'
##' Checks the status of a previously issued job.
##' @param jobFlowId the Job Flow Id of the job to check
##' @return Job Status
##' @author James "JD" Long
##' @export
checkStatus <- function(jobFlowId){
  emrClient <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  describeRequest <- new( com.amazonaws.services.elasticmapreduce.model.DescribeJobFlowsRequest )
  idList <- new( java.util.ArrayList )
  idList$add(jobFlowId)
  describeRequest$setJobFlowIds(idList)
  flowDetails <- as.list(emrClient$describeJobFlows(describeRequest)$getJobFlows())
  ## state of the first (and only) matching job flow
  flowDetails[[1]]$getExecutionStatusDetail()$getState()
}
##' AWS Support Function: Checks the status of the most recent step on EMR
##'
##' Describes a job flow on Elastic MapReduce and returns the execution
##' state of the step that was added last.
##' @param jobFlowId the Job Flow Id of the job to check
##' @return Status of the last step
##' @author James "JD" Long
##' @export
checkLastStepStatus <- function(jobFlowId){
  emrClient <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  describeRequest <- new( com.amazonaws.services.elasticmapreduce.model.DescribeJobFlowsRequest )
  idList <- new( java.util.ArrayList )
  idList$add(jobFlowId)
  describeRequest$setJobFlowIds(idList)
  flowDescriptions <- as.list(emrClient$describeJobFlows(describeRequest)$getJobFlows())
  allSteps <- as.list(flowDescriptions[[1]]$getSteps())
  ## only the most recently added step is of interest
  lastStep <- allSteps[[length(allSteps)]]
  lastStep$getExecutionStatusDetail()$getState()
}
##' Starts a cluster on Amazon's EMR service
##'
##' After a cluster has been defined with createCluster() this function actually
##' starts the machines running. Currently exported, but soon will be internal only.
##'
##' @param clusterObject cluster object to start
##' @return a Job Flow ID
##'
##' @export
startCluster <- function(clusterObject){
  ## Provisions and boots the EMR job flow described by `clusterObject`:
  ## builds the list of bootstrap actions (R install/update, user-supplied
  ## actions, node-local files, source packages, tasks-per-node tuning),
  ## configures the instances (on-demand or spot groups), submits the
  ## RunJobFlow request, then polls until the job flow settles.
  ## NOTE(review): relies on `awsCreds` from the enclosing environment.
  numInstances <- clusterObject$numInstances
  s3TempDir <- clusterObject$s3TempDir
  s3TempDirOut <- clusterObject$s3TempDirOut
  bootStrapLatestR <- clusterObject$bootStrapLatestR
  verbose <- TRUE
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  request <- new( com.amazonaws.services.elasticmapreduce.model.RunJobFlowRequest )
  conf <- new( com.amazonaws.services.elasticmapreduce.model.JobFlowInstancesConfig )
  #creates the bootstrap list
  bootStrapList <- new( java.util.ArrayList )
  if (bootStrapLatestR == TRUE) {
    ## install the latest R using the bootstrap script staged in the S3
    ## temp dir
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/bootstrapLatestR.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("R-InstallLatest"))
    bootStrapList$add(bootStrapConfig)
    ## update packages
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/update.R", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("R-UpdatePackages"))
    bootStrapList$add(bootStrapConfig)
  }
  ## handle additional bootstrap actions, if requested.
  if ( ! is.null(clusterObject$otherBootstrapActions) ){
    ## TODO: more graceful exit here? or would stopifnot() be appropriate, in this case?
    stopifnot( "list" == class(clusterObject$otherBootstrapActions) )
    invisible( sapply( clusterObject$otherBootstrapActions , function( action ){
      scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
      ## are we uploading a local file to run? or will we use a script that already exists in
      ## (a non-temporary) S3 bucket?
      if( ! is.null( action$localFile ) ){
        uploadS3File(clusterObject$s3TempDir , action$localFile)
        scriptBootActionConfig$setPath(paste("s3://", clusterObject$s3TempDir, "/" , basename( action$localFile ) , sep=""))
      }else if( ! is.null( action$s3file ) ){
        scriptBootActionConfig$setPath(action$s3file)
      }
      if( ! is.null( action$args ) ){
        ## TODO: proper quoting around args? or leave that for caller?
        argsAsList <- new( java.util.ArrayList )
        sapply( action$args , function(item){ argsAsList$add(item) } )
        scriptBootActionConfig$withArgs(argsAsList)
      }
      bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
      with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
      with( bootStrapConfig, setName(action$name))
      bootStrapList$add(bootStrapConfig)
    } ) )
  }
  if (is.null(clusterObject$filesOnNodes) == FALSE) { # putting files on each node
    print("INFO: You have selected files to be put on each node. These files are being uploaded to S3.")
    ## build a batch file (bootStrapFiles.sh) in clusterObject$localTempDir
    ## that pulls each element of filesOnNodes from S3 onto every node,
    ## then add the batch file as a bootstrap action
    outfile <- file( paste( clusterObject$localTempDir, "/bootStrapFiles.sh", sep="" ), "w" )
    cat("#!/bin/bash", "", file = outfile, sep = "\n")
    cat("mkdir /tmp/segue-upload/", "", file = outfile, sep = "\n")
    ## for each element in filesOnNodes add a hadoop fs -get line
    for ( file in clusterObject$filesOnNodes ){
      remotePath <- paste( "/tmp/segue-upload/", tail(strsplit(file,"/")[[1]], 1), sep="" )
      fileName <- tail(strsplit(file,"/")[[1]], 1)
      s3Path <- paste( "s3://", clusterObject$s3TempDir, "/", fileName, sep="" )
      cat( paste( "hadoop fs -get ", s3Path, remotePath)
         , file = outfile, sep = "\n" )
      cat( "\n", file = outfile )
      # copy each file to S3
      uploadS3File( clusterObject$s3TempDir, file )
    }
    close( outfile )
    # copy bootStrapFiles.sh to clusterObject$s3TempDir
    uploadS3File( clusterObject$s3TempDir, paste( clusterObject$localTempDir, "/bootStrapFiles.sh", sep="" ) )
    # add a bootstrap action that runs bootStrapFiles.sh
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/bootStrapFiles.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("RBootStrapFiles"))
    bootStrapList$add(bootStrapConfig)
    print("INFO: Upload of files to S3 is complete.")
  }
  if (is.null(clusterObject$sourcePackagesToInstall) == FALSE) {
    print("INFO: Now building sources packages to install and uploading them based on the sourcePackagesToInstall list.")
    ## build a batch file (installSourcePackages.sh) in
    ## clusterObject$localTempDir that pulls each source package from S3
    ## and installs it, then add the batch file as a bootstrap action
    outfile <- file( paste( clusterObject$localTempDir, "/installSourcePackages.sh", sep="" ), "w" )
    cat("#!/bin/bash", "", file = outfile, sep = "\n")
    cat("mkdir /tmp/segue-source-packages/", "", file = outfile, sep = "\n")
    ## for each element in sourcePackagesToInstall add a hadoop fs -get
    ## line followed by an R CMD INSTALL line
    for ( file in clusterObject$sourcePackagesToInstall ){
      remotePath <- paste( "/tmp/segue-source-packages/", tail(strsplit(file,"/")[[1]], 1), sep="" )
      fileName <- tail(strsplit(file,"/")[[1]], 1)
      s3Path <- paste( "s3://", clusterObject$s3TempDir, "/", fileName, sep="" )
      cat( paste( "hadoop fs -get ", s3Path, remotePath)
         , file = outfile, sep = "\n" )
      cat( "\n", file = outfile )
      cat( "sudo R CMD INSTALL ", remotePath, "\n", file = outfile, sep = "" )
      # copy each file to S3
      uploadS3File( clusterObject$s3TempDir, file )
    }
    close( outfile )
    # copy installSourcePackages.sh to clusterObject$s3TempDir
    uploadS3File( clusterObject$s3TempDir, paste( clusterObject$localTempDir, "/installSourcePackages.sh", sep="" ) )
    # add a bootstrap action that runs installSourcePackages.sh
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath(paste("s3://", s3TempDir, "/installSourcePackages.sh", sep=""))
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("RinstallSourcePackages"))
    bootStrapList$add(bootStrapConfig)
    print("INFO: Source packages uploaded.")
  }
  if (is.null(clusterObject$instancesPerNode) == FALSE) { # seriously... test this
    ## tune the number of concurrent map/reduce tasks per node via the
    ## stock EMR configure-hadoop bootstrap action
    scriptBootActionConfig <- new(com.amazonaws.services.elasticmapreduce.model.ScriptBootstrapActionConfig)
    scriptBootActionConfig$setPath("s3://elasticmapreduce/bootstrap-actions/configure-hadoop")
    argList <- new( java.util.ArrayList )
    argList$add( "-s" )
    argList$add( paste( "mapred.tasktracker.map.tasks.maximum=", clusterObject$instancesPerNode, sep="") )
    argList$add( "-s" )
    argList$add( paste( "mapred.tasktracker.reduce.tasks.maximum=", clusterObject$instancesPerNode, sep="") )
    scriptBootActionConfig$setArgs( argList )
    bootStrapConfig <- new( com.amazonaws.services.elasticmapreduce.model.BootstrapActionConfig)
    with( bootStrapConfig, setScriptBootstrapAction(scriptBootActionConfig))
    with( bootStrapConfig, setName("SetInstancePerNode"))
    bootStrapList$add(bootStrapConfig)
  }
  ## this adds the bootstrap to the request
  request$setBootstrapActions(bootStrapList)
  if ( is.null( clusterObject$ec2KeyName ) != TRUE ) {
    conf$setEc2KeyName(clusterObject$ec2KeyName)
  }
  ## NOTE(review): if only one of masterBidPrice/slaveBidPrice is set, only
  ## that one instance group is added to instanceGroups below -- confirm
  ## this partial-spot configuration is intended.
  instanceGroups <- .jnew("java/util/Vector")
  if (!is.null(clusterObject$masterBidPrice)) {
    ## request the single master node on the spot market
    masterGroupConf <- new( com.amazonaws.services.elasticmapreduce.model.InstanceGroupConfig )
    masterGroupConf$setInstanceCount(new(Integer, as.character(1)))
    masterGroupConf$setInstanceRole(new(String, as.character("MASTER")))
    masterGroupConf$setInstanceType(new(String, as.character(clusterObject$masterInstanceType)))
    masterGroupConf$setMarket(new(String, as.character("SPOT")))
    masterGroupConf$setBidPrice(new(String, as.character(clusterObject$masterBidPrice)))
    instanceGroups$add(masterGroupConf)
  }
  if (!is.null(clusterObject$slaveBidPrice)) {
    ## request the remaining nodes as a spot-market CORE group
    slaveGroupConf <- new( com.amazonaws.services.elasticmapreduce.model.InstanceGroupConfig )
    slaveGroupConf$setInstanceCount(new(Integer, as.character(numInstances - 1)))
    slaveGroupConf$setInstanceRole(new(String, as.character("CORE")))
    slaveGroupConf$setInstanceType(new(String, as.character(clusterObject$slaveInstanceType)))
    slaveGroupConf$setMarket(new(String, as.character("SPOT")))
    slaveGroupConf$setBidPrice(new(String, as.character(clusterObject$slaveBidPrice)))
    instanceGroups$add(slaveGroupConf)
  }
  if (!is.null(clusterObject$masterBidPrice) || !is.null(clusterObject$slaveBidPrice)) {
    conf$setInstanceGroups(instanceGroups)
  } else {
    # Must configure instances either using instance count,
    # master and slave instance type or instance groups but not both
    conf$setInstanceCount(new(Integer, as.character(numInstances)))
    conf$setMasterInstanceType( clusterObject$masterInstanceType )
    conf$setSlaveInstanceType( clusterObject$slaveInstanceType )
  }
  ## keep the cluster alive between steps so jobs can be submitted later
  conf$setKeepJobFlowAliveWhenNoSteps(new(Boolean, TRUE))
  conf$setPlacement(new(com.amazonaws.services.elasticmapreduce.model.PlacementType, clusterObject$location))
  conf$setHadoopVersion("0.20.205")
  request$setInstances(conf)
  ## job flow logs go to a sibling "-logs" bucket
  request$setLogUri(paste("s3://", s3TempDir, "-logs", sep=""))
  jobFlowName <- paste("RJob-", date(), sep="")
  request$setName(jobFlowName)
  request$setAmiVersion("2.0.4")
  result <- service$runJobFlow(request)
  ## seems like this sleep should not be needed... but otherwise
  ## getJobFlowId() does not get the correct jobflowid
  Sys.sleep(15)
  jobFlowId <- result$getJobFlowId()
  ## poll every 30s until the job flow reaches a terminal or ready state
  currentStatus <- checkStatus(jobFlowId)
  while (currentStatus %in% c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED") == FALSE) {
    Sys.sleep(30)
    currentStatus <- checkStatus(jobFlowId)
    message(paste(currentStatus, " - ", Sys.time(), sep="" ))
  }
  if (currentStatus == "WAITING") {
    message("Your Amazon EMR Hadoop Cluster is ready for action. \nRemember to terminate your cluster with stopCluster().\nAmazon is billing you!")
  }
  return(jobFlowId)
  ## TODO: need to catch situations where the cluster failed
}
##' Stops a running cluster
##'
##' Stops a running cluster and deletes temporary directories from EC2
##'
##' @return nothing
##' @author James "JD" Long
##' @param clusterObject a cluster object to stop
##' @export
stopCluster <- function(clusterObject){
  ## Ask EMR to terminate the job flow backing this cluster.
  emr <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  terminateRequest <- new( com.amazonaws.services.elasticmapreduce.model.TerminateJobFlowsRequest )
  idList <- new( java.util.ArrayList )
  idList$add(clusterObject$jobFlowId)
  terminateRequest$withJobFlowIds(idList)
  emr$terminateJobFlows(terminateRequest)
  ## Clean up the temporary S3 buckets. AWS appears to need a pause before
  ## the deletes succeed, and a single pass does not fully remove the temp
  ## dir's subdirectory, so we sweep the same three buckets twice with a
  ## delay before each pass.
  tempBuckets <- c(clusterObject$s3TempDir,
                   paste(clusterObject$s3TempDir, "-logs", sep=""),
                   clusterObject$s3TempDirOut)
  for (pause in c(10, 15)) {
    Sys.sleep(pause)
    for (bucket in tempBuckets)
      try( deleteS3Bucket( bucket ), silent=TRUE )
  }
}
##' Submits a job to a running cluster
##'
##' After a cluster has been started this function submits jobs to that cluster.
##' If a job is submitted with enableDebugging=TRUE, all jobs submitted to that
##' cluster will also have debugging enabled. To turn debugging off, the cluster
##' must be stopped and restarted.
##'
##'
##' @param clusterObject a cluster object to submit to
##' @param stopClusterOnComplete set to true if you want the cluster to be shut down
##' after job completes
##' @param taskTimeout maximum time a single unit of work can run (in minutes)
##' @return Execution status of this job
##'
##' @export
submitJob <- function(clusterObject, stopClusterOnComplete=FALSE, taskTimeout=10){
  ## Clears previous output locations, optionally installs the EMR
  ## debugging step, submits one Hadoop streaming step (identity mapper,
  ## R script as reducer), and polls until the job flow settles.
  ## NOTE(review): relies on `awsCreds` from the enclosing environment.
  jobFlowId <- clusterObject$jobFlowId
  s3TempDir <- clusterObject$s3TempDir
  s3TempDirOut <- clusterObject$s3TempDirOut
  enableDebugging <- clusterObject$enableDebugging
  ## wipe stale results from any previous run, both on S3 and locally
  try(deleteS3Bucket(s3TempDirOut), silent=TRUE)
  unlink(clusterObject$localTempDirOut, recursive = TRUE)
  dir.create(clusterObject$localTempDirOut)
  jobFlowId <- clusterObject$jobFlowId
  if (enableDebugging==TRUE){
    ## add the stock EMR state-pusher step; per the roxygen docs above,
    ## once enabled it stays on for the lifetime of the job flow
    service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
    hadoopJarStep <- new(com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig)
    hadoopJarStep$setJar("s3://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar")
    argList <- new( java.util.ArrayList )
    argList$add( "s3://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch" )
    hadoopJarStep$setArgs(argList)
    ## step names are timestamps with fractional seconds, to stay unique
    stepName <- format(Sys.time(), "%Y-%m-%d_%H:%M:%OS5")
    stepConfig <- new(com.amazonaws.services.elasticmapreduce.model.StepConfig, stepName, hadoopJarStep)
    stepConfig$setActionOnFailure("CANCEL_AND_WAIT")
    stepList <- new( java.util.ArrayList )
    stepList$add( stepConfig )
    request <- new( com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsRequest, jobFlowId, stepList)
    requestResult <- service$addJobFlowSteps(request)
  }
  service <- new( com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient, awsCreds )
  hadoopJarStep <- new(com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig)
  hadoopJarStep$setJar("/home/hadoop/contrib/streaming/hadoop-streaming.jar")
  argList <- new( java.util.ArrayList )
  # the task timeout is passed to us in minutes, but AWS/EMR expects it in milliseconds
  taskTimeoutMilliseconds <- taskTimeout * 60 * 1000
  argList$add( "-D" )
  argList$add( paste( "mapred.task.timeout=" , taskTimeoutMilliseconds , sep="" ) )
  ## distribute emrData.RData to every task via the distributed cache
  argList$add( "-cacheFile" )
  argList$add( paste("s3n://", s3TempDir, "/emrData.RData#emrData.RData", sep=""))
  argList$add( "-input" )
  argList$add( paste("s3n://", s3TempDir, "/stream.txt", sep="") )
  argList$add( "-output" )
  argList$add( paste("s3n://", s3TempDirOut, "/results", sep="") )
  ## identity mapper ("cat"); the work happens in the mapper.R reducer
  argList$add( "-mapper" )
  argList$add( "cat" )
  argList$add( "-reducer" )
  argList$add( paste("s3n://", s3TempDir, "/mapper.R", sep="" ) )
  hadoopJarStep$setArgs(argList)
  stepName <- format(Sys.time(), "%Y-%m-%d_%H:%M:%OS5")
  stepConfig <- new(com.amazonaws.services.elasticmapreduce.model.StepConfig, stepName, hadoopJarStep)
  stepConfig$setActionOnFailure("CANCEL_AND_WAIT")
  stepList <- new( java.util.ArrayList )
  stepList$add( stepConfig )
  request <- new( com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsRequest, jobFlowId, stepList)
  try(deleteS3Bucket(clusterObject$s3TempDirOut), silent=TRUE)
  #submit to EMR happens here
  service$addJobFlowSteps(request)
  Sys.sleep(15)
  ## NOTE(review): the step status returned here is discarded -- confirm
  ## whether it was meant to be logged or returned
  checkLastStepStatus(jobFlowId)
  Sys.sleep(15)
  ## poll every 30s until the job flow reaches a terminal or ready state
  currentStatus <- checkStatus(jobFlowId)
  while (currentStatus %in% c("COMPLETED", "FAILED", "TERMINATED", "WAITING", "CANCELLED") == FALSE) {
    Sys.sleep(30)
    currentStatus <- checkStatus(jobFlowId)
    message(paste(currentStatus, " - ", Sys.time(), sep="" ))
  }
  if (stopClusterOnComplete==TRUE) {
    stopCluster(clusterObject)
  }
  return(currentStatus)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_add.R
\name{body_bookmark}
\alias{body_bookmark}
\title{add bookmark}
\usage{
body_bookmark(x, id)
}
\arguments{
\item{x}{an rdocx object}
\item{id}{bookmark name}
}
\description{
Add a bookmark at the cursor location.
}
\examples{
# body_bookmark ----
library(magrittr)
doc <- read_docx() \%>\%
body_add_par("centered text", style = "centered") \%>\%
body_bookmark("text_to_replace")
}
| /man/body_bookmark.Rd | no_license | plot-and-scatter/officer | R | false | true | 479 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_add.R
\name{body_bookmark}
\alias{body_bookmark}
\title{add bookmark}
\usage{
body_bookmark(x, id)
}
\arguments{
\item{x}{an rdocx object}
\item{id}{bookmark name}
}
\description{
Add a bookmark at the cursor location.
}
\examples{
# body_bookmark ----
library(magrittr)
doc <- read_docx() \%>\%
body_add_par("centered text", style = "centered") \%>\%
body_bookmark("text_to_replace")
}
|
# Internal sanitization and dispatch helper for single-arc operations.
#
# x: a "bn" object whose arc set is to be modified.
# from, to: distinct node labels, the endpoints of the arc/edge.
# op: "set"/"drop"/"reverse" act on directed arcs; "seted"/"droped" act on
#     undirected edges.
# check.cycles: error if the resulting graph contains a directed cycle.
# check.illegal: error if the arc violates the parametric assumptions
#     recorded in x$learning$illegal.
# update: refresh the cached structure of the nodes whose neighbourhoods
#     may have changed.
# debug: print progress information.
#
# Returns the modified "bn" object, invisibly.
arc.operations = function(x, from, to, op = NULL, check.cycles,
    check.illegal, update = TRUE, debug = FALSE) {

  available.ops = c("set", "drop", "reverse", "seted", "droped")

  # check x's class.
  check.bn(x)
  # check the op code; the message enumerates every accepted code (the
  # previous message omitted "seted" and "droped").
  if (op %!in% available.ops)
    stop("valid op codes are 'set', 'drop', 'reverse', 'seted' and 'droped'.")
  # a valid node is needed.
  check.nodes(nodes = from, graph = x, max.nodes = 1)
  # another valid node is needed.
  check.nodes(nodes = to, graph = x, max.nodes = 1)
  # 'from' must be different from 'to'.
  if (identical(from, to))
    stop("'from' and 'to' must be different from each other.")
  # check logical flags (debug, check.cycles, update).
  check.logical(debug)
  check.logical(check.cycles)
  check.logical(update)

  # add/reverse/orient the arc (or the edge).
  if (op == "set") {

    if (debug)
      cat("* setting arc", from, "->", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(from, to)))
      stop("arc ", from, " -> ", to,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = set.arc.direction(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "drop") {

    if (debug)
      cat("* dropping any arc between ", from, "and", to, ".\n")

    x$arcs = drop.arc.backend(x$arcs, c(from, to), debug = debug)

  }#THEN
  else if (op == "reverse") {

    if (debug)
      cat("* reversing any arc between ", from, "and", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(to, from)))
      stop("arc ", to, " -> ", from,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = reverse.arc.backend(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "seted") {

    if (debug)
      cat("* setting undirected arc", from, "-", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(to, from), either = TRUE))
      stop("undirected arc ", to, " - ", from,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = set.edge.backend(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "droped") {

    if (debug)
      cat("* dropping undirected arc", from, "-", to, ".\n")

    x$arcs = drop.edge.backend(x$arcs, c(from, to), debug = debug)

  }#THEN

  # check whether the graph contains directed cycles; not needed if an arc
  # is dropped.
  if (check.cycles && (op != "drop"))
    if (!is.acyclic(x$arcs, names(x$nodes), debug = debug, directed = TRUE))
      stop("the resulting graph contains cycles.")

  # update the network structure.
  if (update) {

    # build the adjacency matrix only once.
    amat = arcs2amat(x$arcs, names(x$nodes))
    # check which nodes have to be updated: the endpoints and their
    # (old) markov blankets.
    updated.nodes = unique(c(from, to, x$nodes[[from]]$mb, x$nodes[[to]]$mb))
    # update the chosen nodes.
    for (node in updated.nodes)
      x$nodes[[node]] = cache.partial.structure(names(x$nodes),
        target = node, amat = amat, debug = debug)

  }#THEN

  invisible(x)

}#ARC.OPERATIONS
| /R/arc.operations.R | no_license | MariusDanner/bnlearn | R | false | false | 3,063 | r |
# Internal sanitization and dispatch helper for single-arc operations.
#
# x: a "bn" object whose arc set is to be modified.
# from, to: distinct node labels, the endpoints of the arc/edge.
# op: "set"/"drop"/"reverse" act on directed arcs; "seted"/"droped" act on
#     undirected edges.
# check.cycles: error if the resulting graph contains a directed cycle.
# check.illegal: error if the arc violates the parametric assumptions
#     recorded in x$learning$illegal.
# update: refresh the cached structure of the nodes whose neighbourhoods
#     may have changed.
# debug: print progress information.
#
# Returns the modified "bn" object, invisibly.
arc.operations = function(x, from, to, op = NULL, check.cycles,
    check.illegal, update = TRUE, debug = FALSE) {

  available.ops = c("set", "drop", "reverse", "seted", "droped")

  # check x's class.
  check.bn(x)
  # check the op code; the message enumerates every accepted code (the
  # previous message omitted "seted" and "droped").
  if (op %!in% available.ops)
    stop("valid op codes are 'set', 'drop', 'reverse', 'seted' and 'droped'.")
  # a valid node is needed.
  check.nodes(nodes = from, graph = x, max.nodes = 1)
  # another valid node is needed.
  check.nodes(nodes = to, graph = x, max.nodes = 1)
  # 'from' must be different from 'to'.
  if (identical(from, to))
    stop("'from' and 'to' must be different from each other.")
  # check logical flags (debug, check.cycles, update).
  check.logical(debug)
  check.logical(check.cycles)
  check.logical(update)

  # add/reverse/orient the arc (or the edge).
  if (op == "set") {

    if (debug)
      cat("* setting arc", from, "->", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(from, to)))
      stop("arc ", from, " -> ", to,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = set.arc.direction(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "drop") {

    if (debug)
      cat("* dropping any arc between ", from, "and", to, ".\n")

    x$arcs = drop.arc.backend(x$arcs, c(from, to), debug = debug)

  }#THEN
  else if (op == "reverse") {

    if (debug)
      cat("* reversing any arc between ", from, "and", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(to, from)))
      stop("arc ", to, " -> ", from,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = reverse.arc.backend(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "seted") {

    if (debug)
      cat("* setting undirected arc", from, "-", to, ".\n")

    if (check.illegal && is.listed(x$learning$illegal, c(to, from), either = TRUE))
      stop("undirected arc ", to, " - ", from,
        " is not valid due to the parametric assumptions of the network.")

    x$arcs = set.edge.backend(from, to, x$arcs, debug = debug)

  }#THEN
  else if (op == "droped") {

    if (debug)
      cat("* dropping undirected arc", from, "-", to, ".\n")

    x$arcs = drop.edge.backend(x$arcs, c(from, to), debug = debug)

  }#THEN

  # check whether the graph contains directed cycles; not needed if an arc
  # is dropped.
  if (check.cycles && (op != "drop"))
    if (!is.acyclic(x$arcs, names(x$nodes), debug = debug, directed = TRUE))
      stop("the resulting graph contains cycles.")

  # update the network structure.
  if (update) {

    # build the adjacency matrix only once.
    amat = arcs2amat(x$arcs, names(x$nodes))
    # check which nodes have to be updated: the endpoints and their
    # (old) markov blankets.
    updated.nodes = unique(c(from, to, x$nodes[[from]]$mb, x$nodes[[to]]$mb))
    # update the chosen nodes.
    for (node in updated.nodes)
      x$nodes[[node]] = cache.partial.structure(names(x$nodes),
        target = node, amat = amat, debug = debug)

  }#THEN

  invisible(x)

}#ARC.OPERATIONS
|
library(ggplot2)
library(odbc)
library(DBI)
library(rjson)
# Build a QC table from per-aliquot kallisto run_info JSON files and stage
# it for loading into the analysis.kallisto_qc database table.
# Clears the entire workspace first -- assumes the script is run standalone,
# not sourced into an active session.
rm(list=ls())
con <- DBI::dbConnect(odbc::odbc(), "GLASSv3")
myDir1 <- "/projects/varnf/GLASS-III/GLASS-III/results/kallisto/kallisto/aliquot"
myinf1 <- paste(myDir1,dir(myDir1),sep="/")
# The JSON QC file is taken as the 3rd entry of each aliquot directory.
# NOTE(review): positional lookup via dir(x)[3] is fragile -- confirm the
# directory layout is stable.
myinf1 <- sapply(myinf1,function(x)paste(x,dir(x)[3],sep="/"),USE.NAMES=FALSE)
# Parse each JSON file and row-bind the records into one data frame,
# keyed by the aliquot directory name.
jsons <- lapply(myinf1,function(x)fromJSON(file=x))
names(jsons) <- dir(myDir1)
qc_table <- do.call(rbind,jsons)
qc_table <- as.data.frame(apply(qc_table,2,unlist))
# The first 7 columns are numeric QC metrics; coerce them from character.
qc_table[,1:7] <- apply(qc_table[,1:7],2,as.numeric)
# Summary statistics of percent pseudoaligned (auto-printed at top level).
range(qc_table[,"p_pseudoaligned"])
mean(qc_table[,"p_pseudoaligned"])
median(qc_table[,"p_pseudoaligned"])
# Inspect aliquots with poor pseudoalignment rates (< 50%).
qc_table[which(qc_table[,"p_pseudoaligned"] < 50),4:6]
# Keep only the numeric metrics and promote the aliquot barcode from the
# rownames into a proper first column.
qc_table <- qc_table[,1:7]
qc_table <- cbind(rownames(qc_table),qc_table)
rownames(qc_table) <- NULL
colnames(qc_table)[1] <- "aliquot_barcode"
dbWriteTable(con, Id(schema="analysis", table="kallisto_qc"), qc_table, overwrite=TRUE) | /R/expression/qc/kallisto_qc_table.r | permissive | Kcjohnson/SCGP | R | false | false | 945 | r | library(ggplot2)
library(odbc)
library(DBI)
library(rjson)
# Build a QC table from per-aliquot kallisto run_info JSON files and stage
# it for loading into the analysis.kallisto_qc database table.
# Clears the entire workspace first -- assumes the script is run standalone,
# not sourced into an active session.
rm(list=ls())
con <- DBI::dbConnect(odbc::odbc(), "GLASSv3")
myDir1 <- "/projects/varnf/GLASS-III/GLASS-III/results/kallisto/kallisto/aliquot"
myinf1 <- paste(myDir1,dir(myDir1),sep="/")
# The JSON QC file is taken as the 3rd entry of each aliquot directory.
# NOTE(review): positional lookup via dir(x)[3] is fragile -- confirm the
# directory layout is stable.
myinf1 <- sapply(myinf1,function(x)paste(x,dir(x)[3],sep="/"),USE.NAMES=FALSE)
# Parse each JSON file and row-bind the records into one data frame,
# keyed by the aliquot directory name.
jsons <- lapply(myinf1,function(x)fromJSON(file=x))
names(jsons) <- dir(myDir1)
qc_table <- do.call(rbind,jsons)
qc_table <- as.data.frame(apply(qc_table,2,unlist))
# The first 7 columns are numeric QC metrics; coerce them from character.
qc_table[,1:7] <- apply(qc_table[,1:7],2,as.numeric)
# Summary statistics of percent pseudoaligned (auto-printed at top level).
range(qc_table[,"p_pseudoaligned"])
mean(qc_table[,"p_pseudoaligned"])
median(qc_table[,"p_pseudoaligned"])
# Inspect aliquots with poor pseudoalignment rates (< 50%).
qc_table[which(qc_table[,"p_pseudoaligned"] < 50),4:6]
# Keep only the numeric metrics and promote the aliquot barcode from the
# rownames into a proper first column.
qc_table <- qc_table[,1:7]
qc_table <- cbind(rownames(qc_table),qc_table)
rownames(qc_table) <- NULL
colnames(qc_table)[1] <- "aliquot_barcode"
dbWriteTable(con, Id(schema="analysis", table="kallisto_qc"), qc_table, overwrite=TRUE) |
# Load libraries ----------------------------------------------------------
library(Chaos01)
library(ggplot2)
library(viridis)
# Source R scripts --------------------------------------------------------
source("BB_chaos_function.R")
source("COG_chaos_function.R")
source("henon.R")
# Set parameter, uncomment desired set ------------------------------------
# Figure 9
a_all <- seq(1, 1.42, by = 0.0005) # set the range and step for parameter a
b <- 0.3 # set parameter b
# Set parameters which were the same for all the runs in the paper --------
length_res <- 10000 # how many iteration should be used for the computation, generated series is of length length_input + throw
throw <- 3000 # how many iteration should be discarded at the beginning of the series
length_c <- 100 # for how many parameters c should the computation be made
window_size <- 4000 # set window size for BB and COG
control_set_size <- length_res / 2 # set control set size for BB and COG
epsilon <- 4.5 # set threshold for COG
threshold <- 0.995 # set threshold for BB
# candidate frequencies c for the 0-1 test, spanning (pi/5, 4*pi/5)
c_all <- seq(pi/5, 4*pi/5, length.out = length_c)
length_a <- length(a_all)
# preallocate one result vector per (method, coordinate) combination
res_mu_cog_x <- rep(NA, length_a)
res_mu_bb_x <- rep(NA, length_a)
res_mu_msd_x <- rep(NA, length_a)
res_mu_cog_y <- rep(NA, length_a)
res_mu_bb_y <- rep(NA, length_a)
res_mu_msd_y <- rep(NA, length_a)
# Computation -------------------------------------------------------------
# Run the 0-1 test for chaos (COG, BB and MSD variants) on both coordinates
# of the Henon map for every value of the bifurcation parameter a.
for (j in seq_along(a_all))
{
  a <- a_all[j]
  # generate the series and discard the initial transient
  x <- gen.henon(a, b, length_res + throw)
  y <- tail(x[[2]], -throw)
  x <- tail(x[[1]], -throw)
  # x coordinate
  res_mu_cog_x[j] <- cog_chaos01(x, length_c, window_size, epsilon)
  res_mu_bb_x[j] <- bb_chaos01(x, length_c, window_size, control_set_size, threshold)
  res_mu_msd_x[j] <- testChaos01(x, c.gen = "equal")
  # y coordinate -- BUG FIX: these three calls previously re-used `x`,
  # so the "y" facets of the plot duplicated the x results.
  res_mu_cog_y[j] <- cog_chaos01(y, length_c, window_size, epsilon)
  res_mu_bb_y[j] <- bb_chaos01(y, length_c, window_size, control_set_size, threshold)
  res_mu_msd_y[j] <- testChaos01(y, c.gen = "equal")
}
# Assemble long-format results: one row per (a, method, variable) triple.
# The method column (length 3 * length_a) is recycled across the x and y
# halves of the res column (length 6 * length_a).
results <- data.frame(
  a = rep(a_all,6),
  res = c(res_mu_bb_x, res_mu_cog_x, res_mu_msd_x, res_mu_bb_y, res_mu_cog_y, res_mu_msd_y),
  method = c(rep("BB", length_a),
             rep("COG", length_a),
             rep("MSD", length_a)),
  variable = c(rep("x", length_a * 3), rep("y", length_a * 3))
)
# fix the facet order: MSD on top, then COG, then BB
results$method <- factor(results$method, c("MSD", "COG", "BB"))
# Visualization -----------------------------------------------------------
ggplot(results, aes(x = a, y = res, color = method)) +
  geom_point() +
  facet_grid(variable + method ~ .) +
  scale_color_viridis(name = "Method", discrete = T) +
  xlab("a") +
  ylab("0-1 test for chaos") +
  theme_minimal() +
  theme(legend.position = "none")
# ggsave(filename = "results_henon.eps", width = 184, height = 104, units = "mm")
| /compute_henon_results.R | permissive | It4innovations/chaos01-ijcm | R | false | false | 2,776 | r | # Load libraries ----------------------------------------------------------
library(Chaos01)
library(ggplot2)
library(viridis)
# Source R scripts --------------------------------------------------------
source("BB_chaos_function.R")
source("COG_chaos_function.R")
source("henon.R")
# Set parameter, uncomment desired set ------------------------------------
# Figure 9
a_all <- seq(1, 1.42, by = 0.0005) # set the range and step for parameter a
b <- 0.3 # set parameter b
# Set parameters which were the same for all the runs in the paper --------
length_res <- 10000 # how many iteration should be used for the computation, generated series is of length length_input + throw
throw <- 3000 # how many iteration should be discarded at the beginning of the series
length_c <- 100 # for how many parameters c should the computation be made
window_size <- 4000 # set window size for BB and COG
control_set_size <- length_res / 2 # set control set size for BB and COG
epsilon <- 4.5 # set threshold for COG
threshold <- 0.995 # set threshold for BB
# candidate frequencies c for the 0-1 test, spanning (pi/5, 4*pi/5)
c_all <- seq(pi/5, 4*pi/5, length.out = length_c)
length_a <- length(a_all)
# preallocate one result vector per (method, coordinate) combination
res_mu_cog_x <- rep(NA, length_a)
res_mu_bb_x <- rep(NA, length_a)
res_mu_msd_x <- rep(NA, length_a)
res_mu_cog_y <- rep(NA, length_a)
res_mu_bb_y <- rep(NA, length_a)
res_mu_msd_y <- rep(NA, length_a)
# Computation -------------------------------------------------------------
# Run the 0-1 test for chaos (COG, BB and MSD variants) on both coordinates
# of the Henon map for every value of the bifurcation parameter a.
for (j in seq_along(a_all))
{
  a <- a_all[j]
  # generate the series and discard the initial transient
  x <- gen.henon(a, b, length_res + throw)
  y <- tail(x[[2]], -throw)
  x <- tail(x[[1]], -throw)
  # x coordinate
  res_mu_cog_x[j] <- cog_chaos01(x, length_c, window_size, epsilon)
  res_mu_bb_x[j] <- bb_chaos01(x, length_c, window_size, control_set_size, threshold)
  res_mu_msd_x[j] <- testChaos01(x, c.gen = "equal")
  # y coordinate -- BUG FIX: these three calls previously re-used `x`,
  # so the "y" facets of the plot duplicated the x results.
  res_mu_cog_y[j] <- cog_chaos01(y, length_c, window_size, epsilon)
  res_mu_bb_y[j] <- bb_chaos01(y, length_c, window_size, control_set_size, threshold)
  res_mu_msd_y[j] <- testChaos01(y, c.gen = "equal")
}
# Assemble long-format results: one row per (a, method, variable) triple.
# The method column (length 3 * length_a) is recycled across the x and y
# halves of the res column (length 6 * length_a).
results <- data.frame(
  a = rep(a_all,6),
  res = c(res_mu_bb_x, res_mu_cog_x, res_mu_msd_x, res_mu_bb_y, res_mu_cog_y, res_mu_msd_y),
  method = c(rep("BB", length_a),
             rep("COG", length_a),
             rep("MSD", length_a)),
  variable = c(rep("x", length_a * 3), rep("y", length_a * 3))
)
# fix the facet order: MSD on top, then COG, then BB
results$method <- factor(results$method, c("MSD", "COG", "BB"))
# Visualization -----------------------------------------------------------
ggplot(results, aes(x = a, y = res, color = method)) +
  geom_point() +
  facet_grid(variable + method ~ .) +
  scale_color_viridis(name = "Method", discrete = T) +
  xlab("a") +
  ylab("0-1 test for chaos") +
  theme_minimal() +
  theme(legend.position = "none")
# ggsave(filename = "results_henon.eps", width = 184, height = 104, units = "mm")
|
###############################################################################
# #
# implement g-comp #
# #
###############################################################################
#"""""""""#
# loading #
#"""""""""#
suppressMessages(library(tidyverse))
suppressMessages(library(magrittr))
suppressMessages(library(readr))
# Usage: `Rscript code/g_computation.R bootstrap=TRUE B=1000 n=20000`
# Command-line arguments are `name=value` pairs evaluated as R code so they
# become variables in the global environment.
# NOTE(review): eval(parse(text = ...)) runs arbitrary code from the command
# line -- acceptable for a personal analysis script, but never expose this
# to untrusted input.
args = commandArgs(trailingOnly = TRUE)
if (length(args) > 0) {
  for (arg in args) {
    eval(parse(text = arg))
  }
}
# Load the cleaned TAMU data, dropping the first (row-index) column;
# Vaccination_A is dichotomized to an indicator of >= 1 and the treatment
# and covariates are coerced to factors.
clean <- read_csv("data/cleaned_tamu.csv")[,-1]
clean %<>% mutate(Vaccination_A = as.factor((Vaccination_A >= 1)*1),
                  Sex_W5 = as.factor(Sex_W5),
                  College_W2 = as.factor(College_W2))
# if `type` not specified on the command line, just do continuous `Y`
# (unary ! binds looser than %in%, so this tests absence of `type`)
if (!("type") %in% ls()) {
  type <- "continuous"
  family <- "gaussian"
}
# "binary" dichotomizes the outcome (>= 1) and switches the GLM family to
# binomial; any other value falls back to continuous/gaussian.
if (type == "binary") {
  clean %<>% mutate(Hospitalization_Y = as.factor((Hospitalization_Y >= 1)*1))
  family <- "binomial"
} else {
  type <- "continuous"
  family <- "gaussian"
}
cat(paste0("G-computation for ", type, " response will be performed.\n"))
#"""""""""""""""""""""""""#
# g-computation estimator #
#"""""""""""""""""""""""""#
# Fit the outcome regression on all covariates, then average the predicted
# outcomes with everyone set to treated (Vaccination_A = 1) versus control
# (Vaccination_A = 0); Psi is the g-computation (substitution) estimate of
# the mean difference.
tamu_gcomp <- glm(Hospitalization_Y ~ .,
                  data = clean,
                  family = family)
treatment <- clean %>% mutate(Vaccination_A = as.factor(1))
control <- clean %>% mutate(Vaccination_A = as.factor(0))
treatment_predict <- predict(tamu_gcomp, newdata = treatment, type = "response")
control_predict <- predict(tamu_gcomp, newdata = control, type = "response")
Psi <- mean(treatment_predict - control_predict)
cat(paste0("The g-computation estimator is ", Psi, ".\n"))
#""""""""""""""""""""""""""#
# non-parametric bootstrap #
#""""""""""""""""""""""""""#
# fixed seed so the bootstrap replicates are reproducible
set.seed(252)

# One non-parametric bootstrap replicate of the g-computation estimator:
# resample `n` rows of `data` with replacement, refit the outcome
# regression, and contrast mean predicted outcomes with everyone set to
# treated (Vaccination_A = 1) versus control (Vaccination_A = 0).
# `family` is looked up in the enclosing (global) environment.
Psi_est <- function(data, n) {
  resample <- sample_n(data, n, replace = TRUE)
  fit <- glm(Hospitalization_Y ~ ., data = resample, family = family)
  predict_under <- function(a_level) {
    counterfactual <- mutate(resample, Vaccination_A = as.factor(a_level))
    predict(fit, newdata = counterfactual, type = "response")
  }
  mean(predict_under(1) - predict_under(0))
}
# if bootstrap not passed in command line, don't do the bootstrap
if (!("bootstrap") %in% ls()) bootstrap <- FALSE
if (bootstrap) {
  # set defaults if B (replicates) and n (resample size) not passed in
  # command line; n defaults to the full sample size
  if (!("B" %in% ls())) B <- 500
  if (!("n" %in% ls())) n <- nrow(clean)
  estimates <- replicate(B, Psi_est(clean, n))
  # persist the bootstrap distribution for later inspection
  write.csv(estimates,
            paste0("data/g_comp_np_bootstrap_est_",
                   type,
                   ".csv"),
            row.names = FALSE)
  # report the bootstrap point estimate and standard error
  cat(paste0("The non-parametric bootstrap estimate of the ",
             "g-computation estimator is ",
             mean(estimates),
             ".\n"))
  cat(paste0("The non-parametric bootstrap estimate of the ",
             "standard deviation of the g-computation estimator is ",
             sd(estimates),
             ".\n"))
}
| /code/g_computation.R | no_license | erickim/PH252D_final_project | R | false | false | 3,362 | r | ###############################################################################
# #
# implement g-comp #
# #
###############################################################################
#"""""""""#
# loading #
#"""""""""#
suppressMessages(library(tidyverse))
suppressMessages(library(magrittr))
suppressMessages(library(readr))
# Usage: `Rscript code/g_computation.R bootstrap=TRUE B=1000 n=20000`
# Command-line arguments are `name=value` pairs evaluated as R code so they
# become variables in the global environment.
# NOTE(review): eval(parse(text = ...)) runs arbitrary code from the command
# line -- acceptable for a personal analysis script, but never expose this
# to untrusted input.
args = commandArgs(trailingOnly = TRUE)
if (length(args) > 0) {
  for (arg in args) {
    eval(parse(text = arg))
  }
}
# Load the cleaned TAMU data, dropping the first (row-index) column;
# Vaccination_A is dichotomized to an indicator of >= 1 and the treatment
# and covariates are coerced to factors.
clean <- read_csv("data/cleaned_tamu.csv")[,-1]
clean %<>% mutate(Vaccination_A = as.factor((Vaccination_A >= 1)*1),
                  Sex_W5 = as.factor(Sex_W5),
                  College_W2 = as.factor(College_W2))
# if `type` not specified on the command line, just do continuous `Y`
# (unary ! binds looser than %in%, so this tests absence of `type`)
if (!("type") %in% ls()) {
  type <- "continuous"
  family <- "gaussian"
}
# "binary" dichotomizes the outcome (>= 1) and switches the GLM family to
# binomial; any other value falls back to continuous/gaussian.
if (type == "binary") {
  clean %<>% mutate(Hospitalization_Y = as.factor((Hospitalization_Y >= 1)*1))
  family <- "binomial"
} else {
  type <- "continuous"
  family <- "gaussian"
}
cat(paste0("G-computation for ", type, " response will be performed.\n"))
#"""""""""""""""""""""""""#
# g-computation estimator #
#"""""""""""""""""""""""""#
tamu_gcomp <- glm(Hospitalization_Y ~ .,
data = clean,
family = family)
treatment <- clean %>% mutate(Vaccination_A = as.factor(1))
control <- clean %>% mutate(Vaccination_A = as.factor(0))
treatment_predict <- predict(tamu_gcomp, newdata = treatment, type = "response")
control_predict <- predict(tamu_gcomp, newdata = control, type = "response")
Psi <- mean(treatment_predict - control_predict)
cat(paste0("The g-computation estimator is ", Psi, ".\n"))
#""""""""""""""""""""""""""#
# non-parametric bootstrap #
#""""""""""""""""""""""""""#
# Fix the RNG state so the bootstrap replicates below are reproducible.
set.seed(252)
# One non-parametric bootstrap replicate of the g-computation estimand:
# resample `n` rows of `data` with replacement, refit the outcome model,
# and return the mean difference between the all-treated and all-control
# predictions.  Relies on the global `family` set earlier in the script.
Psi_est <- function(data, n) {
  resampled <- sample_n(data, n, replace = TRUE)
  fit <- glm(Hospitalization_Y ~ ., data = resampled, family = family)
  # counterfactual copies: everyone vaccinated vs no one vaccinated
  all_treated <- mutate(resampled, Vaccination_A = as.factor(1))
  all_control <- mutate(resampled, Vaccination_A = as.factor(0))
  mean(predict(fit, newdata = all_treated, type = "response") -
         predict(fit, newdata = all_control, type = "response"))
}
# if `bootstrap` was not passed on the command line, skip the bootstrap.
# Parsing note: %in% binds tighter than !, so the condition below reads as
# !("bootstrap" %in% ls()) -- correct, though the parentheses are misleading.
if (!("bootstrap") %in% ls()) bootstrap <- FALSE
if (bootstrap) {
  # defaults when B (replicates) and n (resample size) were not supplied
  if (!("B" %in% ls())) B <- 500
  if (!("n" %in% ls())) n <- nrow(clean)
  # B bootstrap replicates of the g-computation estimate
  estimates <- replicate(B, Psi_est(clean, n))
  # persist the replicate draws for later inspection
  write.csv(estimates,
            paste0("data/g_comp_np_bootstrap_est_",
                   type,
                   ".csv"),
            row.names = FALSE)
  # bootstrap point estimate: mean of the replicates
  cat(paste0("The non-parametric bootstrap estimate of the ",
             "g-computation estimator is ",
             mean(estimates),
             ".\n"))
  # bootstrap standard error: sd of the replicates
  cat(paste0("The non-parametric bootstrap estimate of the ",
             "standard deviation of the g-computation estimator is ",
             sd(estimates),
             ".\n"))
}
|
# Set up the cluster
# Worker count from the PBS scheduler environment, keeping one core free
# for the master process.
ncpus <- Sys.getenv("PBS_NUM_PPN")
ncpus <- as.numeric(ncpus)-1
ncpus
# NOTE(review): hard-coded override of the PBS-derived count above --
# confirm this is intentional before submitting to the scheduler.
ncpus <- 64
library(doParallel)
cl <- makeCluster(ncpus)
registerDoParallel(cl)
# Read the Iris data (subset of): non-setosa rows, columns 1 and 5.
x <- iris[which(iris[,5] != "setosa"), c(1,5)]
# How many bootstrap runs
trials <- 10000
# We're going to time this; element 3 of system.time is elapsed seconds.
ptime <- system.time({
# icount() gives us an iterator running from 1 through its argument;
# foreach runs the jobs in parallel via the %dopar% operator.
r <- foreach(icount(trials), .combine=cbind) %dopar% {
# sample the data: bootstrap indices drawn with replacement
ind <- sample(100, 100, replace=TRUE)
# estimate a logit model on the resample, capture the results
result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))
# return the coefficients, which will be column-bound across runs
coefficients(result1)
}
})[3]
# print how long it took.
ptime
# Release the worker processes.  The original script never stopped the
# cluster, leaking the workers until the R session exited.
stopCluster(cl)
| /programs/day2/2-dopar.R | no_license | dwasser/computing4economists | R | false | false | 833 | r | # Set up the cluster
# Worker count from the PBS scheduler environment, keeping one core free.
ncpus <- Sys.getenv("PBS_NUM_PPN")
ncpus <- as.numeric(ncpus)-1
ncpus
# NOTE(review): hard-coded override of the PBS-derived count above.
ncpus <- 64
library(doParallel)
cl <- makeCluster(ncpus)
registerDoParallel(cl)
# NOTE(review): the cluster is never released -- consider stopCluster(cl)
# at the end of the script so the worker processes do not linger.
# Read the Iris data (subset of): non-setosa rows, columns 1 and 5.
x <- iris[which(iris[,5] != "setosa"), c(1,5)]
# How many bootstrap runs
trials <- 10000
# We're going to time this; element 3 of system.time is elapsed seconds.
ptime <- system.time({
# icount() gives us an iterator running from 1 through its argument;
# foreach runs the jobs in parallel via the %dopar% operator.
r <- foreach(icount(trials), .combine=cbind) %dopar% {
# sample the data: bootstrap indices drawn with replacement
ind <- sample(100, 100, replace=TRUE)
# estimate a logit model on the resample, capture the results
result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))
# return the coefficients, which will be column-bound across runs.
coefficients(result1)
}
})[3]
# print how long it took.
ptime
|
ctmat <-
function(Gmat,y,tr){
#Creates a matrix (tmat) with the final information of each terminal node of
#the tree (each column of Gmat).
#Gmat = node-indicator matrix (rows = observations, columns = nodes; 0/1)
#y = outcome variable, column vector
#tr = treatment variable with two values (1: T=1; 2: T=2)
#Each row of tmat describes one column of Gmat and holds: cardinality, mean
#and sd of y in arm 1; the same for arm 2; and the effect size d with its
#standard error (8 columns total; see the colnames assignment at the bottom).
rownum<-ncol(Gmat)
t2mat<-matrix(0,ncol=2,nrow=rownum)
t1mat<-matrix(0,ncol=2,nrow=rownum)
#NOTE(review): this tmat initialization is dead code -- tmat is rebuilt from
#scratch with cbind() further down.
tmat<-matrix(0,ncol=2,nrow=rownum)
##first column displays mean of y
#NOTE(review): `dat` is never used afterwards and could be dropped.
dat<-data.frame(cbind(y=y,tr=tr) )
##second column displays var of y
#Per node: mean and sd of y among arm-1 members; NA when the cell is empty.
t1mat[,1]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==1)==0,NA,mean(y[Gmat[,kk]==1&tr==1]))},Gmat=Gmat,y=y,tr=tr)
t1mat[,2]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==1)==0,NA,sqrt(var(y[Gmat[,kk]==1&tr==1]))) },Gmat=Gmat,y=y,tr=tr)
#Same summaries for arm 2.
t2mat[,1]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==2)==0,NA,mean(y[Gmat[,kk]==1&tr==2])) },Gmat=Gmat,y=y,tr=tr)
t2mat[,2]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==2)==0,NA,sqrt(var(y[Gmat[,kk]==1&tr==2]))) },Gmat=Gmat,y=y,tr=tr)
#Per-arm node sizes (columns 1 and 4) glued to the per-arm summaries.
tmat<-cbind(apply(Gmat[tr==1,],2,sum),t1mat,apply(Gmat[tr==2,],2,sum),t2mat)
#Effect size and its standard error per node via computeD() (defined
#elsewhere in the package); rows with any missing summary yield NA.
es<-sapply(1:rownum,function(kk,tmat){ifelse(is.na(sum(tmat[kk,c(2:3,5:6)])),NA,computeD(tmat[kk,1],tmat[kk,2],tmat[kk,3],tmat[kk,4],tmat[kk,5],tmat[kk,6])$dval) },tmat=tmat)
se<-sapply(1:rownum,function(kk,tmat){ifelse(is.na(sum(tmat[kk,c(2:3,5:6)])),NA,computeD(tmat[kk,1],tmat[kk,2],tmat[kk,3],tmat[kk,4],tmat[kk,5],tmat[kk,6])$se) },tmat=tmat)
#pval<-sapply(1:rownum,function(kk,tmat,Gmat,y,tr){ifelse(is.na(sum(tmat[kk,c(3:6)])),NA,t.test(y[Gmat[,kk]==1&tr==1],y[Gmat[,kk]==1&tr==0])$p.value) },tmat=tmat,Gmat=Gmat,y=y,tr=tr)
tmat<-cbind(tmat,es,se )
colnames(tmat)<-c("nt1","meant1","sdt1","nt2","meant2","sdt2","d","se")
return(as.matrix(tmat))}
| /quint/R/ctmat.R | no_license | ingted/R-Examples | R | false | false | 2,209 | r | ctmat <-
function(Gmat,y,tr){
## Summarize each terminal node of the tree (each column of Gmat).
##
## Gmat: node-indicator matrix (rows = observations, columns = nodes; 0/1)
## y:    outcome variable, column vector
## tr:   treatment variable with two values (1: T=1; 2: T=2)
##
## Returns an I x 8 matrix, one row per node, holding: n, mean(y), sd(y) in
## arm 1; n, mean(y), sd(y) in arm 2; and the effect size d with its
## standard error from computeD() (defined elsewhere in the package).
## Empty treatment-by-node cells yield NA summaries, which propagate to
## NA effect sizes.
## Cleanup vs the original: the unused `dat` data frame and the dead
## initial `tmat` assignment were removed; behavior is unchanged.
rownum<-ncol(Gmat)
t1mat<-matrix(0,ncol=2,nrow=rownum)
t2mat<-matrix(0,ncol=2,nrow=rownum)
## Per node: mean and sd of y among arm-1 members; NA when the cell is empty.
t1mat[,1]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==1)==0,NA,mean(y[Gmat[,kk]==1&tr==1]))},Gmat=Gmat,y=y,tr=tr)
t1mat[,2]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==1)==0,NA,sqrt(var(y[Gmat[,kk]==1&tr==1]))) },Gmat=Gmat,y=y,tr=tr)
## Same summaries for arm 2.
t2mat[,1]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==2)==0,NA,mean(y[Gmat[,kk]==1&tr==2])) },Gmat=Gmat,y=y,tr=tr)
t2mat[,2]<-sapply(1:rownum,function(kk,Gmat,y,tr){ifelse(sum(Gmat[,kk]==1&tr==2)==0,NA,sqrt(var(y[Gmat[,kk]==1&tr==2]))) },Gmat=Gmat,y=y,tr=tr)
## Per-arm node sizes (columns 1 and 4) glued to the per-arm summaries.
tmat<-cbind(apply(Gmat[tr==1,],2,sum),t1mat,apply(Gmat[tr==2,],2,sum),t2mat)
## Effect size and standard error per node; NA rows propagate.
es<-sapply(1:rownum,function(kk,tmat){ifelse(is.na(sum(tmat[kk,c(2:3,5:6)])),NA,computeD(tmat[kk,1],tmat[kk,2],tmat[kk,3],tmat[kk,4],tmat[kk,5],tmat[kk,6])$dval) },tmat=tmat)
se<-sapply(1:rownum,function(kk,tmat){ifelse(is.na(sum(tmat[kk,c(2:3,5:6)])),NA,computeD(tmat[kk,1],tmat[kk,2],tmat[kk,3],tmat[kk,4],tmat[kk,5],tmat[kk,6])$se) },tmat=tmat)
tmat<-cbind(tmat,es,se )
colnames(tmat)<-c("nt1","meant1","sdt1","nt2","meant2","sdt2","d","se")
return(as.matrix(tmat))}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_light_merged.R
\name{calc_light_merged}
\alias{calc_light_merged}
\title{Merge modeled and observed PAR into a single timeseries}
\usage{
calc_light_merged(PAR.obs = mm_data(solar.time, light), solar.time,
latitude, longitude, max.PAR = NA, max.gap = as.difftime(3, units =
"hours"), attach.units = is.unitted(PAR.obs))
}
\arguments{
\item{PAR.obs}{a 2-column data.frame with columns solar.time and light, as in
argument default, containing the full time series of observed light (should
be at a lower temporal resolution than \code{PAR.mod})}
\item{solar.time}{a vector of mean solar times for which the light should be
modeled and merged with the values in PAR.obs}
\item{latitude}{numeric value or vector indicating the site latitude in
decimal degrees (never radians or deg-min-sec, no matter what \code{format}
is) between -90 (South Pole) and 90 (North Pole).}
\item{longitude}{numeric, in degrees, either positive and unitted ("degE" or
"degW") or with sign indicating direction (positive = East), describing
location of the site}
\item{max.PAR}{the maximum PAR, as in calc_light. if NA, this function does
its best to guess a max.PAR that will make modeled light pretty similar to
cloud-free days of observed light}
\item{max.gap}{difftime or NA. If difftime, the maximum gap between a light
observation and a time point in solar.time, beyond which no value will be
given for light at that solar.time. If NA, all values will be modeled, even
if they are many days away from a light observation.}
\item{attach.units}{logical. Should the returned vector be a unitted object?}
}
\description{
Merge two time series (one observed, one modeled) of photosynthetically
active radiation (PAR) for a series of date-times. You can also think about
this as a way to smoothly interpolate a time series of observations.
}
\examples{
\dontrun{
library(dplyr)
library(ggplot2)
library(unitted)
timebounds <- as.POSIXct(c('2008-03-12 00:00', '2008-03-12 23:59'), tz='UTC')
coords <- list(lat=32.4, lon=-96.5)
PAR.obs <- data_frame(
solar.time=seq(timebounds[1], timebounds[2], by=as.difftime(3, units='hours')),
light=c(0, 0, 85.9, 1160.5, 1539.0, 933.9, 0, 0)
) \%>\% as.data.frame()
PAR.mod <- data_frame(
solar.time=seq(timebounds[1], timebounds[2], by=as.difftime(0.25, units='hours')),
light=calc_light(solar.time, latitude=coords$lat, longitude=coords$lon)
) \%>\% as.data.frame()
PAR.merged <- calc_light_merged(PAR.obs, PAR.mod$solar.time,
latitude=coords$lat, longitude=coords$lon, max.gap=as.difftime(20, units='hours'))
ggplot(bind_rows(mutate(v(PAR.obs), type='obs'), mutate(v(PAR.mod), type='mod'),
mutate(v(PAR.merged), type='merged')) \%>\%
mutate(type=ordered(type, levels=c('obs','mod','merged'))),
aes(x=solar.time, y=light, color=type)) + geom_line() + geom_point() + theme_bw()
}
}
| /man/calc_light_merged.Rd | permissive | waternk/streamMetabolizer | R | false | true | 2,933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_light_merged.R
\name{calc_light_merged}
\alias{calc_light_merged}
\title{Merge modeled and observed PAR into a single timeseries}
\usage{
calc_light_merged(PAR.obs = mm_data(solar.time, light), solar.time,
latitude, longitude, max.PAR = NA, max.gap = as.difftime(3, units =
"hours"), attach.units = is.unitted(PAR.obs))
}
\arguments{
\item{PAR.obs}{a 2-column data.frame with columns solar.time and light, as in
argument default, containing the full time series of observed light (should
be at a lower temporal resolution than \code{PAR.mod})}
\item{solar.time}{a vector of mean solar times for which the light should be
modeled and merged with the values in PAR.obs}
\item{latitude}{numeric value or vector indicating the site latitude in
decimal degrees (never radians or deg-min-sec, no matter what \code{format}
is) between -90 (South Pole) and 90 (North Pole).}
\item{longitude}{numeric, in degrees, either positive and unitted ("degE" or
"degW") or with sign indicating direction (positive = East), describing
location of the site}
\item{max.PAR}{the maximum PAR, as in calc_light. if NA, this function does
its best to guess a max.PAR that will make modeled light pretty similar to
cloud-free days of observed light}
\item{max.gap}{difftime or NA. If difftime, the maximum gap between a light
observation and a time point in solar.time, beyond which no value will be
given for light at that solar.time. If NA, all values will be modeled, even
if they are many days away from a light observation.}
\item{attach.units}{logical. Should the returned vector be a unitted object?}
}
\description{
Merge two time series (one observed, one modeled) of photosynthetically
active radiation (PAR) for a series of date-times. You can also think about
this as a way to smoothly interpolate a time series of observations.
}
\examples{
\dontrun{
library(dplyr)
library(ggplot2)
library(unitted)
timebounds <- as.POSIXct(c('2008-03-12 00:00', '2008-03-12 23:59'), tz='UTC')
coords <- list(lat=32.4, lon=-96.5)
PAR.obs <- data_frame(
solar.time=seq(timebounds[1], timebounds[2], by=as.difftime(3, units='hours')),
light=c(0, 0, 85.9, 1160.5, 1539.0, 933.9, 0, 0)
) \%>\% as.data.frame()
PAR.mod <- data_frame(
solar.time=seq(timebounds[1], timebounds[2], by=as.difftime(0.25, units='hours')),
light=calc_light(solar.time, latitude=coords$lat, longitude=coords$lon)
) \%>\% as.data.frame()
PAR.merged <- calc_light_merged(PAR.obs, PAR.mod$solar.time,
latitude=coords$lat, longitude=coords$lon, max.gap=as.difftime(20, units='hours'))
ggplot(bind_rows(mutate(v(PAR.obs), type='obs'), mutate(v(PAR.mod), type='mod'),
mutate(v(PAR.merged), type='merged')) \%>\%
mutate(type=ordered(type, levels=c('obs','mod','merged'))),
aes(x=solar.time, y=light, color=type)) + geom_line() + geom_point() + theme_bw()
}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/text_to_sparse_matrix.R
\name{textVectors}
\alias{textVectors}
\title{Tokenize a vector of text and convert to a sparse matrix. TODO: tf-idf is BROKEN, n-grams are broken, skip-grams are broken!}
\usage{
textVectors(x, normalize = FALSE, split_token = " ", verbose = FALSE,
freqCutoff = 0, absCutoff = 0, tfidf = FALSE, idf = NULL,
bagofwords = NULL, spellcheck = FALSE, remove_stopwords = FALSE,
stem = FALSE, ngrams = 1, skips = 0, stops = NULL, pca = FALSE,
pca_comp = 5, pca_rotation = NULL, tsne = FALSE, tsne_dims = 2,
tsne_perplexity = 30)
}
\arguments{
\item{x}{a character vector}
\item{normalize}{normalize the character vector by converting to lowercase, removing accents, and converting punctuation and spaces to single spaces and then trimming the string.}
\item{split_token}{token to use to split the text data. If NULL, text will not be tokenized and the bagofwords will be detected via regular expressions.}
\item{verbose}{whether to print a log while performing the operations}
\item{freqCutoff}{columns below this pct frequency will be removed from the final object}
\item{absCutoff}{columns below this absolute frequency will be removed from the final object}
\item{tfidf}{whether to apply tfidf weighting. NOTE THAT THIS WILL CREATE A DENSE MATRIX, WHICH IN MANY CASES IS BAD.}
\item{idf}{Pre-computed inverse document frequencies (perhaps from another, larger dataset)}
\item{bagofwords}{input bagofwords to use to construct the final matrix}
\item{spellcheck}{if TRUE tokens will be spellchecked before they are stemmed}
\item{remove_stopwords}{if TRUE, english stopwords will be removed from the tokens}
\item{stem}{if TRUE the tokens will be stemmed, after tokenizing and before creating a matrix}
\item{ngrams}{If greater than 1, n-grams of this degree will be added to the word bag}
\item{skips}{If greater than 0, skips of this degree will be added to the word bag}
\item{stops}{Optional list of stopwords, otherwise a default list will be used.}
\item{pca}{Apply PCA after transforming text to sparse matrix?}
\item{pca_comp}{Number of components to use for PCA}
\item{pca_rotation}{Rotation matrix to use for PCA. If NULL, will be computed by irlba.}
\item{tsne}{Apply the tsne transformation after the PCA rotation?}
\item{tsne_dims}{Dimension of the final TSNE embedding. Should be smaller than pca_comp.}
\item{tsne_perplexity}{Perplexity for the tsne transformation.}
}
\value{
a textVectors object
}
\description{
This code takes a vector of text, cleans it up, tokenizes it, spellchecks it,
removes stopwords, stems it, finds n-grams, creates a bag of words and
converts it to a sparse matrix using a bag of words model. Optionally
it also applies tf-idf weighting to the matrix. This function can be slow.
Note that freqCutoff and absCutoff are relative to the number of documents
the term appears in, and ignore its frequency within documents.
}
\examples{
x <- c(
'i like this package written by zach mayer',
'this package is so much fun',
'thanks zach for writing it',
'this package is the best package',
'i want to give zach mayer a million dollars')
textVectors(
x,
absCutoff=1, ngrams=2, stem=TRUE, verbose=TRUE)
textVectors(
x,
absCutoff=1, ngrams=2, skips=1, stem=TRUE, verbose=TRUE, tfidf=TRUE)
}
\references{
\url{http://stackoverflow.com/questions/4942361/how-to-turn-a-list-of-lists-to-a-sparse-matrix-in-r-without-using-lapply}
\url{http://en.wikipedia.org/wiki/Tf-idf}
}
| /man/textVectors.Rd | permissive | WeaselMicu/r2vec | R | false | false | 3,556 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/text_to_sparse_matrix.R
\name{textVectors}
\alias{textVectors}
\title{Tokenize a vector of text and convert to a sparse matrix. TODO: tf-idf is BROKEN, n-grams are broken, skip-grams are broken!}
\usage{
textVectors(x, normalize = FALSE, split_token = " ", verbose = FALSE,
freqCutoff = 0, absCutoff = 0, tfidf = FALSE, idf = NULL,
bagofwords = NULL, spellcheck = FALSE, remove_stopwords = FALSE,
stem = FALSE, ngrams = 1, skips = 0, stops = NULL, pca = FALSE,
pca_comp = 5, pca_rotation = NULL, tsne = FALSE, tsne_dims = 2,
tsne_perplexity = 30)
}
\arguments{
\item{x}{a character vector}
\item{normalize}{normalize the character vector by converting to lowercase, removing accents, and converting punctuation and spaces to single spaces and then trimming the string.}
\item{split_token}{token to use to split the text data. If NULL, text will not be tokenized and the bagofwords will be detected via regular expressions.}
\item{verbose}{whether to print a log while performing the operations}
\item{freqCutoff}{columns below this pct frequency will be removed from the final object}
\item{absCutoff}{columns below this absolute frequency will be removed from the final object}
\item{tfidf}{whether to apply tfidf weighting. NOTE THAT THIS WILL CREATE A DENSE MATRIX, WHICH IN MANY CASES IS BAD.}
\item{idf}{Pre-computed inverse document frequencies (perhaps from another, larger dataset)}
\item{bagofwords}{input bagofwords to use to construct the final matrix}
\item{spellcheck}{if TRUE tokens will be spellchecked before they are stemmed}
\item{remove_stopwords}{if TRUE, english stopwords will be removed from the tokens}
\item{stem}{if TRUE the tokens will be stemmed, after tokenizing and before creating a matrix}
\item{ngrams}{If greater than 1, n-grams of this degree will be added to the word bag}
\item{skips}{If greater than 0, skips of this degree will be added to the word bag}
\item{stops}{Optional list of stopwords, otherwise a default list will be used.}
\item{pca}{Apply PCA after transforming text to sparse matrix?}
\item{pca_comp}{Number of components to use for PCA}
\item{pca_rotation}{Rotation matrix to use for PCA. If NULL, will be computed by irlba.}
\item{tsne}{Apply the tsne transformation after the PCA rotation?}
\item{tsne_dims}{Dimension of the final TSNE embedding. Should be smaller than pca_comp.}
\item{tsne_perplexity}{Perplexity for the tsne transformation.}
}
\value{
a textVectors object
}
\description{
This code takes a vector of text, cleans it up, tokenizes it, spellchecks it,
removes stopwords, stems it, finds n-grams, creates a bag of words and
converts it to a sparse matrix using a bag of words model. Optionally
it also applies tf-idf weighting to the matrix. This function can be slow.
Note that freqCutoff and absCutoff are relative to the number of documents
the term appears in, and ignore its frequency within documents.
}
\examples{
x <- c(
'i like this package written by zach mayer',
'this package is so much fun',
'thanks zach for writing it',
'this package is the best package',
'i want to give zach mayer a million dollars')
textVectors(
x,
absCutoff=1, ngrams=2, stem=TRUE, verbose=TRUE)
textVectors(
x,
absCutoff=1, ngrams=2, skips=1, stem=TRUE, verbose=TRUE, tfidf=TRUE)
}
\references{
\url{http://stackoverflow.com/questions/4942361/how-to-turn-a-list-of-lists-to-a-sparse-matrix-in-r-without-using-lapply}
\url{http://en.wikipedia.org/wiki/Tf-idf}
}
|
## The objective of these functions is to create a special "matrix" object that
## can cache its inverse. The inverse of the special "matrix" is computed. If
## the matrix has not changed, then the inverse previously computed is retrieved
## from the cache.
## This function creates a special "matrix" that can cache its inverse.
## Construct a "cache matrix": a list of accessor closures wrapping the
## matrix `x` together with a memoized slot for its inverse.  The element
## names (set/get/setinv/getinv) form the public interface used by
## cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoized inverse; NULL means "not yet computed"
  list(
    set = function(y) {
      # replacing the matrix invalidates any previously cached inverse
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setinv = function(solve) cached <<- solve,
    getinv = function() cached
  )
}
## This function computes the inverse of the special matrix returned by
## makeCacheMatrix. If the inverse has already been calculated (and the matrix
## remains unchanged), then this function retrieves the inverse from the cache.
## Return the inverse of the special "matrix" `x` (built by makeCacheMatrix):
## serve the stored result on a cache hit, otherwise compute it with solve(),
## store it, and return it.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getinv()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  result <- solve(x$get(), ...)
  x$setinv(result)
  result
}
| /cachematrix.R | no_license | thekeithbritt/ProgrammingAssignment2 | R | false | false | 1,165 | r | ## The objective of these functions is to create a special "matrix" object that
## can cache its inverse. The inverse of the special "matrix" is computed. If
## the matrix has not changed, then the inverse previously computed is retrieved
## from the cache.
## This function creates a special "matrix" that can cache its inverse.
# Create a special "matrix": a list of closures around `x` plus a cached
# inverse.  `set` replaces the matrix and invalidates the cache; `get`
# returns it; `setinv`/`getinv` store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; NULL means "not yet computed"
  inv <- NULL
  set <- function(y) {
    x <<- y
    # a new matrix makes any previously cached inverse stale
    inv <<- NULL
  }
  get <- function() x
  setinv <- function(solve) inv <<- solve
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function computes the inverse of the special matrix returned by
## makeCacheMatrix. If the inverse has already been calculated (and the matrix
## remains unchanged), then this function retrieves the inverse from the cache.
# Return the inverse of a makeCacheMatrix() object, computing it with
# solve() only on the first call and serving the cached copy afterwards.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  # cache hit: reuse the stored inverse
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # cache miss: compute, store, and return
  data <- x$get()
  inv <- solve(data,...)
  x$setinv(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adv_functions.R
\name{sec_adv_manager_sitemap}
\alias{sec_adv_manager_sitemap}
\title{Returns data frame of SEC ADV sitemap}
\usage{
sec_adv_manager_sitemap()
}
\description{
Returns data frame of SEC ADV sitemap
}
| /man/sec_adv_manager_sitemap.Rd | permissive | abresler/fundManageR | R | false | true | 293 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adv_functions.R
\name{sec_adv_manager_sitemap}
\alias{sec_adv_manager_sitemap}
\title{Returns data frame of SEC ADV sitemap}
\usage{
sec_adv_manager_sitemap()
}
\description{
Returns data frame of SEC ADV sitemap
}
|
# Dependencies: readr for read_csv, MASS for ginv(); ggplot2 is loaded but
# not used in the code visible below.
library(readr)
library(ggplot2)
library(MASS)
# NOTE(review): an absolute personal setwd() and rm(list=ls()) are fragile
# in shared scripts -- fine for a one-off problem set, but they break
# portability and clobber the caller's workspace.
setwd("C:/Users/Emily/Documents/AppliedQuantFin")
rm(list=ls())
## R equivalent of MATLAB's repmat: tile matrix X in an m-by-n grid of copies.
repmat <- function(X, m, n) {
  n_rows <- nrow(X)
  n_cols <- ncol(X)
  # Recycle X column-wise n times to build one horizontal band, then lay the
  # transposed band out row-wise to stack it m times vertically.
  band <- matrix(X, n_rows, n_cols * n)
  matrix(t(band), n_rows * m, n_cols * n, byrow = TRUE)
}
# Load market data; keep csv columns 2:3 (presumably the market return and
# the risk-free rate -- TODO confirm against the data file).
market <- read_csv("ps4market.csv")
market <- as.matrix(market[,2:3])
# percent -> decimal returns
market <- market / 100
# rf: mean of the second retained column, used as the risk-free rate below
rf <- mean(market[,2])
# truncate to 1069 observations to align with the portfolio sample
market <- market[1:1069,]
# 30 value-weighted portfolio return columns, percent -> decimal
vw_returns <- read_csv("ps4vw.csv")
vw_returns <- as.matrix(vw_returns[,2:31])
vw_returns <- vw_returns / 100
#1A: sample mean, standard deviation, and Sharpe ratio per portfolio
portfolio_means <- apply(vw_returns, 2, mean)
portfolio_sds <- apply(vw_returns, 2, sd)
portfolio_sharpes <- (portfolio_means - rf) / portfolio_sds
#1B: CAPM time-series regression for each of the 30 industry portfolios.
alphas <- 1 : 30
betas <- 1 : 30
# Preallocate the residual matrix: the original assigned into `errors`
# without ever creating it, which errors out on the first loop iteration
# (compare the `past_errors` preallocation in section 1E).
errors <- matrix(nrow = 1069, ncol = 30)
for(i in 1:30) {
  model <- lm(vw_returns[,i] - rf ~ market[,1])
  alphas[i] <- coefficients(model)[1]
  betas[i] <- coefficients(model)[2]
  errors[,i] <- residuals(model)
}
#F-stat (Gibbons-Ross-Shanken)
T <- 1069 #num of observations (note: masks base R's T shorthand for TRUE)
L <- 1 #num of factors
N <- 30 #num of portfolios
v_inv <- ginv(cov(vw_returns))
market_sharpe <- mean(market[,1]) / sd(market[,1])
test_stat <- (t(alphas) %*% v_inv %*% alphas) / (1+market_sharpe^2)
# GRS finite-sample F: T(T-N-1)/(N(T-2)) * test_stat ~ F(N, T-N-1).
# The original multiplied by `obs`, which is undefined at this point in the
# script; the sample size T is what the formula calls for.
F_stat <- T * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T-N-1, 0, lower.tail = FALSE)
#1E: 10 past-performance portfolios; their sample starts 6 months into the
#market sample, hence market[7:1069,] (1063 rows) below.
past_returns <- read_csv("ps4past.csv")
past_returns <- as.matrix(past_returns[,2:11])
past_returns <- past_returns / 100
# Means are taken on EXCESS returns here, so past_sharpes divides by sd
# without subtracting rf a second time.
past_means <- apply(past_returns - rf, 2, mean)
past_sds <- apply(past_returns, 2, sd)
past_sharpes <- (past_means) / past_sds
past_alphas <- 1 : 10
past_betas <- 1 : 10
past_errors <- matrix(nrow = 1063, ncol = 10)
for(i in 1:10) {
  model2 <- lm(past_returns[,i] - rf ~ market[7:1069,1])
  past_alphas[i] <- coefficients(model2)[1]
  past_betas[i] <- coefficients(model2)[2]
  past_errors[,i] <- residuals(model2)
}
#F-stat
T <- 1063 #num of observations
L <- 1 #num of factors
N <- 10 #num of portfolios
v_inv <- ginv(cov(past_returns))
market_sharpe <- mean(market[,1]) / sd(market[,1])
test_stat <- (t(past_alphas) %*% v_inv %*% past_alphas) / (1+market_sharpe^2)
# BUG(review): `obs` is undefined at this point in the script, so this line
# errors as written; the GRS formula uses the sample size T here.
F_stat <- obs * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T-N-1, 0, lower.tail = FALSE)
#1F: 25 portfolios from ps4beme.csv (B/M-sorted, per the name) vs the market.
T <- 1069 #num of observations
L <- 1 #num of factors
N <- 25 #num of portfolios
beme <- read_csv("ps4beme.csv")
beme <- as.matrix(beme[1:1069,2:26])
beme <- beme / 100
beme_means <- apply(beme, 2, mean)
beme_sds <- apply(beme, 2, sd)
beme_sharpes <- (beme_means - rf) / beme_sds
beme_alphas <- 1 : 25
beme_betas <- 1 : 25
beme_errors <- matrix(nrow = 1069, ncol = 25)
for(i in 1:25) {
  model3 <- lm(beme[,i] - rf ~ market[,1])
  beme_alphas[i] <- coefficients(model3)[1]
  beme_betas[i] <- coefficients(model3)[2]
  beme_errors[,i] <- residuals(model3)
}
##F-stat
# NOTE(review): T is reset to 1063 here although the beme sample above has
# 1069 rows -- looks copied from section 1E; confirm the intended size.
T <- 1063 #num of observations
L <- 1 #num of factors
N <- 25 #num of portfolios
v_inv <- ginv(cov(beme))
market_sharpe <- mean(market[,1]) / sd(market[,1])
test_stat <- (t(beme_alphas) %*% v_inv %*% beme_alphas) / (1+market_sharpe^2)
# BUG(review): `obs` is still undefined at this point; see the note in 1E.
F_stat <- obs * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T-N-1, 0, lower.tail = FALSE)
#1G: test the 25 portfolios against their own in-sample tangency portfolio.
cov_beme <- cov(beme)
cov_beme_inv <- ginv(cov_beme)
one_v = rep(1, 25)
# Tangency weights: V^{-1}(mu - rf), normalized to sum to one.
tan_weights <- (cov_beme_inv %*% (beme_means - rf)) / (as.vector(t(one_v) %*% cov_beme_inv %*% (beme_means - rf)))
tan_excess_return <- (beme - rf) %*% tan_weights
# One multivariate lm: all 25 excess-return columns on the tangency excess
# return; row 1 of the coefficient matrix holds the 25 intercepts.
model_list <- lm((beme - rf) ~ tan_excess_return)
intercepts <- model_list$coefficients[1,]
tan_sharpe <- mean(tan_excess_return) / sd(tan_excess_return)
test_stat <- (t(intercepts) %*% cov_beme_inv %*% intercepts) / (1+tan_sharpe^2)
obs <- 1073
N <- 25
# GRS-style finite-sample F statistic and p-value.
F_stat <- obs * (obs - N - 1) * test_stat / (N * (obs - 2))
p_val <- pf(F_stat, N, obs-N-1, 0, lower.tail = FALSE)
#1H: split-sample tangency exercise.
beme <- read_csv("ps4beme.csv")
# NOTE(review): this divides EVERY column by 100, including the `Size` key
# column used for the even/odd splits below -- confirm that is intended.
beme <- beme / 100
even_years <- subset(beme, as.integer(beme$Size) %% 2 == 0)
odd_years <- subset(beme, as.integer(beme$Size) %% 2 == 1)
even_training <- subset(even_years, as.integer(even_years$Size * 100) %% 2 == 1)
even_test <- subset(even_years, as.integer(even_years$Size * 100) %% 2 == 0)
odd_training <- subset(odd_years, as.integer(odd_years$Size * 100) %% 2 == 0)
odd_test <- subset(odd_years, as.integer(odd_years$Size * 100) %% 2 == 1)
training_df <- rbind(even_training, odd_training)
test_df <- rbind(even_test, odd_test)
# Tangency weights estimated on the training half ...
vcov_mat4 = cov(training_df[2:26])
v_inv4 = ginv(vcov_mat4)
training_ret = as.matrix(training_df[,2:26])
avg_train_rtn <- apply(training_df[,2:26], 2, mean, na.rm=TRUE)
avg_train_rtn2 <- avg_train_rtn - rf
tan_weights2 <- (v_inv4 %*% avg_train_rtn2) / (as.vector(t(one_v) %*% v_inv4 %*% avg_train_rtn2))
# ... and on the test half.
vcov_mat5 = cov(test_df[2:26])
v_inv5 = ginv(vcov_mat5)
test_ret <- as.matrix(test_df[,2:26])
avg_test_rtn <- apply(test_df[,2:26], 2, mean)
avg_test_rtn2 <- avg_test_rtn - rf
tan_weights3 <- (v_inv5 %*% avg_test_rtn2) / (as.vector(t(one_v) %*% v_inv5 %*% avg_test_rtn2))
# Out-of-sample returns: apply each half's weights to the other half.
first_half_of_returns <- test_ret %*% tan_weights2
second_half <- training_ret %*% tan_weights3
beme_excess <- beme - rf
# BUG(review): the `if` conditions below compare whole vectors with && --
# an error in modern R (historically only element 1 was used) -- and growing
# total_ret with c() in a loop is O(n^2).  Review this loop before trusting
# its output.
total_ret <- as.numeric()
for (i in 1:1073){
  if (as.integer(beme$Size) %% 2 == 0 && as.integer(beme$Size * 100) %% 2 == 1){
    holder <- beme_excess[i,] %*% tan_weights3
    total_ret <- c(total_ret, holder)
  }
  else if (as.integer(beme$Size) %% 2 == 1 && as.integer((beme$Size * 100) %% 2 == 0)){
    holder <- beme_excess[i,] %*% tan_weights3
    total_ret <- c(total_ret, holder)
  }
  else{
    holder <- beme_excess[i,] %*% tan_weights2
    total_ret <- c(total_ret, holder)
  }
  }
tan_sharpe2 <- mean(total_ret) / sd(total_ret)
# BUG(review): `excess_returns_arr3` and `v_inv3` are never defined anywhere
# in this script, so the lines below stop with an error as written.
model_list5 <- lm(excess_returns_arr3 ~ total_ret)
intercepts5 <- model_list5$coefficients[1,]
test_stat5 <- (t(intercepts5) %*% v_inv3 %*% intercepts5) / (1+tan_sharpe2^2)
obs <- 1073
N <- 25
F_stat5 <- obs * (obs - N - 1) * test_stat5 / (N * (obs - 2))
p_val5 <- pf(F_stat5, N, obs-N-1, 0, lower.tail = FALSE)
#Question 3i: test the 25 B/M portfolios against the tangency portfolio of
#the 30 industry portfolios.
one_v <- rep(1, 30)
v_inv <- ginv(cov(vw_returns))
vw_excess <- vw_returns - rf
industry_tan <- (v_inv %*% (portfolio_means - rf)) / (as.vector(t(one_v) %*% v_inv %*% (portfolio_means - rf)))
ind_tan_returns <- vw_excess %*% industry_tan
i_alphas <- 1 : 25
i_betas <- 1 : 25
# Reload the 25 portfolio return columns (2:26), percent -> decimal.
beme <- read_csv("ps4beme.csv")
beme <- beme / 100
beme <- beme[1:1069,2:26]
for(i in 1:25) {
  model6 <- lm(beme[1:1069,i] - rf ~ ind_tan_returns)
  i_alphas[i] <- coefficients(model6)[1]
  i_betas[i] <- coefficients(model6)[2]
}
tan_sharpe3 <- mean(ind_tan_returns) / sd(ind_tan_returns)
# NOTE(review): cov_beme_inv was computed in 1G from the earlier beme matrix;
# reusing it here assumes both samples line up -- confirm.
test_stat6 <- (t(i_alphas) %*% cov_beme_inv %*% i_alphas) / (1+tan_sharpe3^2)
obs <- 1069
N <- 25
F_stat6 <- obs * (obs - N - 1) * test_stat6 / (N * (obs - 2))
p_val6 <- pf(F_stat6, N, obs-N-1, 0, lower.tail = FALSE)
#Question 3j: same exercise with the past-performance tangency portfolio.
one_v <- rep(1, 10)
v_inv2 <- ginv(cov(past_returns))
# past_means is already an excess return (rf was subtracted in section 1E).
pastperf_tan <- (v_inv2 %*% (past_means)) / (as.vector(t(one_v) %*% v_inv2 %*% (past_means)))
pp_excess <- past_returns - rf
pp_tan_returns <- pp_excess %*% pastperf_tan
j_alphas <- 1 : 25
j_betas <- 1 : 25
# NOTE(review): the regressor below is ind_tan_returns (industry tangency),
# not pp_tan_returns -- possibly a copy-paste slip; confirm the intent.
for(i in 1:25) {
  model7 <- lm(beme[1:1063,i] - rf ~ ind_tan_returns[1:1063])
  j_alphas[i] <- coefficients(model7)[1]
  j_betas[i] <- coefficients(model7)[2]
}
tan_sharpe_j <- mean(pp_tan_returns) / sd(pp_tan_returns)
test_stat7 <- (t(j_alphas) %*% cov_beme_inv %*% j_alphas) / (1+tan_sharpe_j^2)
obs <- 1063
N <- 25
F_stat7 <- obs * (obs - N - 1) * test_stat7 / (N * (obs - 2))
p_val7 <- pf(F_stat7, N, obs-N-1, 0, lower.tail = FALSE)
#3K: covariance of the three tangency-return series, aligned to the
#1063-observation past-performance sample via index 7:1069.
returns_df <- data.frame(tan_ind = ind_tan_returns[7:1069], tan_pp = pp_tan_returns, tan_beme = tan_excess_return[7:1069])
cov_returns <- cov(returns_df)
| /Problem Sets/PS4eeb.R | no_license | alexgarland/Applied_Quant_Finance | R | false | false | 7,483 | r | library(readr)
# ggplot2 is loaded but not used in the visible code; MASS supplies ginv().
library(ggplot2)
library(MASS)
# NOTE(review): an absolute personal setwd() and rm(list=ls()) are fragile
# in shared scripts -- fine for a one-off problem set only.
setwd("C:/Users/Emily/Documents/AppliedQuantFin")
rm(list=ls())
## R equivalent of MATLAB's repmat: tile matrix X in an m-by-n grid of copies.
repmat <- function(X, m, n) {
  n_rows <- nrow(X)
  n_cols <- ncol(X)
  # Recycle X column-wise n times to build one horizontal band, then lay the
  # transposed band out row-wise to stack it m times vertically.
  band <- matrix(X, n_rows, n_cols * n)
  matrix(t(band), n_rows * m, n_cols * n, byrow = TRUE)
}
# Load market data; keep csv columns 2:3 (presumably the market return and
# the risk-free rate -- TODO confirm against the data file).
market <- read_csv("ps4market.csv")
market <- as.matrix(market[,2:3])
# percent -> decimal returns
market <- market / 100
# rf: mean of the second retained column, used as the risk-free rate below
rf <- mean(market[,2])
# truncate to 1069 observations to align with the portfolio sample
market <- market[1:1069,]
# 30 value-weighted portfolio return columns, percent -> decimal
vw_returns <- read_csv("ps4vw.csv")
vw_returns <- as.matrix(vw_returns[,2:31])
vw_returns <- vw_returns / 100
#1A: sample mean, standard deviation, and Sharpe ratio per portfolio
portfolio_means <- apply(vw_returns, 2, mean)
portfolio_sds <- apply(vw_returns, 2, sd)
portfolio_sharpes <- (portfolio_means - rf) / portfolio_sds
#1B
alphas <- 1 : 30
betas <- 1 : 30
for(i in 1:30) {
model <- lm(vw_returns[,i] - rf ~ market[,1])
alphas[i] <- coefficients(model)[1]
betas[i] <- coefficients(model)[2]
errors[,i] <- residuals(model)
}
# F-stat (GRS-style test that all 30 industry alphas are jointly zero)
# NOTE(review): `T` is used as a variable name here, masking the
# built-in TRUE alias; renaming it file-wide (e.g. n_obs) would be safer.
T <- 1069 # num of observations
L <- 1    # num of factors
N <- 30   # num of portfolios
v_inv <- ginv(cov(vw_returns))
market_sharpe <- mean(market[, 1]) / sd(market[, 1])
test_stat <- (t(alphas) %*% v_inv %*% alphas) / (1 + market_sharpe^2)
# BUG FIX: the original multiplied by `obs`, which is not defined until
# much later in the script (first assigned around the 1G section); the
# observation count in this section is `T`.
F_stat <- T * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T - N - 1, 0, lower.tail = FALSE)
#1E
past_returns <- read_csv("ps4past.csv")
past_returns <- as.matrix(past_returns[,2:11])
past_returns <- past_returns / 100
past_means <- apply(past_returns - rf, 2, mean)
past_sds <- apply(past_returns, 2, sd)
past_sharpes <- (past_means) / past_sds
past_alphas <- 1 : 10
past_betas <- 1 : 10
past_errors <- matrix(nrow = 1063, ncol = 10)
for(i in 1:10) {
model2 <- lm(past_returns[,i] - rf ~ market[7:1069,1])
past_alphas[i] <- coefficients(model2)[1]
past_betas[i] <- coefficients(model2)[2]
past_errors[,i] <- residuals(model2)
}
# F-stat (joint test of the 10 past-performance portfolio alphas)
T <- 1063 # num of observations (past_returns uses market rows 7:1069)
L <- 1    # num of factors
N <- 10   # num of portfolios
v_inv <- ginv(cov(past_returns))
market_sharpe <- mean(market[, 1]) / sd(market[, 1])
test_stat <- (t(past_alphas) %*% v_inv %*% past_alphas) / (1 + market_sharpe^2)
# BUG FIX: `obs` is undefined at this point in the script; use `T`,
# the observation count declared just above.
F_stat <- T * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T - N - 1, 0, lower.tail = FALSE)
#1F
T <- 1069 #num of observations
L <- 1 #num of factors
N <- 25 #num of portfolios
beme <- read_csv("ps4beme.csv")
beme <- as.matrix(beme[1:1069,2:26])
beme <- beme / 100
beme_means <- apply(beme, 2, mean)
beme_sds <- apply(beme, 2, sd)
beme_sharpes <- (beme_means - rf) / beme_sds
beme_alphas <- 1 : 25
beme_betas <- 1 : 25
beme_errors <- matrix(nrow = 1069, ncol = 25)
for(i in 1:25) {
model3 <- lm(beme[,i] - rf ~ market[,1])
beme_alphas[i] <- coefficients(model3)[1]
beme_betas[i] <- coefficients(model3)[2]
beme_errors[,i] <- residuals(model3)
}
## F-stat (joint test of the 25 book-to-market portfolio alphas)
# NOTE(review): this section's header (1F) sets T <- 1069 and the beme
# matrix has 1069 rows, but 1063 is used here -- confirm which
# observation count is intended.
T <- 1063 # num of observations
L <- 1    # num of factors
N <- 25   # num of portfolios
v_inv <- ginv(cov(beme))
market_sharpe <- mean(market[, 1]) / sd(market[, 1])
test_stat <- (t(beme_alphas) %*% v_inv %*% beme_alphas) / (1 + market_sharpe^2)
# BUG FIX: `obs` is undefined at this point in the script; use `T`.
F_stat <- T * (T - N - 1) * test_stat / (N * (T - 2))
p_val <- pf(F_stat, N, T - N - 1, 0, lower.tail = FALSE)
#1G
cov_beme <- cov(beme)
cov_beme_inv <- ginv(cov_beme)
one_v = rep(1, 25)
tan_weights <- (cov_beme_inv %*% (beme_means - rf)) / (as.vector(t(one_v) %*% cov_beme_inv %*% (beme_means - rf)))
tan_excess_return <- (beme - rf) %*% tan_weights
model_list <- lm((beme - rf) ~ tan_excess_return)
intercepts <- model_list$coefficients[1,]
tan_sharpe <- mean(tan_excess_return) / sd(tan_excess_return)
test_stat <- (t(intercepts) %*% cov_beme_inv %*% intercepts) / (1+tan_sharpe^2)
obs <- 1073
N <- 25
F_stat <- obs * (obs - N - 1) * test_stat / (N * (obs - 2))
p_val <- pf(F_stat, N, obs-N-1, 0, lower.tail = FALSE)
#1H
beme <- read_csv("ps4beme.csv")
beme <- beme / 100
even_years <- subset(beme, as.integer(beme$Size) %% 2 == 0)
odd_years <- subset(beme, as.integer(beme$Size) %% 2 == 1)
even_training <- subset(even_years, as.integer(even_years$Size * 100) %% 2 == 1)
even_test <- subset(even_years, as.integer(even_years$Size * 100) %% 2 == 0)
odd_training <- subset(odd_years, as.integer(odd_years$Size * 100) %% 2 == 0)
odd_test <- subset(odd_years, as.integer(odd_years$Size * 100) %% 2 == 1)
training_df <- rbind(even_training, odd_training)
test_df <- rbind(even_test, odd_test)
vcov_mat4 = cov(training_df[2:26])
v_inv4 = ginv(vcov_mat4)
training_ret = as.matrix(training_df[,2:26])
avg_train_rtn <- apply(training_df[,2:26], 2, mean, na.rm=TRUE)
avg_train_rtn2 <- avg_train_rtn - rf
tan_weights2 <- (v_inv4 %*% avg_train_rtn2) / (as.vector(t(one_v) %*% v_inv4 %*% avg_train_rtn2))
vcov_mat5 = cov(test_df[2:26])
v_inv5 = ginv(vcov_mat5)
test_ret <- as.matrix(test_df[,2:26])
avg_test_rtn <- apply(test_df[,2:26], 2, mean)
avg_test_rtn2 <- avg_test_rtn - rf
tan_weights3 <- (v_inv5 %*% avg_test_rtn2) / (as.vector(t(one_v) %*% v_inv5 %*% avg_test_rtn2))
first_half_of_returns <- test_ret %*% tan_weights2
second_half <- training_ret %*% tan_weights3
# Out-of-sample tangency returns: rows that belonged to the "training"
# split get the weights estimated on the test split and vice versa.
# BUG FIXES vs. the original loop:
#  * the even/odd tests used the whole beme$Size column instead of row
#    i's value, so `&&` saw a length-1073 vector (an error in R >= 4.3,
#    silently using only the first element before that);
#  * the second condition had a misplaced parenthesis,
#    as.integer((... %% 2 == 0)), coercing the logical test itself;
#  * `beme - rf` also shifted the Size/date column, and a tibble row is
#    not valid input to %*%; excess returns are now taken on the 25
#    portfolio columns only;
#  * total_ret was grown with c() inside the loop -- preallocated now.
# NOTE(review): as.integer(Size * 100) can truncate downwards through
# floating-point error (e.g. 1926.07 * 100 -> 192606.999...); the same
# expressions are used for the training/test split above, so the
# classification stays consistent, but confirm this is intended.
beme_excess <- as.matrix(beme[, 2:26]) - rf
total_ret <- numeric(nrow(beme_excess))
for (i in seq_len(nrow(beme_excess))) {
  yr <- as.integer(beme$Size[i])
  yrmo <- as.integer(beme$Size[i] * 100)
  in_training <- (yr %% 2 == 0 && yrmo %% 2 == 1) ||
    (yr %% 2 == 1 && yrmo %% 2 == 0)
  weights <- if (in_training) tan_weights3 else tan_weights2
  total_ret[i] <- beme_excess[i, ] %*% weights
}
# GRS-style test of the 25 B/M portfolios against the split-sample
# tangency portfolio returns computed above.
tan_sharpe2 <- mean(total_ret) / sd(total_ret)
# NOTE(review): `excess_returns_arr3` is never defined anywhere in this
# script -- this line fails with "object not found". It presumably
# should be the 25-column B/M excess-return matrix (beme - rf on the
# portfolio columns); confirm before running.
model_list5 <- lm(excess_returns_arr3 ~ total_ret)
# Row 1 of the multi-response coefficient matrix = per-portfolio alphas.
intercepts5 <- model_list5$coefficients[1,]
# NOTE(review): `v_inv3` is also undefined; by analogy with the other
# sections it should be the inverse covariance of the test assets
# (cov_beme_inv?). Confirm.
test_stat5 <- (t(intercepts5) %*% v_inv3 %*% intercepts5) / (1+tan_sharpe2^2)
obs <- 1073 # number of monthly observations
N <- 25 # number of portfolios
F_stat5 <- obs * (obs - N - 1) * test_stat5 / (N * (obs - 2))
p_val5 <- pf(F_stat5, N, obs-N-1, 0, lower.tail = FALSE)
#Question 3i
one_v <- rep(1, 30)
v_inv <- ginv(cov(vw_returns))
vw_excess <- vw_returns - rf
industry_tan <- (v_inv %*% (portfolio_means - rf)) / (as.vector(t(one_v) %*% v_inv %*% (portfolio_means - rf)))
ind_tan_returns <- vw_excess %*% industry_tan
i_alphas <- 1 : 25
i_betas <- 1 : 25
beme <- read_csv("ps4beme.csv")
beme <- beme / 100
beme <- beme[1:1069,2:26]
for(i in 1:25) {
model6 <- lm(beme[1:1069,i] - rf ~ ind_tan_returns)
i_alphas[i] <- coefficients(model6)[1]
i_betas[i] <- coefficients(model6)[2]
}
tan_sharpe3 <- mean(ind_tan_returns) / sd(ind_tan_returns)
test_stat6 <- (t(i_alphas) %*% cov_beme_inv %*% i_alphas) / (1+tan_sharpe3^2)
obs <- 1069
N <- 25
F_stat6 <- obs * (obs - N - 1) * test_stat6 / (N * (obs - 2))
p_val6 <- pf(F_stat6, N, obs-N-1, 0, lower.tail = FALSE)
#Question 3j
one_v <- rep(1, 10)
v_inv2 <- ginv(cov(past_returns))
pastperf_tan <- (v_inv2 %*% (past_means)) / (as.vector(t(one_v) %*% v_inv2 %*% (past_means)))
pp_excess <- past_returns - rf
pp_tan_returns <- pp_excess %*% pastperf_tan
j_alphas <- 1 : 25
j_betas <- 1 : 25
for(i in 1:25) {
model7 <- lm(beme[1:1063,i] - rf ~ ind_tan_returns[1:1063])
j_alphas[i] <- coefficients(model7)[1]
j_betas[i] <- coefficients(model7)[2]
}
tan_sharpe_j <- mean(pp_tan_returns) / sd(pp_tan_returns)
test_stat7 <- (t(j_alphas) %*% cov_beme_inv %*% j_alphas) / (1+tan_sharpe_j^2)
obs <- 1063
N <- 25
F_stat7 <- obs * (obs - N - 1) * test_stat7 / (N * (obs - 2))
p_val7 <- pf(F_stat7, N, obs-N-1, 0, lower.tail = FALSE)
#3K
returns_df <- data.frame(tan_ind = ind_tan_returns[7:1069], tan_pp = pp_tan_returns, tan_beme = tan_excess_return[7:1069])
cov_returns <- cov(returns_df)
|
\name{lagSelect}
\alias{lagSelect}
\title{Select the optimal number of lags, given criteria}
\description{Determine the optimal number of lags for dynamic regression
}
\usage{
lagSelect(y, maxp, ic)
}
\arguments{
\item{ y }{A univariate time series}
\item{ maxp }{The maximum number of lags to consider}
\item{ ic }{The information criterion to use: either "AIC" or "BIC"}
}
\details{
Information criteria "AIC" and "BIC" use the R built-in functions.
}
\value{
An integer giving the optimal number of lags under the chosen criterion.
}
\author{Ho Tsungwu <tsungwu@cc.shu.edu.tw>
}
\examples{
#library(pdR)
#data(inf19)
#y<-inf19[,1]
#lagSelect(y,maxp=25,ic="BIC")
}
| /man/lagSelect.Rd | no_license | tsungwu/pdR | R | false | false | 632 | rd | \name{lagSelect}
\alias{lagSelect}
\title{Select the optimal number of lags, given criteria}
\description{Determine the optimal number of lags for dynamic regression
}
\usage{
lagSelect(y, maxp, ic)
}
\arguments{
\item{ y }{A univariate time series}
\item{ maxp }{The maximum number of lags to consider}
\item{ ic }{The information criterion to use: either "AIC" or "BIC"}
}
\details{
Information criteria "AIC" and "BIC" use the R built-in functions.
}
\value{
It returns an integer, indicating the optimal lags
}
\author{Ho Tsungwu <tsungwu@cc.shu.edu.tw>
}
\examples{
#library(pdR)
#data(inf19)
#y<-inf19[,1]
#lagSelect(y,maxp=25,ic="BIC")
}
|
## Matrix inversion can be a costly computation. These functions will cache the inverse of matrix
## And use it when it is already available in the cache, or compute it when it's not available
## This function creates a special "matrix" object that can cache its inverse.
## Build a "cache-aware matrix": a list of closures sharing a matrix
## `x` and a memo slot for its inverse. The cached inverse is discarded
## whenever a new matrix is stored, so a stale result is never served.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the matrix and drop any inverse computed for the old one.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve
## should retrieve the inverse from the cache.
## Return the inverse of the cache-aware matrix `x` (as built by
## makeCacheMatrix). The inverse is computed at most once: a cached
## value is returned immediately (with a status message), otherwise
## solve() is called and the result is stored back on `x` for future
## calls. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | pbudidharma/ProgrammingAssignment2 | R | false | false | 1,229 | r | ## Matrix inversion can be a costly computation. These functions will cache the inverse of matrix
## And use it when it is already available in the cache, or compute it when it's not available
## This function creates a special "matrix" object that can cache its inverse.
## Wrap a matrix in a list of accessor closures that also memoise its
## inverse. `set` swaps in a new matrix and invalidates the memo;
## `setinverse`/`getinverse` store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL # a new matrix invalidates the cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve
## should retrieve the inverse from the cache.
## Compute (or fetch) the inverse of a cache-aware matrix created by
## makeCacheMatrix. On a cache hit a message is emitted and the stored
## inverse returned; on a miss the inverse is computed with solve(),
## cached on `x`, and returned. `...` is passed through to solve().
cacheSolve <- function(x, ...) {
  memo <- x$getinverse()
  if (!is.null(memo)) {
    message("getting cached data")
    return(memo)
  }
  memo <- solve(x$get(), ...)
  x$setinverse(memo)
  memo
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.model.dt.tree.R
\name{xgb.model.dt.tree}
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
contains feature names, those would be used when \code{feature_names=NULL} (default value).
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}}
\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
function (where parameter \code{with_stats = TRUE} should have been set).
\code{text} takes precedence over \code{model}.}
\item{trees}{an integer vector of tree indices that should be parsed.
If set to \code{NULL}, all trees of the model are parsed.
It could be useful, e.g., in multiclass classification to get only
the trees of one certain class. IMPORTANT: the tree index in xgboost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}
\item{...}{currently not used.}
}
\value{
A \code{data.table} with detailed information about model trees' nodes.
The columns of the \code{data.table} are:
\itemize{
\item \code{Tree}: ID of a tree in a model (integer)
\item \code{Node}: integer ID of a node in a tree (integer)
\item \code{ID}: identifier of a node in a model (character)
\item \code{Feature}: for a branch node, it's a feature id or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'}
\item \code{Split}: location of the split for a branch node (split condition is always "less than")
\item \code{Yes}: ID of the next node when the split condition is met
\item \code{No}: ID of the next node when the split condition is not met
\item \code{Missing}: ID of the next node when branch value is missing
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observation either seen by a split
or collected by a leaf during training.
}
}
\description{
Parse a boosted tree model text dump into a \code{data.table} structure.
}
\examples{
# Basic use:
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
# This bst has feature_names stored in it, so those would be used when
# the feature_names parameter is not provided:
(dt <- xgb.model.dt.tree(model = bst))
# How to match feature names of splits that are following a current 'Yes' branch:
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
}
| /R-package/man/xgb.model.dt.tree.Rd | permissive | opentable/xgboost | R | false | true | 2,925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.model.dt.tree.R
\name{xgb.model.dt.tree}
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, ...)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
contains feature names, those would be used when \code{feature_names=NULL} (default value).
Non-null \code{feature_names} could be provided to override those in the model.}
\item{model}{object of class \code{xgb.Booster}}
\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
function (where parameter \code{with_stats = TRUE} should have been set).
\code{text} takes precedence over \code{model}.}
\item{trees}{an integer vector of tree indices that should be parsed.
If set to \code{NULL}, all trees of the model are parsed.
It could be useful, e.g., in multiclass classification to get only
the trees of one certain class. IMPORTANT: the tree index in xgboost models
is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).}
\item{...}{currently not used.}
}
\value{
A \code{data.table} with detailed information about model trees' nodes.
The columns of the \code{data.table} are:
\itemize{
\item \code{Tree}: ID of a tree in a model (integer)
\item \code{Node}: integer ID of a node in a tree (integer)
\item \code{ID}: identifier of a node in a model (character)
\item \code{Feature}: for a branch node, it's a feature id or name (when available);
for a leaf note, it simply labels it as \code{'Leaf'}
\item \code{Split}: location of the split for a branch node (split condition is always "less than")
\item \code{Yes}: ID of the next node when the split condition is met
\item \code{No}: ID of the next node when the split condition is not met
\item \code{Missing}: ID of the next node when branch value is missing
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observation either seen by a split
or collected by a leaf during training.
}
}
\description{
Parse a boosted tree model text dump into a \code{data.table} structure.
}
\examples{
# Basic use:
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
# This bst has feature_names stored in it, so those would be used when
# the feature_names parameter is not provided:
(dt <- xgb.model.dt.tree(model = bst))
# How to match feature names of splits that are following a current 'Yes' branch:
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
}
|
#############################################################################################################
# Author :
# Jerome Mariette, MIAT, Universite de Toulouse, INRA 31326 Castanet-Tolosan France
# Nathalie Villa-Vialaneix, MIAT, Universite de Toulouse, INRA 31326 Castanet-Tolosan France
#
# Copyright (C) 2017
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#############################################################################################################
# Startup banner, called automatically by R when the package is
# attached (libname/pkgname are supplied by the loader).
# packageStartupMessage() is used so that
# suppressPackageStartupMessages() can silence it.
.onAttach <- function(libname, pkgname) {
  banner <- paste0(
    "\nmixKernel will soon be included in the mixOmics package",
    "\n\nVisit http://www.mixOmics.org for a tutorial to use our method.",
    "\nAny bug reports or comments? Notify us at jerome.mariette@inra.fr or https://bitbucket.org/klecao/package-mixomics/issues"
  )
  packageStartupMessage(banner)
}
| /R/zzz.R | no_license | mtremblayfr/mixKernel | R | false | false | 1,487 | r | #############################################################################################################
# Author :
# Jerome Mariette, MIAT, Universite de Toulouse, INRA 31326 Castanet-Tolosan France
# Nathalie Villa-Vialaneix, MIAT, Universite de Toulouse, INRA 31326 Castanet-Tolosan France
#
# Copyright (C) 2017
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#############################################################################################################
# Shown once when the package is attached; R invokes .onAttach
# automatically and supplies libname/pkgname. Using
# packageStartupMessage() keeps the banner suppressible via
# suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname) {
  lines <- c(
    "\nmixKernel will soon be included in the mixOmics package",
    "\n\nVisit http://www.mixOmics.org for a tutorial to use our method.",
    "\nAny bug reports or comments? Notify us at jerome.mariette@inra.fr or https://bitbucket.org/klecao/package-mixomics/issues"
  )
  packageStartupMessage(paste(lines, collapse = ""))
}
|
# Code written by DS ~April 2020 to summarize TRY database data for OSPREE trait analsyes
#DL edits May 26, 2020 and January 15, 2021 in order to summarize both the try and bien data after we had removed the duplicated data
#Understanding Try data
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
library(dplyr)
#Anyone else working with this code should add their info/path here
if(length(grep("deirdreloughnan", getwd())>0)) { setwd("~/Desktop/ospree_trait_analysis/")
} #else if
#(length(grep("XXX", getwd())>0)) { setwd("XXX")
#}
#load in dataset
trt <- read.csv("input/try_bien_dodups.csv")
unique(trt$TraitName)
# Per-group record counts for the trait table. n() and n_distinct()
# are the idiomatic dplyr replacements for length(col) and
# length(unique(col)) inside summarise(); the results are identical.
# Records per species:
dataperspecies <- trt %>%
  group_by(new.SpeciesName) %>%
  summarise(no_rows = n())
# Records per species within each dataset (project/PI):
dataperspeciesperdataset <- trt %>%
  group_by(new.SpeciesName, project_pi) %>%
  summarise(no_rows = n())
# Records and number of contributing datasets per species x trait:
dataperspeciespertrait <- trt %>%
  group_by(new.SpeciesName, TraitName) %>%
  summarise(no_rows = n(), no_datasets = n_distinct(project_pi))
# Records per species x dataset x trait:
dataperspeciesperdatasetpertrait <- trt %>%
  group_by(new.SpeciesName, project_pi, TraitName) %>%
  summarise(no_rows = n())
# Number of distinct traits measured in each dataset:
traitperdataset <- trt %>%
  group_by(project_pi) %>%
  summarise(no_trait = n_distinct(TraitName))
dat<-dataperspeciespertrait
#dat<-dataperspeciesperdatasetpertrait
| /analyses/traits/Rfiles/Trait_Data_tables.R | no_license | lizzieinvancouver/ospree | R | false | false | 1,391 | r | # Code written by DS ~April 2020 to summarize TRY database data for OSPREE trait analsyes
#DL edits May 26, 2020 and January 15, 2021 in order to summarize both the try and bien data after we had removed the duplicated data
#Understanding Try data
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
library(dplyr)
#Anyone else working with this code should add their info/path here
if(length(grep("deirdreloughnan", getwd())>0)) { setwd("~/Desktop/ospree_trait_analysis/")
} #else if
#(length(grep("XXX", getwd())>0)) { setwd("XXX")
#}
#load in dataset
trt <- read.csv("input/try_bien_dodups.csv")
unique(trt$TraitName)
dataperspecies <- trt %>%
group_by(new.SpeciesName) %>%
summarise(no_rows = length(new.SpeciesName))
dataperspeciesperdataset<- trt %>%
group_by(new.SpeciesName, project_pi) %>%
summarise(no_rows = length(new.SpeciesName))
dataperspeciespertrait<- trt %>%
group_by(new.SpeciesName, TraitName) %>%
summarise(no_rows = length(new.SpeciesName),no_datasets=length(unique(project_pi)))
dataperspeciesperdatasetpertrait<- trt %>%
group_by(new.SpeciesName, project_pi, TraitName) %>%
summarise(no_rows = length(new.SpeciesName))
traitperdataset<- trt %>%
group_by(project_pi) %>%
summarise(no_trait=length(unique(TraitName)))
dat<-dataperspeciespertrait
#dat<-dataperspeciesperdatasetpertrait
|
# Asylum applications by Syrian nationals in Germany, split by sex,
# drawn as a grouped bar chart from Eurostat table migr_asyappctza.
library(eurostat)
library(ggplot2)
library(scales)
# BUG FIX: `=` was used for top-level assignment, and the German
# umlauts in the plot labels had been mangled to "?" by a bad
# encoding round-trip -- restored below.
migr_asyappctza <- get_eurostat("migr_asyappctza")
# Keep Syrian citizens, Germany, asylum applicants; drop the sex/age
# totals and the under-18 aggregate. Columns are selected by position
# (presumably sex, age, time, values -- confirm against the current
# eurostat table layout).
de_sy_migr_asyappctza <- subset(
  migr_asyappctza,
  citizen == "SY" & geo == "DE" & asyl_app == "ASY_APP" &
    sex != "T" & age != "TOTAL" & age != "Y_LT18"
)[c(2, 4, 7:8)]
ggplot(de_sy_migr_asyappctza, aes(x = time, y = values, fill = sex)) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_y_continuous(labels = comma) +
  scale_x_date() +
  ggtitle("Asylanträge von syrischen Staatsangehörigen",
          subtitle = "In Deutschland, nach Geschlecht") +
  xlab("Jahr") +
  ylab("Anzahl Asylanträge") +
  scale_fill_discrete() +
  theme_light()
| /man/eurostat_to_barplot_3lines.R | no_license | muc-fluechtlingsrat/r-eurostat-refugees | R | false | false | 616 | r | library(eurostat)
library(ggplot2)
library(scales)
migr_asyappctza=get_eurostat("migr_asyappctza")
de_sy_migr_asyappctza <- subset(migr_asyappctza, citizen == "SY" & geo == "DE" & asyl_app == "ASY_APP" & sex != "T" & age != "TOTAL" & age != "Y_LT18")[c(2,4,7:8)]
ggplot(de_sy_migr_asyappctza, aes(x=time, y=values, fill=sex)) + geom_bar(stat="identity", position="dodge") + scale_y_continuous(labels=comma) + scale_x_date() + ggtitle("Asylantr?ge von syrischen Staatsangeh?rigen", subtitle="In Deutschland, nach Geschlecht") + xlab("Jahr") + ylab("Anzahl Asylantr?ge") + scale_fill_discrete() + theme_light()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbUtils.R
\name{convert_all_to_csv}
\alias{convert_all_to_csv}
\title{Writes all dataframes to csv}
\usage{
convert_all_to_csv(dataframes, path)
}
\arguments{
\item{dataframes}{= list of dataframes,}
\item{path}{= path of file/s}
}
\description{
Writes all dataframes to csv
}
\examples{
\dontrun{
convert_all_to_csv(dataframes, path)
}
}
\keyword{bulk}
\keyword{save}
\keyword{write}
| /man/convert_all_to_csv.Rd | permissive | apoorvalal/LalRUtils | R | false | true | 466 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbUtils.R
\name{convert_all_to_csv}
\alias{convert_all_to_csv}
\title{Writes all dataframes to csv}
\usage{
convert_all_to_csv(dataframes, path)
}
\arguments{
\item{dataframes}{= list of dataframes,}
\item{path}{= path of file/s}
}
\description{
Writes all dataframes to csv
}
\examples{
\dontrun{
convert_all_to_csv(dataframes, path)
}
}
\keyword{bulk}
\keyword{save}
\keyword{write}
|
#!/usr/bin/env Rscript
# Driver script: estimate false-discovery rates for the
# substrate-specific association permutations.
# Sampling_FDR() is defined in the sourced file below.
# NOTE(review): all paths are hard-coded relative to the author's home
# directory -- portable only on the original machine.
source("~/SurveyPaper/scripts/Sample_FDR_WYassociations.R")
# Load the 10k-permutation association results.
perms<-read.delim("~/SurveyPaper/data/Substrate_Specific_Associations_10k_permutations.tsv", header=TRUE, stringsAsFactors=FALSE)
# Estimate FDR at three p-value cutoffs, sampling 100 of the 10,000
# permutations (exact semantics defined in the sourced script -- confirm).
permsFDR<-Sampling_FDR(Permutation_df=perms, permutations=10000, pvals=c(0.0001, 0.001, 0.01), perms_to_sample=100)
write.table(permsFDR, "~/SurveyPaper/data/Substrate_Specific_Associations_FDR.tsv", sep="\t", quote=FALSE, row.names=FALSE) | /scripts/BASH_run_FDR.R | no_license | KatieFish/SurveyPaper | R | false | false | 457 | r | #!/usr/bin/env Rscript
source("~/SurveyPaper/scripts/Sample_FDR_WYassociations.R")
perms<-read.delim("~/SurveyPaper/data/Substrate_Specific_Associations_10k_permutations.tsv", header=TRUE, stringsAsFactors=FALSE)
permsFDR<-Sampling_FDR(Permutation_df=perms, permutations=10000, pvals=c(0.0001, 0.001, 0.01), perms_to_sample=100)
write.table(permsFDR, "~/SurveyPaper/data/Substrate_Specific_Associations_FDR.tsv", sep="\t", quote=FALSE, row.names=FALSE) |
library(dplyr)
setwd("~/Dropbox/Brown_Webb/Desktop/ATAC-seq-PEAKS/AvsQ_analysis_2019_v2/Core_promoter")
# Load AvsQ proximal stable gene list
df <- read.table("AvsQ.truStable.proximal.genelist.txt", sep="\t", stringsAsFactors=F)
df[4] <- NULL
colnames(df) <- c("chr", "start", "end", "gene")
# Load RNA-seq data (Leeman et al in vivo)
expres <- read.csv("~/Dropbox/Desktop/RNA-seq-data/DEseq_young_aNSC-qNSC_Leeman_et_al.csv",
sep=",", stringsAsFactors=F)
expres <- expres[,c(1,3,7)]
colnames(expres) <- c("gene", "log2FC", "padj")
# Separate differentially expressed genes
sig <- expres[expres$padj < 0.05, ]
upreg <- sig[sig$log2FC>0, ]
downreg <- sig[sig$log2FC<0, ]
notDE <- setdiff(expres, sig)
# ================================================================================
# Separate AvsQ proximal stable gene list by differential expression
df.upreg <- inner_join(df, upreg, by="gene") # 1683
df.downreg <- inner_join(df, downreg, by="gene") # 1133
df.notDE <- inner_join(df, notDE, by="gene") # 7497
# Write output files
# write.table(`df.upreg`, file="AvsQ.truStable.proximal.upregulated.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# write.table(`df.downreg`, file="AvsQ.truStable.proximal.downregulated.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# write.table(`df.notDE`, file="AvsQ.truStable.proximal.notDE.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# ================================================================================
# For expression analysis v2
# Get AvsQ proximal stable genes that are upregulated by ascending padj
# NOTE(review): the row boundaries below assume df.upreg has exactly
# 1683 rows (1683/4 = 420.75, so the "quartiles" of 420/421/421 rows
# are approximate); they will silently misclassify if the input changes.
df.upreg.reorder <- `df.upreg`[order(`df.upreg`$padj), ] #1683
# Take top quartile of upregulation
df.upreg.top <- `df.upreg.reorder`[1:420, ]
# NOTE(review): quote=F / row.names=F use the reassignable F alias for
# FALSE -- spelling out FALSE would be safer.
write.table(`df.upreg.top`, file="AvsQ.truStable.proximal.top.quartile.upreg.genelist.txt",
            sep="\t", quote=F, row.names=F, col.names=F)
# Other quartiles of upregulated
df.upreg.second <- `df.upreg.reorder`[421:841, ]
df.upreg.third <- `df.upreg.reorder`[842:1262, ]
# NOTE(review): despite the "bottom quartile" name (and output
# filename), this span covers rows 421-1683, i.e. the lower THREE
# quartiles -- confirm intended.
df.upreg.bottom <- `df.upreg.reorder`[421:1683, ] # all excluding top quartile
write.table(`df.upreg.second`, file="AvsQ.truStable.proximal.second.quartile.upreg.genelist.txt",
            sep="\t", quote=F, row.names=F, col.names=F)
write.table(`df.upreg.third`, file="AvsQ.truStable.proximal.third.quartile.upreg.genelist.txt",
            sep="\t", quote=F, row.names=F, col.names=F)
write.table(`df.upreg.bottom`, file="AvsQ.truStable.proximal.bottom.quartile.upreg.genelist.txt",
            sep="\t", quote=F, row.names=F, col.names=F)
| /Fig.S4C.1.truStable.promoters.DE.R | no_license | Webb-Laboratory/Maybury-Lewis_et_al_2021 | R | false | false | 2,659 | r |
library(dplyr)
setwd("~/Dropbox/Brown_Webb/Desktop/ATAC-seq-PEAKS/AvsQ_analysis_2019_v2/Core_promoter")
# Load AvsQ proximal stable gene list
df <- read.table("AvsQ.truStable.proximal.genelist.txt", sep="\t", stringsAsFactors=F)
df[4] <- NULL
colnames(df) <- c("chr", "start", "end", "gene")
# Load RNA-seq data (Leeman et al in vivo)
expres <- read.csv("~/Dropbox/Desktop/RNA-seq-data/DEseq_young_aNSC-qNSC_Leeman_et_al.csv",
sep=",", stringsAsFactors=F)
expres <- expres[,c(1,3,7)]
colnames(expres) <- c("gene", "log2FC", "padj")
# Separate differentially expressed genes
sig <- expres[expres$padj < 0.05, ]
upreg <- sig[sig$log2FC>0, ]
downreg <- sig[sig$log2FC<0, ]
notDE <- setdiff(expres, sig)
# ================================================================================
# Separate AvsQ proximal stable gene list by differential expression
df.upreg <- inner_join(df, upreg, by="gene") # 1683
df.downreg <- inner_join(df, downreg, by="gene") # 1133
df.notDE <- inner_join(df, notDE, by="gene") # 7497
# Write output files
# write.table(`df.upreg`, file="AvsQ.truStable.proximal.upregulated.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# write.table(`df.downreg`, file="AvsQ.truStable.proximal.downregulated.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# write.table(`df.notDE`, file="AvsQ.truStable.proximal.notDE.genelist.txt", sep="\t",
# quote=F, row.names=F, col.names=F)
# ================================================================================
# For expression analysis v2
# Get AvsQ proximal stable genes that are upregulated by ascending padj
df.upreg.reorder <- `df.upreg`[order(`df.upreg`$padj), ] #1683
# Take top quartile of upregulation
df.upreg.top <- `df.upreg.reorder`[1:420, ]
write.table(`df.upreg.top`, file="AvsQ.truStable.proximal.top.quartile.upreg.genelist.txt",
sep="\t", quote=F, row.names=F, col.names=F)
# Other quartiles of upregulated
df.upreg.second <- `df.upreg.reorder`[421:841, ]
df.upreg.third <- `df.upreg.reorder`[842:1262, ]
df.upreg.bottom <- `df.upreg.reorder`[421:1683, ] # all excluding top quartile
write.table(`df.upreg.second`, file="AvsQ.truStable.proximal.second.quartile.upreg.genelist.txt",
sep="\t", quote=F, row.names=F, col.names=F)
write.table(`df.upreg.third`, file="AvsQ.truStable.proximal.third.quartile.upreg.genelist.txt",
sep="\t", quote=F, row.names=F, col.names=F)
write.table(`df.upreg.bottom`, file="AvsQ.truStable.proximal.bottom.quartile.upreg.genelist.txt",
sep="\t", quote=F, row.names=F, col.names=F)
|
#' Put a few rows of a table into one category
#'
#' @description Group a few rows in a table together under a label.
#'
#' @param kable_input Output of `knitr::kable()` with `format` specified
#' @param group_label A character string for the name of the group
#' @param start_row A numeric value that tells the function in which row the
#' group starts. Note that the counting excludes header rows and other group
#' labeling rows
#' @param end_row A numeric value that tells the function in which row the group
#' ends.
#' @param index A named vector providing the index for robust row-grouping tasks.
#' Basically, you can use it in the same way as `add_header_above()`.
#' @param label_row_css A character string for any customized css used for the
#' labeling row. By default, the labeling row will have a solid black line
#' underneath. Only useful for HTML documents.
#' @param latex_gap_space A character value telling LaTeX how large the gap
#' between the previous row and the group labeling row. Only useful for LaTeX
#' documents.
#' @param escape A T/F value showing whether special characters should be
#' escaped.
#'
#' @examples x <- knitr::kable(head(mtcars), "html")
#' # Put Row 2 to Row 5 into a Group and label it as "Group A"
#' group_rows(x, "Group A", 2, 5)
#'
#' @export
group_rows <- function(kable_input, group_label = NULL,
                       start_row = NULL, end_row = NULL,
                       index = NULL,
                       label_row_css = "border-bottom: 1px solid;",
                       latex_gap_space = "0.3em",
                       escape = TRUE) {
  # Dispatch to the HTML or LaTeX implementation, either for a single
  # explicit group (start_row/end_row) or for a batch of groups
  # described by a named `index` vector (see roxygen docs above).
  kable_format <- attr(kable_input, "format")
  if (!kable_format %in% c("html", "latex")) {
    message("Currently generic markdown table using pandoc is not supported.")
    return(kable_input)
  }
  if (is.null(index)) {
    # Single group described by start_row/end_row.
    if (kable_format == "html") {
      return(group_rows_html(kable_input, group_label, start_row, end_row,
                             label_row_css, escape))
    }
    # The format guard above guarantees this branch is "latex".
    return(group_rows_latex(kable_input, group_label, start_row, end_row,
                            latex_gap_space, escape))
  }
  # Batch mode: translate the header-style index into one
  # (header, start, end) row per labelled group and apply them in order.
  index <- group_row_index_translator(index)
  out <- kable_input
  # BUG FIX: seq_len() instead of 1:nrow(index), so an index whose
  # labelled groups were all filtered out is a no-op rather than an
  # out-of-range 1:0 loop.
  for (i in seq_len(nrow(index))) {
    if (kable_format == "html") {
      out <- group_rows_html(out, index$header[i],
                             index$start[i], index$end[i],
                             label_row_css, escape)
    } else {
      out <- group_rows_latex(out, index$header[i],
                              index$start[i], index$end[i],
                              latex_gap_space, escape)
    }
  }
  return(out)
}
# Translate a named index vector (name = group label, value = number of rows
# spanned) into a data frame with `header`, `start` and `end` columns.
# Entries with an empty (or whitespace-only) label are treated as ungrouped
# spacers and dropped.
group_row_index_translator <- function(index) {
  index <- standardize_header_input(index)
  # Start of each group = cumulative spans shifted right by one (first group
  # starts at row 1). seq_along() (unlike 1:length()) is also correct for an
  # empty colspan vector.
  index$start <- cumsum(c(1, index$colspan))[seq_along(index$colspan)]
  index$end <- cumsum(index$colspan)
  index$header <- trimws(index$header)
  index[index$header != "", ]
}
# Internal HTML worker for group_rows(): inserts a full-width label row above
# `start_row` in the parsed table XML and indents the grouped rows.
group_rows_html <- function(kable_input, group_label, start_row, end_row,
                            label_row_css, escape) {
  # Preserve the kable attributes so they can be restored on the rebuilt object.
  kable_attrs <- attributes(kable_input)
  kable_xml <- read_kable_as_xml(kable_input)
  kable_tbody <- xml_tpart(kable_xml, "tbody")
  if (escape) {
    group_label <- escape_html(group_label)
  }
  # Label rows inserted by earlier group_rows() calls shift the physical row
  # positions in tbody; correct the requested span against them.
  group_header_rows <- attr(kable_input, "group_header_rows")
  group_seq <- seq(start_row, end_row)
  if (!is.null(group_header_rows)) {
    group_seq <- positions_corrector(group_seq, group_header_rows,
                                     length(xml_children(kable_tbody)))
  }
  # Insert a group header row spanning every column of the table.
  starting_node <- xml_child(kable_tbody, group_seq[1])
  kable_ncol <- length(xml_children(starting_node))
  group_header_row_text <- paste0(
    '<tr groupLength="', length(group_seq), '"><td colspan="', kable_ncol,
    '" style="', label_row_css, '"><strong>', group_label,
    "</strong></td></tr>"
  )
  group_header_row <- read_xml(group_header_row_text, options = "COMPACT")
  xml_add_sibling(starting_node, group_header_row, .where = "before")
  # add indentations to items
  out <- as_kable_xml(kable_xml)
  attributes(out) <- kable_attrs
  # Record the new label-row position so subsequent calls can correct for it.
  attr(out, "group_header_rows") <- c(attr(out, "group_header_rows"), group_seq[1])
  out <- add_indent_html(out, positions = seq(start_row, end_row))
  return(out)
}
# Internal LaTeX worker for group_rows(): injects a bold full-width
# \multicolumn label row before `start_row` via text substitution on the
# rendered table, then indents the grouped rows.
group_rows_latex <- function(kable_input, group_label, start_row, end_row,
                             gap_space, escape) {
  table_info <- magic_mirror(kable_input)
  out <- enc2utf8(as.character(kable_input))
  # Duplicated row text would make the sub() below hit the wrong row;
  # disambiguate such rows first.
  if (table_info$duplicated_rows) {
    dup_fx_out <- fix_duplicated_rows_latex(out, table_info)
    out <- dup_fx_out[[1]]
    table_info <- dup_fx_out[[2]]
  }
  if (escape) {
    group_label <- escape_latex(group_label)
    # Double the backslashes so they survive the regex replacement below.
    group_label <- gsub("\\\\", "\\\\\\\\", group_label)
  }
  # Add group label
  # contents[1] is the header row, hence the +1 offset.
  rowtext <- table_info$contents[start_row + 1]
  if (table_info$booktabs) {
    new_rowtext <- paste0(
      "\\\\addlinespace[", gap_space, "]\n",
      "\\\\multicolumn{", table_info$ncol, "}{l}{\\\\textbf{", group_label,
      "}}\\\\\\\\\n",
      rowtext
    )
  } else {
    # Non-booktabs tables separate the label row with plain \hline rules.
    rowtext <- paste0("\\\\hline\n", rowtext)
    new_rowtext <- paste0(
      "\\\\hline\n\\\\multicolumn{", table_info$ncol, "}{l}{\\\\textbf{",
      group_label, "}}\\\\\\\\\n", rowtext
    )
  }
  out <- sub(rowtext, new_rowtext, out)
  out <- gsub("\\\\addlinespace\n", "", out)
  out <- structure(out, format = "latex", class = "knitr_kable")
  table_info$group_rows_used <- TRUE
  attr(out, "kable_meta") <- table_info
  out <- add_indent_latex(out, seq(start_row, end_row))
  return(out)
}
| /R/group_rows.R | no_license | xtmgah/kableExtra | R | false | false | 5,723 | r | #' Put a few rows of a table into one category
#'
#' @description Group a few rows in a table together under a label.
#'
#' @param kable_input Output of `knitr::kable()` with `format` specified
#' @param group_label A character string for the name of the group
#' @param start_row A numeric value that tells the function in which row the
#' group starts. Note that the counting excludes header rows and other group
#' labeling rows
#' @param end_row A numeric value that tells the function in which row the group
#' ends.
#' @param index A named vector providing the index for robust row-grouping tasks.
#' Basically, you can use it in the same way as `add_header_above()`.
#' @param label_row_css A character string for any customized css used for the
#' labeling row. By default, the labeling row will have a solid black line
#' underneath. Only useful for HTML documents.
#' @param latex_gap_space A character value telling LaTeX how large the gap
#' between the previous row and the group labeling row. Only useful for LaTeX
#' documents.
#' @param escape A T/F value showing whether special characters should be
#' escaped.
#'
#' @examples x <- knitr::kable(head(mtcars), "html")
#' # Put Row 2 to Row 5 into a Group and label it as "Group A"
#' group_rows(x, "Group A", 2, 5)
#'
#' @export
# Group rows of a kable under labels, either for one explicit span
# (group_label/start_row/end_row) or for several groups described by a named
# `index` vector; dispatches to the HTML or LaTeX worker.
group_rows <- function(kable_input, group_label = NULL,
                       start_row = NULL, end_row = NULL,
                       index = NULL,
                       label_row_css = "border-bottom: 1px solid;",
                       latex_gap_space = "0.3em",
                       escape = TRUE) {
  kable_format <- attr(kable_input, "format")
  # Plain pandoc/markdown tables cannot be modified; return them unchanged.
  if (!kable_format %in% c("html", "latex")) {
    message("Currently generic markdown table using pandoc is not supported.")
    return(kable_input)
  }
  if (is.null(index)) {
    if (kable_format == "html") {
      return(group_rows_html(kable_input, group_label, start_row, end_row,
                             label_row_css, escape))
    }
    if (kable_format == "latex") {
      return(group_rows_latex(kable_input, group_label, start_row, end_row,
                              latex_gap_space, escape))
    }
  } else {
    # Translate the named vector into start/end spans, then apply each group
    # in turn, threading the table through.
    index <- group_row_index_translator(index)
    out <- kable_input
    if (kable_format == "html") {
      for (i in 1:nrow(index)) {
        out <- group_rows_html(out, index$header[i],
                               index$start[i], index$end[i],
                               label_row_css, escape)
      }
    }
    if (kable_format == "latex") {
      for (i in 1:nrow(index)) {
        out <- group_rows_latex(out, index$header[i],
                                index$start[i], index$end[i],
                                latex_gap_space, escape)
      }
    }
    return(out)
  }
}
# Translate a named index vector (name = group label, value = span width in
# rows) into a data frame with header/start/end columns; empty-label entries
# are spacers and are dropped.
group_row_index_translator <- function(index) {
  index <- standardize_header_input(index)
  # Start of each group = cumulative spans shifted right by one row.
  index$start <- cumsum(c(1, index$colspan))[1:length(index$colspan)]
  index$end <- cumsum(index$colspan)
  index$header <- trimws(index$header)
  index <- index[index$header != "", ]
  return(index)
}
# Internal HTML worker for group_rows(): inserts a full-width label row above
# `start_row` in the parsed table XML and indents the grouped rows.
group_rows_html <- function(kable_input, group_label, start_row, end_row,
                            label_row_css, escape) {
  # Preserve the kable attributes so they can be restored on the rebuilt object.
  kable_attrs <- attributes(kable_input)
  kable_xml <- read_kable_as_xml(kable_input)
  kable_tbody <- xml_tpart(kable_xml, "tbody")
  if (escape) {
    group_label <- escape_html(group_label)
  }
  # Label rows inserted by earlier calls shift the physical row positions in
  # tbody; correct the requested span against them.
  group_header_rows <- attr(kable_input, "group_header_rows")
  group_seq <- seq(start_row, end_row)
  if (!is.null(group_header_rows)) {
    group_seq <- positions_corrector(group_seq, group_header_rows,
                                     length(xml_children(kable_tbody)))
  }
  # Insert a group header row spanning every column of the table.
  starting_node <- xml_child(kable_tbody, group_seq[1])
  kable_ncol <- length(xml_children(starting_node))
  group_header_row_text <- paste0(
    '<tr groupLength="', length(group_seq), '"><td colspan="', kable_ncol,
    '" style="', label_row_css, '"><strong>', group_label,
    "</strong></td></tr>"
  )
  group_header_row <- read_xml(group_header_row_text, options = "COMPACT")
  xml_add_sibling(starting_node, group_header_row, .where = "before")
  # add indentations to items
  out <- as_kable_xml(kable_xml)
  attributes(out) <- kable_attrs
  # Record the new label-row position so subsequent calls can correct for it.
  attr(out, "group_header_rows") <- c(attr(out, "group_header_rows"), group_seq[1])
  out <- add_indent_html(out, positions = seq(start_row, end_row))
  return(out)
}
# Internal LaTeX worker for group_rows(): injects a bold full-width
# \multicolumn label row before `start_row` via text substitution on the
# rendered table, then indents the grouped rows.
group_rows_latex <- function(kable_input, group_label, start_row, end_row,
                             gap_space, escape) {
  table_info <- magic_mirror(kable_input)
  out <- enc2utf8(as.character(kable_input))
  # Duplicated row text would make the sub() below hit the wrong row;
  # disambiguate such rows first.
  if (table_info$duplicated_rows) {
    dup_fx_out <- fix_duplicated_rows_latex(out, table_info)
    out <- dup_fx_out[[1]]
    table_info <- dup_fx_out[[2]]
  }
  if (escape) {
    group_label <- escape_latex(group_label)
    # Double the backslashes so they survive the regex replacement below.
    group_label <- gsub("\\\\", "\\\\\\\\", group_label)
  }
  # Add group label
  # contents[1] is the header row, hence the +1 offset.
  rowtext <- table_info$contents[start_row + 1]
  if (table_info$booktabs) {
    new_rowtext <- paste0(
      "\\\\addlinespace[", gap_space, "]\n",
      "\\\\multicolumn{", table_info$ncol, "}{l}{\\\\textbf{", group_label,
      "}}\\\\\\\\\n",
      rowtext
    )
  } else {
    # Non-booktabs tables separate the label row with plain \hline rules.
    rowtext <- paste0("\\\\hline\n", rowtext)
    new_rowtext <- paste0(
      "\\\\hline\n\\\\multicolumn{", table_info$ncol, "}{l}{\\\\textbf{",
      group_label, "}}\\\\\\\\\n", rowtext
    )
  }
  out <- sub(rowtext, new_rowtext, out)
  out <- gsub("\\\\addlinespace\n", "", out)
  out <- structure(out, format = "latex", class = "knitr_kable")
  table_info$group_rows_used <- TRUE
  attr(out, "kable_meta") <- table_info
  out <- add_indent_latex(out, seq(start_row, end_row))
  return(out)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tags.R
\name{withTags}
\alias{withTags}
\title{Evaluate an expression using \code{tags}}
\usage{
withTags(code)
}
\arguments{
\item{code}{A set of tags.}
}
\description{
This function makes it simpler to write HTML-generating code. Instead of
needing to specify \code{tags} each time a tag function is used, as in
\code{tags$div()} and \code{tags$p()}, code inside \code{withTags} is
evaluated with \code{tags} searched first, so you can simply use
\code{div()} and \code{p()}.
}
\details{
If your code uses an object which happens to have the same name as an
HTML tag function, such as \code{source()} or \code{summary()}, it will call
the tag function. To call the intended (non-tags function), specify the
namespace, as in \code{base::source()} or \code{base::summary()}.
}
\examples{
# Using tags$ each time
tags$div(class = "myclass",
tags$h3("header"),
tags$p("text")
)
# Equivalent to above, but using withTags
withTags(
div(class = "myclass",
h3("header"),
p("text")
)
)
}
| /man/withTags.Rd | no_license | datastorm-open/htmltools | R | false | false | 1,085 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tags.R
\name{withTags}
\alias{withTags}
\title{Evaluate an expression using \code{tags}}
\usage{
withTags(code)
}
\arguments{
\item{code}{A set of tags.}
}
\description{
This function makes it simpler to write HTML-generating code. Instead of
needing to specify \code{tags} each time a tag function is used, as in
\code{tags$div()} and \code{tags$p()}, code inside \code{withTags} is
evaluated with \code{tags} searched first, so you can simply use
\code{div()} and \code{p()}.
}
\details{
If your code uses an object which happens to have the same name as an
HTML tag function, such as \code{source()} or \code{summary()}, it will call
the tag function. To call the intended (non-tags function), specify the
namespace, as in \code{base::source()} or \code{base::summary()}.
}
\examples{
# Using tags$ each time
tags$div(class = "myclass",
tags$h3("header"),
tags$p("text")
)
# Equivalent to above, but using withTags
withTags(
div(class = "myclass",
h3("header"),
p("text")
)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SurvTreeLaplaceHazards_ranger.R
\name{survTreeLaplaceHazardRanger}
\alias{survTreeLaplaceHazardRanger}
\title{Laplace Hazards for a Competing Risk Survival Tree Object}
\usage{
survTreeLaplaceHazardRanger(treeModel, rangerdata, newdata, lambda)
}
\arguments{
\item{treeModel}{Fitted tree object as generated by "ranger" ("class ranger"). Must be a single ranger tree.}
\item{rangerdata}{Original training data with which \emph{treeModel} was fitted ("class data.frame"). Must be in
long format.}
\item{newdata}{Data in long format for which hazards are to be computed ("class data.frame"). Must
contain the same columns that were used for tree fitting.}
\item{lambda}{Smoothing parameter for laplace-smoothing ("class numeric"). Must be a non-negative
number. A value of zero corresponds to no smoothing.}
}
\value{
An m by k matrix, with m being the number of rows of \emph{newdata} and k being the
number of classes in \emph{treeModel}. Each row corresponds to the smoothed hazard
of the respective observation.
}
\description{
Predicts the laplace-smoothed hazards of discrete survival data based
on a survival tree from class "ranger". Currently only single-risk data is supported.
}
\examples{
library(pec)
library(caret)
library(ranger)
data(cost)
# Take subsample and convert time to years
cost$time <- ceiling(cost$time/365)
costSubTrain <- cost[1:50,]
costSubTest <- cost[51:70,]
# Specify column names for data augmentation
timeColumn<-"time"
eventColumn<-"status"
costSubTrainLong <- dataLong(costSubTrain, timeColumn, eventColumn)
costSubTestLong <- dataLong(costSubTest, timeColumn, eventColumn)
#create tree
formula <- y ~ timeInt + diabetes + prevStroke + age + sex
rangerTree <- ranger(formula, costSubTrainLong, num.trees = 1, mtry = 5,
classification = TRUE, splitrule = "hellinger", replace = FALSE,
sample.fraction = 1, max.depth = 5)
#compute laplace-smoothed hazards
laplHaz <- survTreeLaplaceHazardRanger(rangerTree, costSubTrainLong,
costSubTestLong, lambda = 1)
laplHaz
}
\keyword{survival}
| /man/survTreeLaplaceHazardRanger.Rd | no_license | cran/discSurv | R | false | true | 2,094 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SurvTreeLaplaceHazards_ranger.R
\name{survTreeLaplaceHazardRanger}
\alias{survTreeLaplaceHazardRanger}
\title{Laplace Hazards for a Competing Risk Survival Tree Object}
\usage{
survTreeLaplaceHazardRanger(treeModel, rangerdata, newdata, lambda)
}
\arguments{
\item{treeModel}{Fitted tree object as generated by "ranger" ("class ranger"). Must be a single ranger tree.}
\item{rangerdata}{Original training data with which \emph{treeModel} was fitted ("class data.frame"). Must be in
long format.}
\item{newdata}{Data in long format for which hazards are to be computed ("class data.frame"). Must
contain the same columns that were used for tree fitting.}
\item{lambda}{Smoothing parameter for laplace-smoothing ("class numeric"). Must be a non-negative
number. A value of zero corresponds to no smoothing.}
}
\value{
An m by k matrix, with m being the number of rows of \emph{newdata} and k being the
number of classes in \emph{treeModel}. Each row corresponds to the smoothed hazard
of the respective observation.
}
\description{
Predicts the laplace-smoothed hazards of discrete survival data based
on a survival tree from class "ranger". Currently only single-risk data is supported.
}
\examples{
library(pec)
library(caret)
library(ranger)
data(cost)
# Take subsample and convert time to years
cost$time <- ceiling(cost$time/365)
costSubTrain <- cost[1:50,]
costSubTest <- cost[51:70,]
# Specify column names for data augmentation
timeColumn<-"time"
eventColumn<-"status"
costSubTrainLong <- dataLong(costSubTrain, timeColumn, eventColumn)
costSubTestLong <- dataLong(costSubTest, timeColumn, eventColumn)
#create tree
formula <- y ~ timeInt + diabetes + prevStroke + age + sex
rangerTree <- ranger(formula, costSubTrainLong, num.trees = 1, mtry = 5,
classification = TRUE, splitrule = "hellinger", replace = FALSE,
sample.fraction = 1, max.depth = 5)
#compute laplace-smoothed hazards
laplHaz <- survTreeLaplaceHazardRanger(rangerTree, costSubTrainLong,
costSubTestLong, lambda = 1)
laplHaz
}
\keyword{survival}
|
# Rscript barplot_with_line.R
# Barplot of the FCC AUC ratio for each dataset (bars colored by comparison
# type) with an overlaid line giving, per dataset, the fraction of TADs whose
# FCC lies in the top interval (]0.75, 1]).
# library() (unlike require()) fails loudly when a package is missing, which
# is what a non-interactive script needs.
library(ggplot2)
library(ggsci)
library(patchwork)
library(foreach)
plotType <- "svg"
source("../../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
source("../settings.R")
outFolder <- "BARPLOT_WITH_LINE"
dir.create(outFolder, recursive = TRUE)
myHeight <- 5
myWidth <- 7
plotCex <- 1.4
# NOTE(review): interval_fcc is currently unused (the factor() call below is
# commented out); kept as documentation of the binning scheme.
interval_fcc <- c("<=0.5", "]0.5-1[", "1")
# FCC interval drawn as the overlay line
lineVar <- c( "]0.75, 1]")
# Per-dataset counts of TADs by FCC interval
dt2 <- get(load("BARPLOT_WITH_FCC_FRACT//all_dt.Rdata"))
dt2$dataset <- file.path(dt2$hicds, dt2$exprds)
# dt2$intervalFCC <- factor(dt2$intervalFCC, levels=interval_fcc)
stopifnot(!is.na(dt2$intervalFCC))
stopifnot(!duplicated(dt2))
dt3 <- get(load("BARPLOT_WITH_TOPFCC_FRACT///all_dt.Rdata"))
dt3$dataset <- file.path(dt3$hicds, dt3$exprds)
# Sanity check: per dataset, the two top FCC intervals of dt2 must sum to the
# top-FCC count of dt3.
for(curr_ds in unique(dt3$dataset)) {
  stopifnot( sum(dt2$nFCC[dt2$dataset == curr_ds & dt2$intervalFCC %in% c("]0.5, 0.75]", "]0.75, 1]")]) ==
               sum(dt3$countFCC[dt3$dataset == curr_ds]))
}
dt2$nFCC[dt2$dataset == "Barutcu_MCF-10A_40kb/TCGAbrca_lum_bas" & dt2$intervalFCC %in% c("]0.5, 0.75]", "]0.75, 1]")] ==
  dt3$countFCC[dt3$dataset == "Barutcu_MCF-10A_40kb/TCGAbrca_lum_bas"]
# FCC AUC values; datasets ordered by decreasing AUC for plotting
dt1 <- get(load("FCC_WAVE_PLOT_NOABS/all_fcc_dt.Rdata"))
dt1$dataset <- file.path(dt1$hicds, dt1$exprds)
dt1 <- dt1[order(dt1$fcc_auc, decreasing = TRUE),]
ds_levels <- as.character(dt1$dataset)
stopifnot(!duplicated(dt1))
# Bar colors by comparison type (all_cols/all_cmps come from the sourced files)
dscols <- all_cols[all_cmps[basename(as.character(dt1$dataset))]]
dt2$dataset <- factor(dt2$dataset, levels=ds_levels)
stopifnot(!is.na(dt2$dataset))
plot_dt2 <- dt2[dt2$intervalFCC == lineVar,]
plot_dt2$dataset <- factor(plot_dt2$dataset, levels=ds_levels)
stopifnot(!is.na(plot_dt2$dataset))
plot_dt2 <- plot_dt2[order(plot_dt2$dataset),]
stopifnot(!is.na(plot_dt2))
my_main <- "FCC AUC ratio"
my_main2 <- paste0("Ratio TADs with FCC \u2208 ", lineVar)
linecol <- "brown"
############################## BAR COLS BY DATASET
outFile <- file.path(outFolder, paste0("fcc_barplot_coloredBars.", plotType))
do.call(plotType, list(outFile, height=myHeight, width=myWidth*1.2))
par(mar=par()$mar+c(2,0,0,2))
par(family=fontFamily)
# Bars plot AUC ratio - 1 so the left axis can be relabeled to start at 1.
barp <- barplot(dt1$fcc_auc-1,
                ylab="FCC AUC ratio", cex.lab=1.2,
                main = my_main,
                # xlab="Datasets",
                cex.main = plotCex,
                # xlab=paste0("Datasets\n(n=", nrow(dt1), ")"),
                xlab="",
                col=dscols, axes=FALSE)
axis(2, at = seq(0, 0.8, by=0.1), labels = seq(0, 0.8, by=0.1)+1)
mtext(1, text=paste0("Datasets\n(n=", nrow(dt1), ")"), line=2, cex=plotCex)
# add the line (second y-axis on the right)
par(new = TRUE, family=fontFamily)
plot(x=barp,
     # ylim = c(0,1),
     xlab="", ylab="", lty=1,pch=16,lwd=2,
     y=plot_dt2$countFCC, type="b", col = linecol,
     axes=FALSE)
axis(side=4, col = linecol, col.ticks = linecol, col.axis=linecol, at = seq(0, 1, by=0.05))
mtext(side = 4, line = 2, text=my_main2, col=linecol, cex=plotCex)
# legend("bottom", pch=16, col=c(all_cols, linecol),
#        legend=c(names(all_cols), paste0("Ratio\n", lineVar, " \nTADs")),
#        lty=c(rep(-1, length(all_cols), 1)),
#        cex = c(rep(plotCex, length(all_cols)), -1),
#        inset=c(0,-1), xpd=TRUE,
#        horiz = TRUE,
#        bty="n")
legend("bottom", pch=16, col=c(all_cols),
       legend=c(names(all_cols)),
       lty=c(rep(-1, length(all_cols))),
       cex = c(rep(plotCex, length(all_cols))),
       inset=c(0,-0.5),
       xpd=TRUE,
       horiz = TRUE,
       bty="n")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
| /FIG_1/barplot_with_line.R | no_license | marzuf/MANUSCRIPT_FIGURES | R | false | false | 3,587 | r | # Rscript barplot_with_line.R
# Barplot of the FCC AUC ratio per dataset (bars colored by comparison type)
# with an overlaid line giving the fraction of TADs whose FCC lies in the top
# interval (]0.75, 1]).
require(ggplot2)
require(ggsci)
require(patchwork)
require(foreach)
plotType <- "svg"
source("../../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
source("../settings.R")
outFolder <- "BARPLOT_WITH_LINE"
dir.create(outFolder, recursive = TRUE)
myHeight <- 5
myWidth <- 7
plotCex <- 1.4
# interval_fcc is unused below (the factor() call is commented out)
interval_fcc <- c("<=0.5", "]0.5-1[", "1")
# FCC interval drawn as the overlay line
lineVar <- c( "]0.75, 1]")
# Per-dataset counts of TADs by FCC interval
dt2 <- get(load("BARPLOT_WITH_FCC_FRACT//all_dt.Rdata"))
dt2$dataset <- file.path(dt2$hicds, dt2$exprds)
# dt2$intervalFCC <- factor(dt2$intervalFCC, levels=interval_fcc)
stopifnot(!is.na(dt2$intervalFCC))
stopifnot(!duplicated(dt2))
dt3 <- get(load("BARPLOT_WITH_TOPFCC_FRACT///all_dt.Rdata"))
dt3$dataset <- file.path(dt3$hicds, dt3$exprds)
# Sanity check: per dataset, the two top FCC intervals of dt2 must sum to the
# top-FCC count of dt3.
for(curr_ds in unique(dt3$dataset)) {
  stopifnot( sum(dt2$nFCC[dt2$dataset == curr_ds & dt2$intervalFCC %in% c("]0.5, 0.75]", "]0.75, 1]")]) ==
               sum(dt3$countFCC[dt3$dataset == curr_ds]))
}
dt2$nFCC[dt2$dataset == "Barutcu_MCF-10A_40kb/TCGAbrca_lum_bas" & dt2$intervalFCC %in% c("]0.5, 0.75]", "]0.75, 1]")] ==
  dt3$countFCC[dt3$dataset == "Barutcu_MCF-10A_40kb/TCGAbrca_lum_bas"]
# FCC AUC values; datasets ordered by decreasing AUC for plotting
dt1 <- get(load("FCC_WAVE_PLOT_NOABS/all_fcc_dt.Rdata"))
dt1$dataset <- file.path(dt1$hicds, dt1$exprds)
dt1 <- dt1[order(dt1$fcc_auc, decreasing = TRUE),]
ds_levels <- as.character(dt1$dataset)
stopifnot(!duplicated(dt1))
# Bar colors by comparison type (all_cols/all_cmps come from the sourced files)
dscols <- all_cols[all_cmps[basename(as.character(dt1$dataset))]]
dt2$dataset <- factor(dt2$dataset, levels=ds_levels)
stopifnot(!is.na(dt2$dataset))
plot_dt2 <- dt2[dt2$intervalFCC == lineVar,]
plot_dt2$dataset <- factor(plot_dt2$dataset, levels=ds_levels)
stopifnot(!is.na(plot_dt2$dataset))
plot_dt2 <- plot_dt2[order(plot_dt2$dataset),]
stopifnot(!is.na(plot_dt2))
my_main <- "FCC AUC ratio"
my_main2 <- paste0("Ratio TADs with FCC \u2208 ", lineVar)
linecol <- "brown"
############################## BAR COLS BY DATASET
outFile <- file.path(outFolder, paste0("fcc_barplot_coloredBars.", plotType))
do.call(plotType, list(outFile, height=myHeight, width=myWidth*1.2))
par(mar=par()$mar+c(2,0,0,2))
par(family=fontFamily)
# Bars plot AUC ratio - 1 so the left axis can be relabeled to start at 1.
barp <- barplot(dt1$fcc_auc-1,
                ylab="FCC AUC ratio", cex.lab=1.2,
                main = my_main,
                # xlab="Datasets",
                cex.main = plotCex,
                # xlab=paste0("Datasets\n(n=", nrow(dt1), ")"),
                xlab="",
                col=dscols, axes=F)
axis(2, at = seq(0, 0.8, by=0.1), labels = seq(0, 0.8, by=0.1)+1)
mtext(1, text=paste0("Datasets\n(n=", nrow(dt1), ")"), line=2, cex=plotCex)
# add the line (second y-axis on the right)
par(new = T, family=fontFamily)
plot(x=barp,
     # ylim = c(0,1),
     xlab="", ylab="", lty=1,pch=16,lwd=2,
     y=plot_dt2$countFCC, type="b", col = linecol,
     axes=FALSE)
axis(side=4, col = linecol, col.ticks = linecol, col.axis=linecol, at = seq(0, 1, by=0.05))
mtext(side = 4, line = 2, text=my_main2, col=linecol, cex=plotCex)
# legend("bottom", pch=16, col=c(all_cols, linecol),
#        legend=c(names(all_cols), paste0("Ratio\n", lineVar, " \nTADs")),
#        lty=c(rep(-1, length(all_cols), 1)),
#        cex = c(rep(plotCex, length(all_cols)), -1),
#        inset=c(0,-1), xpd=TRUE,
#        horiz = TRUE,
#        bty="n")
legend("bottom", pch=16, col=c(all_cols),
       legend=c(names(all_cols)),
       lty=c(rep(-1, length(all_cols))),
       cex = c(rep(plotCex, length(all_cols))),
       inset=c(0,-0.5),
       xpd=TRUE,
       horiz = TRUE,
       bty="n")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
|
# ----------------------------------------------------------------------------------- #
# This script performs model ensembling, projection and forecasting
# ----------------------------------------------------------------------------------- #
# NOTE(review): rm(list = ls()) is discouraged in scripts (it does not give a
# clean R session, only an empty global environment); kept for compatibility.
rm(list=ls())
# library() fails loudly if a package is missing; require() would only warn
# and let the script fail later with a confusing error.
library(biomod2)
library(raster)
# Define the work directory for BIOMOD2
# !!! Change the working directory according to your system
# setwd("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_PREDS_v2")
# ----------------------------------------------------------------------------------- #
# Get the best modeling algorithms based on the median for all calibrated models
#
# Then, for each of the best algorithms extract the 2.5% top performing models based
# on ROC/AUC. The selected models in the ensModels list object will then be used for
# ensembling procedures
# ----------------------------------------------------------------------------------- #
# Species codes
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("ANGFR","BUCAL","CHALU","LACSH")
# Modeling algorithms used
modAlgos<-c("GLM","GBM","GAM","CTA","FDA","RF","MAXENT")
# Nr of algorithms to retain
#nBest<-c(4,3,3,3)
#nBest<-rep(4,4)
#names(nBest)<-spCodes
nBest<-5
# List of best models retained (one character vector of model names per species)
ensModels<-list()
## Extract the best modeling algorithms/runs
for(sp in spCodes){
  setwd(paste("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_OBJECTS",sp,sep="/"))
  # Load the biomod modeling object for each species
  # (assumes the .RData file restores an object named modObj -- TODO confirm)
  load(paste(sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
  modEvals<-get_evaluations(modObj)
  ensModels[[sp]]<-c()
  modEvalByAlgo<-c()
  # Select the 5 best modeling algorithms
  # NOTE(review): modEvals[3,1,m,,] presumably picks the ROC score for
  # algorithm m across runs/PA sets -- confirm indexing against the biomod2
  # version used.
  for(m in modAlgos)
    modEvalByAlgo<-c(modEvalByAlgo,median(modEvals[3,1,m,,],na.rm=TRUE))
  names(modEvalByAlgo)<-modAlgos
  modEvalByAlgo<-sort(modEvalByAlgo,decreasing=TRUE)
  print(sp)
  print(modEvalByAlgo)
  # Best 5 modeling algorithms
  modAlgos1<-names(modEvalByAlgo)[1:nBest]
  #
  # Extract the best calibrated models for each of the 5 best modeling algorithms
  # based on the 0.975 quantile
  for(m in modAlgos1){
    tmp<-modEvals[3,1,m,,]
    quant<-quantile(tmp,probs=0.975,na.rm=TRUE)
    for(i in 1:ncol(tmp)){
      for(j in 1:nrow(tmp)){
        # If the model performance is equal or above the 97.5% quantile then it's retained for ensembling
        # (model names follow biomod2's "<sp>_PA<i>_RUN<j>_<algo>" convention)
        if(!is.na(tmp[j,i]) & (tmp[j,i] >= quant))
          ensModels[[sp]]<-c(ensModels[[sp]],paste(sp,"_PA",i,"_RUN",j,"_",m,sep=""))
      }
    }
  }
}
# ----------------------------------------------------------------------------------- #
# Perform ensemble modeling
# ----------------------------------------------------------------------------------- #
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("ANGFR","BUCAL","CHALU","LACSH")
setwd("I:/temp/colab_EC")
for(sp in spCodes){
  # Load the modeling object
  load(paste("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_OBJECTS/",sp,"/",sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
  # Perform ensembling based on the previously selected models (in the ensModels list object)
  # Uses mean, median and weighted-mean ensemble types
  #
  ensembleObj <- BIOMOD_EnsembleModeling( modeling.output = modObj,
                                          chosen.models = ensModels[[sp]],
                                          em.by = "all",
                                          eval.metric = c("ROC","TSS"),
                                          eval.metric.quality.threshold = NULL, # NULL = no quality threshold; all chosen models are kept
                                          prob.mean = TRUE,
                                          prob.cv = FALSE,
                                          prob.ci = FALSE,
                                          prob.ci.alpha = 0.05,
                                          prob.median = TRUE,
                                          committee.averaging = FALSE,
                                          prob.mean.weight = TRUE,
                                          prob.mean.weight.decay = "proportional")
  # Save the ensemble object
  save(ensembleObj,file=paste(sp,"_BIOMOD2_ensembleObj_v3_AllEvalStats.RData",sep=""))
  # Calculate evaluation scores for the ensemble models
  print(get_evaluations(ensembleObj))
}
# ----------------------------------------------------------------------------------- #
# Reads rasters contained in each input scenario folder and produces
# new raster stacks used for projection and ensemble forecasting
#
# Forecast results based on current conditions and cc-scenarios for 2050 (RCP 2.6 and 8.5)
# ----------------------------------------------------------------------------------- #
# !!! Change the path to the raster directories according to your system
# Only GeoTIFF files in each folder are listed
rstDataFileList<-list(
  current=list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/_Present",pattern=".tif$",full=TRUE),
  rcp26 = list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/2050_B2_RCP26",pattern=".tif$",full=TRUE),
  rcp85 = list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/2050_A1_RCP85",pattern=".tif$",full=TRUE)
)
# Build a raster stack using the selected variables (positions in each file listing)
selVars<-c(2,4,5,6,14,20,23,29,32,24)
# Indicate which rasters are factor/categorical variables (only the last one, CLC_LULC)
selVarFactor<-c(rep(FALSE,length(selVars)-1),TRUE)
## Assemble one raster stack per scenario (preallocated instead of grown in the loop)
rstStacks<-vector("list",length(rstDataFileList))
for(j in seq_along(rstDataFileList)){
  rstDataFiles<-rstDataFileList[[j]]
  rstList<-vector("list",length(selVars))
  for(z in seq_along(selVars)){
    i<-selVars[z]
    if(selVarFactor[z]){
      rstList[[z]]<-as.factor(raster(rstDataFiles[i]))
    }
    else{
      # A workaround to convert 8-bit rasters into floats and avoid using them as factors
      rstList[[z]]<-raster(rstDataFiles[i])
      if(is.factor(rstList[[z]]))
        rstList[[z]]<-rstList[[z]]*(1+1E-38)
    }
  }
  # Create the raster stack for each projection type
  rstStacks[[j]]<-stack(rstList)
}
## Reclassify land-use values in the future scenarios:
## classes 5 and 8 (abandoned lands) become 3 (semi-natural vegetation).
## NA cells are left untouched. Shared helper replaces the previously
## duplicated per-scenario code.
reclassifyLULC<-function(rstStack,lulcLayer=10){
  lulc<-getValues(rstStack)[,"CLC_LULC"]
  idx<-!is.na(lulc) & lulc %in% c(5,8)
  lulc[idx]<-3
  setValues(rstStack,lulc,layer=lulcLayer)
}
# 2050 / RCP 2.6
rstStacks[[2]]<-reclassifyLULC(rstStacks[[2]])
# 2050 / RCP 8.5
rstStacks[[3]]<-reclassifyLULC(rstStacks[[3]])
# ----------------------------------------------------------------------------------- #
# Perform projection and ensemble forecasting for each scenario
# ----------------------------------------------------------------------------------- #
# NOTE(review): memory.limit() is Windows-only (and defunct in R >= 4.2).
memory.limit(6000)
## Set the working directory to the base dir (with a folder per species)
setwd("I:/temp/colab_EC")
# Species codes
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("LACSH")
# Projection names
projNames<-c("Current","RCP26_2050","RCP85_2050")
for(sp in spCodes){
  # Load the modeling object (modObject) and the ensemble object (ensembleObj)
  # from the target species folders
  # NOTE(review): this loads "_ensembleObj_v2_ROC" while the ensembling step
  # above saves "_ensembleObj_v3_AllEvalStats" -- confirm which version is intended.
  load(paste(getwd(),"/",sp,"/",sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
  load(paste(getwd(),"/",sp,"/",sp,"_BIOMOD2_ensembleObj_v2_ROC.RData",sep=""))
  for(i in 1:length(rstStacks)){
    cat("Processing species:",sp,"| Scenario:",projNames[i],".......\n\n")
    # Perform projection for each scenario using models kept by the ensembling
    #
    projectionObj <- BIOMOD_Projection( modeling.output = modObj,
                                        new.env = rstStacks[[i]],
                                        proj.name = projNames[i],
                                        selected.models = get_kept_models(ensembleObj,model=1),
                                        compress = TRUE,
                                        build.clamping.mask = FALSE)
    # Save the projection object
    save(projectionObj,file=paste(sp,"_BIOMOD2_projectionObj_",projNames[i],"_v2.RData",sep=""))
    # Perform ensemble forecasting
    BIOMOD_EnsembleForecasting( projection.output = projectionObj,
                                EM.output = ensembleObj)
    # Export the ensemble projections into GeoTIFF format
    # Read raster data (bands 1-3 = mean, median, weighted-mean ensembles)
    rstEnsProj.avg<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=1)
    rstEnsProj.med<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=2)
    rstEnsProj.wmn<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=3)
    # Write new raster data into GeoTIFF
    writeRaster(rstEnsProj.avg,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_avg.tif",sep=""))
    writeRaster(rstEnsProj.med,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_med.tif",sep=""))
    writeRaster(rstEnsProj.wmn,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_wmn.tif",sep=""))
    cat("\n\n")
  }
}
| /R/MODEL_DEV/BIOMOD2_EnsembleAndProjection_v1.R | no_license | joaofgoncalves/HerpConn | R | false | false | 8,709 | r |
# ----------------------------------------------------------------------------------- #
# This script performs model ensembing, projection and forecasting
# ----------------------------------------------------------------------------------- #
rm(list=ls())
require(biomod2)
require(raster)
# Define the work directory for BIOMOD2
# !!! Change the working directory according to your system
# setwd("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_PREDS_v2")
# ----------------------------------------------------------------------------------- #
# Get the best modeling algorithms based on the median for all calibrated models
#
# Then, for each of the best algorithms extract the 2.5% top performing models based
# on ROC/AUC. The selected models in the ensModels list object will then be used for
# ensembling procedures
# ----------------------------------------------------------------------------------- #
# Species codes
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("ANGFR","BUCAL","CHALU","LACSH")
# Modeling algorithms used
modAlgos<-c("GLM","GBM","GAM","CTA","FDA","RF","MAXENT")
# Nr of algorithms to retain
#nBest<-c(4,3,3,3)
#nBest<-rep(4,4)
#names(nBest)<-spCodes
nBest<-5
# List of best models retained
ensModels<-list()
## Extract the best modeling algorithms/runs
for(sp in spCodes){
setwd(paste("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_OBJECTS",sp,sep="/"))
# Load the biomod modeling object for each species
load(paste(sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
modEvals<-get_evaluations(modObj)
ensModels[[sp]]<-c()
modEvalByAlgo<-c()
# Select the 5 best modeling algorithms
for(m in modAlgos)
modEvalByAlgo<-c(modEvalByAlgo,median(modEvals[3,1,m,,],na.rm=TRUE))
names(modEvalByAlgo)<-modAlgos
modEvalByAlgo<-sort(modEvalByAlgo,decreasing=TRUE)
print(sp)
print(modEvalByAlgo)
# Best 5 modeling algorithms
modAlgos1<-names(modEvalByAlgo)[1:nBest]
#
# Extract the best calibrated models for each of the 5 best modeling algorithms
# based on the 0.975 quantile
for(m in modAlgos1){
tmp<-modEvals[3,1,m,,]
quant<-quantile(tmp,probs=0.975,na.rm=TRUE)
for(i in 1:ncol(tmp)){
for(j in 1:nrow(tmp)){
# If the model performance is equal or above the 97.5% quantile then it's retained for ensembling
if(!is.na(tmp[j,i]) & (tmp[j,i] >= quant))
ensModels[[sp]]<-c(ensModels[[sp]],paste(sp,"_PA",i,"_RUN",j,"_",m,sep=""))
}
}
}
}
# ----------------------------------------------------------------------------------- #
# Perform ensemble modeling
# ----------------------------------------------------------------------------------- #
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("ANGFR","BUCAL","CHALU","LACSH")
setwd("I:/temp/colab_EC")
for(sp in spCodes){
# Load the modeling object
# (injects modObj into the workspace; ensModels must exist from the
# selection step above)
load(paste("D:/MyDocs/Projects/Colab_ECivantos/data/_MOD_OBJECTS/",sp,"/",sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
# Perform ensembling based on the previously selected models (in the ensModels list object)
# Uses mean, median and weighted-mean ensemble types
#
ensembleObj <- BIOMOD_EnsembleModeling( modeling.output = modObj,
chosen.models = ensModels[[sp]],
em.by = "all",
eval.metric = c("ROC","TSS"),
eval.metric.quality.threshold = NULL, # NULL = no quality-threshold filtering applied
prob.mean = TRUE,
prob.cv = FALSE,
prob.ci = FALSE,
prob.ci.alpha = 0.05,
prob.median = TRUE,
committee.averaging = FALSE,
prob.mean.weight = TRUE,
prob.mean.weight.decay = "proportional")
# Save the ensemble object
# NOTE(review): saved as "_v3_AllEvalStats" here, but the projection step
# further below loads "_v2_ROC" -- confirm which version is intended.
save(ensembleObj,file=paste(sp,"_BIOMOD2_ensembleObj_v3_AllEvalStats.RData",sep=""))
# Calculate evaluation scores for the ensemble models
print(get_evaluations(ensembleObj))
}
# ----------------------------------------------------------------------------------- #
# Reads rasters contained in each input scenario folder and produces
# new raster stacks used for projection and ensemble forecasting
#
# Forecast results based on current conditions and cc-scenarios for 2050 (RCP 2.6 and 8.5)
# ----------------------------------------------------------------------------------- #
# !!! Change the path to the raster directories according to your system
# This function lists only GeoTIFF files in each folder
# (pattern ".tif$" anchors the extension at the end; the unescaped dot matches
# any character, which is harmless here)
rstDataFileList<-list(
current=list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/_Present",pattern=".tif$",full=TRUE),
rcp26 = list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/2050_B2_RCP26",pattern=".tif$",full=TRUE),
rcp85 = list.files("D:/MyDocs/Projects/Colab_ECivantos/data/SpatialData/2050_A1_RCP85",pattern=".tif$",full=TRUE)
)
# Build a raster stack using the selected variables
# NOTE(review): selVars are positions in the alphabetically sorted file list
# per folder -- assumes all three folders hold the same files in the same order.
selVars<-c(2,4,5,6,14,20,23,29,32,24)
# Indicate which rasters are factor/categorical variables
# (only the last selected variable -- the CLC_LULC layer -- is categorical)
selVarFactor<-c(rep(FALSE,length(selVars)-1),TRUE)
##
j<-0
rstList<-list()
rstStacks<-list()
for(rstDataFiles in rstDataFileList){
j<-j+1
z<-0
for(i in selVars){
z<-z+1
if(selVarFactor[z]){
rstList[[z]]<-as.factor(raster(rstDataFiles[i]))
}
else{
# A workaround to convert 8-bit rasters into floats and avoid using them as factors
# (multiplying by 1 + 1E-38 forces a floating-point layer without changing values)
rstList[[z]]<-raster(rstDataFiles[i])
if(is.factor(rstList[[z]]))
rstList[[z]]<-rstList[[z]]*(1+1E-38)
}
}
# Create the raster stack for each projection type
rstStacks[[j]]<-stack(rstList)
}
## Reclassify land-use values in the 2050 scenario stacks:
## classes 5 (abandoned lands) and 8 are merged into class 3 (semi-natural
## vegetation). The same recode applies to both climate scenarios, so the
## previously copy-pasted per-stack code is factored into a single helper.
#
# reclassLULC: recode CLC_LULC classes 5 and 8 to 3 in a raster stack, writing
# the result back into the stack's CLC_LULC layer (layer 10, cf. selVars above).
reclassLULC <- function(stk, layerName = "CLC_LULC", layerIdx = 10) {
  rstData <- getValues(stk)
  # %in% returns FALSE for NA cells, replicating the original explicit
  # "ind[is.na(ind)] <- FALSE" masking for both class tests at once
  recode <- rstData[, layerName] %in% c(5, 8)
  rstData[recode, layerName] <- 3
  setValues(stk, rstData[, layerName], layer = layerIdx)
}
# 2050 / RCP 2.6
rstStacks[[2]] <- reclassLULC(rstStacks[[2]])
# 2050 / RCP 8.5
rstStacks[[3]] <- reclassLULC(rstStacks[[3]])
# ----------------------------------------------------------------------------------- #
# Perform projection and ensemble forecasting for each scenario
# ----------------------------------------------------------------------------------- #
memory.limit(6000)
## Set the working directory to the base dir (with a folder per species)
setwd("I:/temp/colab_EC")
# Species codes and distances
#spCodes<-c("BUCAL","LACLE","PODHP","SALAM","CHALU","LACSH")
spCodes<-c("LACSH")
# Projection names
# (order must match rstStacks: current, RCP 2.6 2050, RCP 8.5 2050)
projNames<-c("Current","RCP26_2050","RCP85_2050")
for(sp in spCodes){
# Load the modeling object (modObject) and the ensemble object (ensembleObj)
# from the target species folders
# NOTE(review): this loads the "_v2_ROC" ensemble object, but the ensembling
# step above saves "_v3_AllEvalStats" -- confirm the intended version.
load(paste(getwd(),"/",sp,"/",sp,"_BIOMOD2_ModObject_v2.RData",sep=""))
load(paste(getwd(),"/",sp,"/",sp,"_BIOMOD2_ensembleObj_v2_ROC.RData",sep=""))
for(i in 1:length(rstStacks)){
cat("Processing species:",sp,"| Scenario:",projNames[i],".......\n\n")
# Perform projection for each scenario using models kept by the ensembling
#
projectionObj <- BIOMOD_Projection( modeling.output = modObj,
new.env = rstStacks[[i]],
proj.name = projNames[i],
selected.models = get_kept_models(ensembleObj,model=1),
compress = TRUE,
build.clamping.mask = FALSE)
# Save the projection object
save(projectionObj,file=paste(sp,"_BIOMOD2_projectionObj_",projNames[i],"_v2.RData",sep=""))
# Perform ensemble forecasting
BIOMOD_EnsembleForecasting( projection.output = projectionObj,
EM.output = ensembleObj)
# Export the ensemble projections into GeoTIFF format
# Read raster data
# (bands 1-3 of the ensemble .grd file; named avg/med/wmn here, presumably
# mean / median / weighted mean as enabled in the ensembling call -- confirm)
rstEnsProj.avg<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=1)
rstEnsProj.med<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=2)
rstEnsProj.wmn<-raster(paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble.grd",sep=""),band=3)
# Write new raster data into GeoTIFF
writeRaster(rstEnsProj.avg,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_avg.tif",sep=""))
writeRaster(rstEnsProj.med,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_med.tif",sep=""))
writeRaster(rstEnsProj.wmn,filename=paste(getwd(),"/",sp,"/proj_",projNames[i],"/proj_",projNames[i],"_",sp,"_ensemble_wmn.tif",sep=""))
cat("\n\n")
}
}
|
#' Fits a GAM with a threshold-formulation
#'
#' \code{thresh_gam} fits a Generalized Additive Model (GAM) with a threshold
#' formulation using the \code{by} argument in the smoothing function
#' \code{\link[mgcv]{s}}:
#' gam(IND ~ s(pressure1, by = threshold_variable_low) +
#' s(pressure1, by = threshold_variable_high)).
#' The threshold value is estimated from the data and chosen by minimizing
#' the GCV score (termed "gcvv" in the threshold-GAM object) over an interval
#' defined by the lower and upper quantiles (see the \code{a} and \code{b}
#' arguments respectively) of the threshold variable.
#'
#' @param model A single GAM object from the model tibble needed to extract
#' the family and the link function.
#' @param ind_vec A vector with the IND training observations (including or excluding
#' defined outliers).
#' @param press_vec A vector with the training observations (including or excluding
#' defined outliers) of pressure 1 (i.e. the original significant pressure in the
#' GAM(M)).
#' @param t_var A vector with the training observations (including or excluding
#' defined outliers) of the threshold variable (i.e. a second pressure variable).
#' @param name_t_var The name of the threshold variable (pressure 2). t_var will be
#' named after this string in the model formula.
#' @param k Choice of knots (for the smoothing function \code{\link{s}}); the
#' default is 4 to avoid over-parameterization.
#' @param a The lower quantile value of the selected threshold variable, which
#' the estimated threshold is not allowed to exceed; the default is 0.2.
#' @param b The upper quantile value of the selected threshold variable, which
#' the estimated threshold is not allowed to exceed; the default is 0.8.
#'
#' @details
#' \code{thresh_gam} creates first a sequence of evenly spaced threshold values
#' within the boundaries set by the lower and upper quantiles (defined by a and b).
#' For each threshold value that leads to a new splitting of the threshold
#' variables a threshold-GAM is applied: one smoothing function is applied
#' to only those observations where the threshold variable has been below the threshold
#' value for the given time step (year). A second smoothing function is applied to
#' observations where the threshold variable is above the prior defined threshold.
#' From the list of computed models the threshold-GAM with the lowest Generalized
#' Cross Validation (GCV) and its threshold value are selected and returned. For more
#' infos on threshold-GAMs see also the details section in \code{\link{test_interaction}}.
#'
#' @return
#' The function returns a \code{gam} object with the additional class \code{thresh_gam}.
#' All method functions for \code{gam} can be applied to this function. The object
#' has five additional elements:
#' \describe{
#' \item{\code{mr}}{The threshold value of the best threshold-GAM.}
#' \item{\code{mgcv}}{The GCV of the best threshold-GAM.}
#' \item{\code{gcvv}}{A vector of the GCV values of all fitted threshold-GAMs.}
#' \item{\code{t_val}}{A vector of all tested threshold values within the
#' boundaries set by the lower and upper quantiles.}
#' \item{\code{train_na}}{A logical vector indicating missing values.}
#' }
#'
#' @seealso \code{\link{test_interaction}} and \code{\link{loocv_thresh_gam}}
#' which apply the function
#'
#' @keywords internal
#' @export
#'
#' @examples
#' # Using some models of the Baltic Sea demo data in this package
#' test <- thresh_gam(model = model_gam_ex$model[[1]],
#' ind_vec = ind_init_ex$ind_train[[1]],
#' press_vec = ind_init_ex$press_train[[1]],
#' t_var = ind_init_ex$press_train[[2]],
#' name_t_var = "Ssum", k = 4, a = 0.2, b = 0.8)
thresh_gam <- function(model, ind_vec, press_vec, t_var,
                       name_t_var, k, a, b) {
  # Candidate thresholds: an evenly spaced grid between the a- and b-quantiles
  # of the threshold variable; grid resolution scales with the series length.
  nthd <- length(press_vec)
  lower <- stats::quantile(t_var, prob = a, na.rm = TRUE)
  upper <- stats::quantile(t_var, prob = b, na.rm = TRUE)
  t_val <- seq(from = lower, to = upper, by = (upper - lower) / nthd)
  # Family and link of the original GAM (mgcv labels the negative binomial
  # family with its theta estimate, so map it back to the "nb" shortcut).
  family <- mgcv::summary.gam(model)$family[[1]]
  if (stringr::str_detect(family, "Negative Binomial")) {
    family <- "nb"
  }
  link <- mgcv::summary.gam(model)$family[[2]]
  # Only threshold values that actually change the low/high split of t_var
  # require a new model fit (flagged in the "change" column).
  thresh_gams <- compare_thresholds(t_val, t_var)
  # Model input; rename columns so the formula below uses the original
  # IND/pressure names plus the threshold variable's name.
  dat <- data.frame(ind = ind_vec, press = press_vec,
    t_var = t_var)
  names(dat) <- c(all.vars(model$formula), name_t_var)
  thresh_gams$model <- vector(length = nrow(thresh_gams),
    mode = "list")
  for (i in seq_len(nrow(thresh_gams))) {   # seq_len: safe for 0-row input
    if (thresh_gams$change[i]) {
      # Two smoothers for the pressure: one for observations at or below the
      # candidate threshold, one for observations above it.
      formula <- paste0(names(dat)[1], " ~ 1 + s(",
        names(dat)[2], ", by = I(1 * (", names(dat)[3],
        " <= ", round(thresh_gams$t_val[i],
          digits = 3), ")), k = ", k, ") + s(",
        names(dat)[2], ", by = I(1 * (", names(dat)[3],
        " > ", round(thresh_gams$t_val[i],
          digits = 3), ")), k = ", k, ")")
      # Fit the threshold-GAM (nthd, a, b are carried along on the object)
      mod <- mgcv::gam(formula = stats::as.formula(formula),
        na.action = "na.omit", family = paste0(family,
          "(link = ", link, ")"), nthd = nthd,
        a = a, b = b, data = dat)
      mod$original_data <- dat
      mod$mr <- thresh_gams$t_val[i]   # candidate threshold of this fit
      thresh_gams$model[[i]] <- mod
    }
  }
  # GCV scores for every candidate threshold; thresholds that did not trigger
  # a refit inherit the score of the previous (identical) model.
  gcvv <- vector(mode = "numeric", length = nrow(thresh_gams))
  gcvv[thresh_gams$change] <- purrr::map_dbl(thresh_gams$model[thresh_gams$change],
    ~.$gcv.ubre)
  for (i in seq_along(gcvv)) {
    # Guard i > 1: the original unguarded gcvv[i - 1] would be a zero-length
    # subset (and an error on assignment) if the first score were 0.
    if (i > 1 && gcvv[i] == 0) {
      gcvv[i] <- gcvv[i - 1]
    }
  }
  # Keep only the actually fitted models and select the lowest GCV overall;
  # which.min returns the first index in case of ties (chronological choice)
  # and skips NA scores instead of erroring.
  thresh_gams <- thresh_gams[thresh_gams$change, ]
  thresh_gams$gcvv <- purrr::map_dbl(thresh_gams$model,
    ~.$gcv.ubre)
  best_model_id <- which.min(thresh_gams$gcvv)
  # create output
  if (length(best_model_id) == 1) {
    res <- thresh_gams$model[[best_model_id]]
    res$mgcv <- thresh_gams$gcvv[best_model_id]
    res$gcvv <- gcvv[order(t_val)]   # GCVs ordered by threshold value
    res$t_val <- sort(t_val)
    class(res) <- c("thresh_gam", "gam", "glm",
      "lm")
    return(res)
  } else {
    stop("No thresh_gam available!")
  }
}
| /R/thresh_gam.R | no_license | saskiaotto/INDperform | R | false | false | 6,379 | r | #' Fits a GAM with a threshold-formulation
#'
#' \code{thresh_gam} fits a Generalized Additive Model (GAM) with a threshold
#' formulation using the \code{by} argument in the smoothing function
#' \code{\link[mgcv]{s}}:
#' gam(IND ~ s(pressure1, by = threshold_variable_low) +
#' s(pressure 1, by = threshold threshold_variable_high)).
#' The threshold value is estimated from the data and chosen by minimizing
#' the GCV score (termed "gcvv" in the threshold-GAM object) over an interval
#' defined by the lower and upper quantiles (see the \code{a} and \code{b}
#' arguments respectively) of the threshold variable.
#'
#' @param model A single GAM object from the model tibble needed to extract
#' the family and the link function.
#' @param ind_vec A vector with the IND training observations (including or excluding
#' defined outliers).
#' @param press_vec A vector with the training observations (including or excluding
#' defined outliers) of pressure 1 (i.e. the original significant pressure in the
#' GAM(M)).
#' @param t_var A vector with the training observations (including or excluding
#' defined outliers) of the threshold variable (i.e. a second pressure variable).
#' @param name_t_var The name of the threshold variable (pressure 2). t_var will be
#' named after this string in the model formula.
#' @param k Choice of knots (for the smoothing function \code{\link{s}}); the
#' default is 4 to avoid over-parameterization.
#' @param a The lower quantile value of the selected threshold variable, which
#' the estimated threshold is not allowed to exceed; the default is 0.2.
#' @param b The upper quantile value of the selected threshold variable, which
#' the estimated threshold is not allowed to exceed; the default is 0.8.
#'
#' @details
#' \code{thresh_gam} creates first a sequence of evenly spaced threshold values
#' within the boundaries set by the lower and upper quantiles (defined by a and b).
#' For each threshold value that leads to a new splitting of the threshold
#' variables a threshold-GAM is applied: one smoothing function is applied
#' to only those observations where the threshold variable has been below the threshold
#' value for the given time step (year). A second smoothing function is applied to
#' observations where the threshold variable is above the prior defined threshold.
#' From the list of computed models the threshold-GAM with the lowest Generalized
#' Cross Validation (GCV) and its threshold value are selected and returned. For more
#' infos on threshold-GAMs see also the details section in \code{\link{test_interaction}}.
#'
#' @return
#' The function returns a \code{gam} object with the additional class \code{tgam}.
#' All method functions for \code{gam} can be applied to this function. The object
#' has four additional elements:
#' \describe{
#' \item{\code{mr}}{The threshold value of the best threshold-GAM.}
#' \item{\code{mgcv}}{The GCV of the best threshold-GAM.}
#' \item{\code{gcvv}}{A vector of the GCV values of all fitted threshold-GAMs.}
#' \item{\code{t_val}}{A vector of all tested threshold values within the
#' boundaries set by the lower and upper quantiles.}
#' \item{\code{train_na}}{A logical vector indicating missing values.}
#' }
#'
#' @seealso \code{\link{test_interaction}} and \code{\link{loocv_thresh_gam}}
#' which apply the function
#'
#' @keywords internal
#' @export
#'
#' @examples
#' # Using some models of the Baltic Sea demo data in this package
#' test <- thresh_gam(model = model_gam_ex$model[[1]],
#' ind_vec = ind_init_ex$ind_train[[1]],
#' press_vec = ind_init_ex$press_train[[1]],
#' t_var = ind_init_ex$press_train[[2]],
#' name_t_var = "Ssum", k = 4, a = 0.2, b = 0.8)
thresh_gam <- function(model, ind_vec, press_vec, t_var,
name_t_var, k, a, b) {
nthd <- length(press_vec)
lower <- stats::quantile(t_var, prob = a, na.rm = TRUE)
upper <- stats::quantile(t_var, prob = b, na.rm = TRUE)
t_val <- seq(from = lower, to = upper, by = (upper -
lower)/nthd)
# family and link
family <- mgcv::summary.gam(model)$family[[1]]
if (stringr::str_detect(family, "Negative Binomial")) {
family <- "nb"
}
link <- mgcv::summary.gam(model)$family[[2]]
thresh_gams <- compare_thresholds(t_val, t_var)
# create input for the model
dat <- data.frame(ind = ind_vec, press = press_vec,
t_var = t_var)
names(dat) <- c(all.vars(model$formula), name_t_var)
thresh_gams$model <- vector(length = nrow(thresh_gams),
mode = "list")
for (i in 1:nrow(thresh_gams)) {
if (thresh_gams$change[i]) {
# create the model formula: get ind, press, t_var,
# level t_var
formula <- paste0(names(dat)[1], " ~ 1 + s(",
names(dat)[2], ", by = I(1 * (", names(dat)[3],
" <= ", round(thresh_gams$t_val[i],
digits = 3), ")), k = ", k, ") + s(",
names(dat)[2], ", by = I(1 * (", names(dat)[3],
" > ", round(thresh_gams$t_val[i],
digits = 3), ")), k = ", k, ")")
# create the model
mod <- mgcv::gam(formula = stats::as.formula(formula),
na.action = "na.omit", family = paste0(family,
"(link = ", link, ")"), nthd = nthd,
a = a, b = b, data = dat)
mod$original_data <- dat
mod$mr <- thresh_gams$t_val[i]
thresh_gams$model[[i]] <- mod
}
}
# Extract gcvv from models and add gcvv for each
# model not generated
gcvv <- vector(mode = "numeric", length = nrow(thresh_gams))
gcvv[thresh_gams$change] <- purrr::map_dbl(thresh_gams$model[thresh_gams$change],
~.$gcv.ubre)
for (i in 1:length(gcvv)) {
if (gcvv[i] == 0) {
gcvv[i] <- gcvv[i - 1]
}
}
# find best model
thresh_gams <- thresh_gams[thresh_gams$change, ]
thresh_gams$gcvv <- purrr::map_dbl(thresh_gams$model,
~.$gcv.ubre)
# Extract model with the lowest gcvv score overall.
# In case of identical values, select
# chronologically. We can easily debug this code!
best_model_id <- min(which(thresh_gams$gcvv ==
min(thresh_gams$gcvv)))
# create output
if (length(best_model_id) == 1) {
res <- thresh_gams$model[[best_model_id]]
res$mgcv <- thresh_gams$gcvv[best_model_id]
res$gcvv <- gcvv[order(t_val)]
res$t_val <- sort(t_val)
class(res) <- c("thresh_gam", "gam", "glm",
"lm")
return(res)
} else {
stop("No thresh_gam available!")
}
}
|
/2020-03-03 nhl goals.R | no_license | easmala/TidyTuesday | R | false | false | 5,319 | r | ||
## The functions below return the inverted matrix; because inversion is
## computationally intensive, the inverse is stored in a cache to make later
## retrievals cheap. There are two parts: the first creates a matrix object
## whose state persists across environments via the superassignment operator
## (<<-); the second computes the inverse, but first checks whether it was
## already computed and, if so, simply returns it from the cache.
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based cache object: holds a matrix together with a slot for its
  # (lazily computed) inverse, and returns a list of accessor functions.
  inv <- NULL   # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() x                    # return the stored matrix
  setmatrix <- function(mat) inv <<- mat # store a computed inverse
  getmatrix <- function() inv            # return the cached inverse (or NULL)
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve computes the inverse of the matrix created by the function above,
## taking it as an argument. It first checks whether the inverse was already
## computed and, if yes, returns it from the cache. If not, it computes the
## inverse and stores it in the cache.
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## On the first call the inverse is computed with solve() and stored via
## x$setmatrix(); later calls return the cached value without recomputing.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getmatrix()
  if (!is.null(inv)) {
    # Cache hit: skip the (potentially expensive) inversion.
    message("getting cached matrix")  # fixed typo: "cashed" -> "cached"
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # invert the stored matrix
  x$setmatrix(inv)         # memoize for subsequent calls
  inv                      # return a matrix that is the inverse of 'x'
}
## The function returns the inverted matrix, but since the process is computationally
## intensive, it saves the inverted matrix in cache to facilitate its further retrieval. There are
## two parts to the function - the first creates a matrix object via super assignement
## operator that works across environments. The second part computes the inverse, but
## first checks whether it was computed already and, if yes, simply returns it from cache.
makeCacheMatrix <- function(x = matrix()) {
m<-NULL ##assign the inverse matrix to NULL
set<-function(y){
x<<-y
m<<-NULL
} ## super-assign the initial matrix to variable
get<-function()
x
setmatrix<-function(mat)
m<<-mat
getmatrix<-function()
m
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## cacheSolve computes the inverse of the matrix created in the above function, taking it
## as an arugment. It first checks whether it was computed and, if yes, retruns it from cache.
## If not, it computes the inverse and stores it in cache.
cacheSolve <- function(x, ...) {
m<-x$getmatrix() ## assign the value returned from the above function
if(!is.null(m)){
message("getting cashed matrix")
return(m)
} ## check whether the cached matrix is not NULL
data<-x$get() ## get data from the above
m<-solve(data,...) ## inverse it
x$setmatrix(m)
m
## Return a matrix that is the inverse of 'x'
} |
library(recipes)
### Name: summary.recipe
### Title: Summarize a Recipe
### Aliases: summary.recipe
### ** Examples
rec <- recipe( ~ ., data = USArrests)
summary(rec)
rec <- step_pca(rec, all_numeric(), num = 3)
summary(rec) # still the same since not yet trained
rec <- prep(rec, training = USArrests)
summary(rec)
| /data/genthat_extracted_code/recipes/examples/summary.recipe.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 323 | r | library(recipes)
### Name: summary.recipe
### Title: Summarize a Recipe
### Aliases: summary.recipe
### ** Examples
rec <- recipe( ~ ., data = USArrests)
summary(rec)
rec <- step_pca(rec, all_numeric(), num = 3)
summary(rec) # still the same since not yet trained
rec <- prep(rec, training = USArrests)
summary(rec)
|
# The location for the SNPs which will be used for fine mapping(the small region and the big region)
#
# copyright (c) 2014-2020 - Shijie Lyu
# last modified Dec.4, 2014
# first written Dec.4, 2014
setwd("C:/Data/600KSNPchip")
# Candidate SNPs for fine mapping in the big region (3855 SNPs), with annotation
bigregionSNPs <- read.table("Analysis/finemappingSNPswithannotation.txt", sep = "\t", header = TRUE)
# Boundary positions (bp) of the small fine-mapping region
smallregionstart <- 69583407
smallregionend <- 78715886
# Physical.Position may be read as a factor, so convert via character to numeric.
# Converting once replaces the duplicated conversion that previously ran inside
# both sides of the comparison.
position <- as.numeric(as.character(bigregionSNPs[, "Physical.Position"]))
# Keep SNPs strictly inside the small region (larger than the start, smaller
# than the end; 1246 SNPs). which() drops NA comparisons.
smallregionind <- which(position > smallregionstart & position < smallregionend)
smallregionSNPs <- bigregionSNPs[smallregionind, ]
write.table(smallregionSNPs, "Analysis/finemappingSNPinsmallregion.txt", sep="\t") | /R/SNPsforfinemappingsmallregion.R | no_license | ShijieLyu/PhDProject | R | false | false | 924 | r | # The location for the SNPs which will be used for fine mapping(the small region and the big region)
#
# copyright (c) 2014-2020 - Shijie Lyu
# last modified Dec.4, 2014
# first written Dec.4, 2014
setwd("C:/Data/600KSNPchip")
bigregionSNPs <- read.table("Analysis/finemappingSNPswithannotation.txt", sep = "\t", header=TRUE) # load the SNPs for fine mapping which are in the big region(3855 SNPs)
smallregionstart <- 69583407
smallregionend <- 78715886
smallregionind <- which(as.numeric(as.character(bigregionSNPs[,"Physical.Position"])) > smallregionstart & # Larger than the start
as.numeric(as.character(bigregionSNPs[,"Physical.Position"])) < smallregionend) # Smaller then the end
smallregionSNPs <- bigregionSNPs[smallregionind, ] # SNPs in the small region(1246 SNPs)
write.table(smallregionSNPs, "Analysis/finemappingSNPinsmallregion.txt", sep="\t") |
####################################################
###### Cumulant generating function ##
####################################################
# NOTE(review): despite the section title above, h_vol is the one-step
# conditional-variance (GARCH-type) recursion; the cumulant generating
# function itself is K_eps(), defined elsewhere and called in the update below.
#
# Args: para_h = c(a0, b1, a1, gama, lambda, a, b, ro); h = previous
# conditional variance; ret = previous return; rt = previous per-period rate.
# Returns the next conditional variance, or NA if any parameter constraint
# fails. Unguarded comparisons below raise an error when a parameter is NA.
h_vol<-function(para_h,h,ret,rt){
# para_h<-c() set up the parameters of the model
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8] ## ; c=para_h[5]; d=para_h[6]
## Mean 0 and Variance 1
# Derived constants normalizing the innovation to mean 0 / variance 1
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Parameter under the physical probability
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
g0=(b1+a1*(1+gama^2))
# drapeau ("flag"): set to 1 when any admissibility constraint is violated;
# h0, g0, c0 and d are computed only for these validity checks.
drapeau=0
if (a0<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (b0<=0){drapeau=1}
if (c<=0){drapeau=1}
if (c0<=0){drapeau=1}
if (a<=0){drapeau=1}
if (ro<=0){drapeau=1}
if (ro>=1){drapeau=1}
if (gama<=0){drapeau=1}
if (lambda<=0){drapeau=1}
# Bounds on the variance persistence g0; the specific limits (0.7, 9.997)
# are not documented here -- presumably empirical calibration bounds.
if (g0<=0.7){drapeau=1}
if (g0>=9.997){drapeau=1}
if (is.na(b0)==TRUE){drapeau=1}else{
if (b0<=0){drapeau=1}
if (b0>=a){drapeau=1}
if (b0==Inf){drapeau=1}
if (1/b0==Inf){drapeau=1}
}
if (is.na(h0)==TRUE){drapeau=1}else{
if (h0<=0){drapeau=1}
if (abs(h0)==Inf){drapeau=1}
if (1/abs(h0)==Inf){drapeau=1}
}
if (is.na(c)==TRUE){drapeau=1}else{
if (c<=0){drapeau=1}
if (abs(c)==Inf){drapeau=1}
if (1/abs(c)==Inf){drapeau=1}
}
if (is.na(h)==TRUE){drapeau=1}else{
if (h<=0){drapeau=1}
if (abs(h)==Inf){drapeau=1}
if (1/abs(h)==Inf){drapeau=1}
}
if (drapeau==0){
# Variance recursion; K_eps (project-defined) adjusts the standardized
# innovation for the cumulant of the driving noise.
resultat= a0 +b1*h+a1*h*(((ret-rt+K_eps(sqrt(h),a,b,c,d))/(sqrt(h)))-lambda-gama)^2
}else{
resultat=NA
}
return(resultat)
}
##############################################################
###### Conditional variance with risk neutral Proba ##
##############################################################
# Builds the full conditional-variance path h_star for the return series by
# iterating h_vol() from the stationary variance h_star[1].
# Args: para_h = c(a0, b1, a1, gama, lambda, a, b, ro); Data.returns must
# contain $rt (annualized rate, divided by 250 here) and $ret (returns).
# Returns a vector of length(ret), or all-NA when parameters are inadmissible.
# NOTE(review): h_star grows element-by-element (no preallocation) and the
# loop 2:Z1 misbehaves if Z1 < 2 -- acceptable for typical series lengths.
h<-function(para_h,Data.returns){
rt=Data.returns$rt/250 #### Interest rate Data : Data.BSJ$rt
ret=Data.returns$ret #### Returns : Data.BSJ$ret
Z1=length(ret)
# para_h<-c() set up the parameters of the model
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8] ## ; c=para_h[5]; d=para_h[6]
## Mean 0 and Variance 1
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Parameter under the physical probability
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
g0=(b1+a1*(1+gama^2))
h_star = c() #### A vector containing h from the model,
h_star[1]=a0/(1- (b1+a1*(1+gama^2))) #### The first value for h, = stationary variance
for (i in 2:Z1){
h_star[i]=h_vol(para_h,h_star[i-1],ret[i-1],rt[i-1])
# a0 +b1*h_star[i-1]+a1*h_star[i-1]*(((ret[i-1]-rt[i-1]- lambda*sqrt(h_star[i-1]))/(sqrt(h_star[i-1])))-lambda-gama)^2
}
# Same admissibility checks as in h_vol (minus the h check); if any fails
# the whole path is replaced by NAs.
drapeau=0
if (a0<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (b0<=0){drapeau=1}
if (c<=0){drapeau=1}
if (c0<=0){drapeau=1}
if (a<=0){drapeau=1}
if (ro<=0){drapeau=1}
if (ro>=1){drapeau=1}
if (gama<=0){drapeau=1}
if (lambda<=0){drapeau=1}
if (g0<=0.7){drapeau=1}
if (g0>=9.997){drapeau=1}
if (is.na(b0)==TRUE){drapeau=1}else{
if (b0<=0){drapeau=1}
if (b0>=a){drapeau=1}
if (b0==Inf){drapeau=1}
if (1/b0==Inf){drapeau=1}
}
if (is.na(h0)==TRUE){drapeau=1}else{
if (h0<=0){drapeau=1}
if (abs(h0)==Inf){drapeau=1}
if (1/abs(h0)==Inf){drapeau=1}
}
if (is.na(c)==TRUE){drapeau=1}else{
if (c<=0){drapeau=1}
if (abs(c)==Inf){drapeau=1}
if (1/abs(c)==Inf){drapeau=1}
}
if (drapeau==0){
resultat=h_star
}else{
resultat=rep(NA, Z1)
}
return(resultat)
}
######################
###### VIX ##
######################
# Model-implied VIX at horizon T_0 = 22 trading days given the current
# conditional variance h. Ret and r are kept in the signature for interface
# consistency with the other model functions but are not used here.
# Returns NA when the parameter vector is outside the admissible region.
VIX_Q <- function(para_h, h, Ret, r) {
  tau <- 250   # annualization basis (trading days per year)
  T_0 <- 22    # VIX horizon in trading days (~ one calendar month)
  # Unpack the parameter vector (same layout as in h_vol / h)
  a0 <- para_h[1]; b1 <- para_h[2]; a1 <- para_h[3]; gama <- para_h[4]
  lambda <- para_h[5]; a <- para_h[6]; b <- para_h[7]; ro <- para_h[8]
  # Derived quantities of the mean-0 / variance-1 innovation normalization
  c0 <- a^2 - b^2
  c <- ((sqrt(a^2 - b^2))^(3/2)) / a
  d <- (-b / a) * (sqrt(a^2 - b^2))^(1/2)
  # Stationary variance under the physical measure, and |b|
  h0 <- a0 / (1 - (b1 + a1 * (1 + gama^2)))
  b0 <- abs(b)
  # Admissibility checks: any violation marks the parameters as invalid
  bad <- FALSE
  if (a0 <= 0) bad <- TRUE
  if (b1 <= 0) bad <- TRUE
  if (a1 <= 0) bad <- TRUE
  if (b0 <= 0) bad <- TRUE
  if (c <= 0) bad <- TRUE
  if (c0 <= 0) bad <- TRUE
  if (a <= 0) bad <- TRUE
  if (ro <= 0) bad <- TRUE
  if (ro >= 1) bad <- TRUE
  if (gama <= 0) bad <- TRUE
  # b0 must additionally be finite, non-zero and strictly smaller than a
  if (is.na(b0)) {
    bad <- TRUE
  } else {
    if (b0 <= 0) bad <- TRUE
    if (b0 >= a) bad <- TRUE
    if (b0 == Inf) bad <- TRUE
    if (1 / b0 == Inf) bad <- TRUE
  }
  # Stationary variance must be a finite positive number
  if (is.na(h0)) {
    bad <- TRUE
  } else {
    if (h0 <= 0) bad <- TRUE
    if (abs(h0) == Inf) bad <- TRUE
    if (1 / abs(h0) == Inf) bad <- TRUE
  }
  # Scale parameter c must be a finite positive number
  if (is.na(c)) {
    bad <- TRUE
  } else {
    if (c <= 0) bad <- TRUE
    if (abs(c) == Inf) bad <- TRUE
    if (1 / abs(c) == Inf) bad <- TRUE
  }
  # Risk-neutral persistence of the variance recursion
  psy <- b1 + a1 * (1 + (lambda + gama)^2)
  if (bad) {
    return(NA)
  }
  # Geometric sum psy^0 + ... + psy^(T_0 - 1)
  geom <- (1 - psy^T_0) / (1 - psy)
  100 * sqrt(tau / T_0) *
    sqrt(a0 * ((22 / (1 - psy)) - geom / (1 - psy)) + h * geom)
}
###########################################################
##### The Log-likelihood over all Options ####
###########################################################
# Gaussian log-likelihood of VIX pricing errors with AR(1)-correlated
# residuals (correlation ro). Data.returns must contain $VIX, $ret and $rt.
# Relies on h() and VIX_Q() defined above.
NGARCH_likelihood_vix <- function(para_h,Data.returns) {
Vix=Data.returns$VIX
ret =Data.returns$ret #### Returns : Data.BSJ$ret
rt=Data.returns$rt/250
# para_h<-c() set up the parameters of the model
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7]; ro=para_h[8] ## ; c=para_h[5]; d=para_h[6]
## Mean 0 and Variance 1
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Parameter under the physical probability
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
VIX_Market<-Vix
Nvix=length(Vix)
# NOTE(review): this rebinds `h` to the variance path returned by h(),
# shadowing the function of the same name. It works (R resolves h(...) to
# the function), but is confusing -- consider renaming the local variable.
h = h(para_h,Data.returns)
VIX_Model <- rep(NA, Nvix)
for (i in 1:Nvix){
# NOTE(review): the i+1 offsets assume the variance/return series are one
# element longer than the VIX series; with equal lengths the last iteration
# reads past the end (yielding NA) -- confirm the intended alignment.
VIX_Model[i]= VIX_Q(para_h,h[i+1],ret[i+1],rt[i+1])
}
# Pricing errors; the last error is pinned to 0
error <- rep(NA, Nvix)
error[Nvix]=0
# NOTE(review): 1:Nvix-1 parses as (1:Nvix)-1, i.e. 0:(Nvix-1). The i = 0
# pass is a silent no-op (zero-length assignment), so the net effect equals
# the presumably intended 1:(Nvix-1) -- but write it explicitly.
for (i in 1:Nvix-1){
error[i]= VIX_Market[i] - VIX_Model[i]
}
# Squared AR(1)-standardized error increments, scaled by 1/(1 - ro^2)
error_2 <- rep(NA, Nvix)
error_2[1]=0
for (i in 2:Nvix){
error_2[i]= ((error[i]-ro*error[i-1])^2)/(1-ro^2)
}
sigma=mean(error^2)
# NOTE(review): error[i] below uses the leftover loop index (i == Nvix after
# the loop), and error[Nvix] was pinned to 0 above, so that term always
# vanishes. This looks like it was meant to be error[1]^2 (the AR(1) initial
# condition) -- confirm against the likelihood derivation.
log_like=-1/2*sum(log(sigma)+((error^2)/sigma))-(Nvix/2)*(log(2*pi)+log(sigma*(1-(ro^2))))+ (1/2)*(log(sigma*(1-(ro^2)))-log(sigma))-(1/(2*sigma))*(error[i]^2+sum(error_2))
return(log_like)
}
| /estimationJob/NGARCH/N_Ess_ret_VIX/Loglik_VIX_NGARCH.R | no_license | Fanirisoa/dynamic_pricing | R | false | false | 6,981 | r | ####################################################
###### Cumulant generating function ##
####################################################
h_vol<-function(para_h,h,ret,rt){
# para_h<-c() set up the parameters of the model
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8] ## ; c=para_h[5]; d=para_h[6]
## Mean 0 and Variance 1
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Parameter under the physical probability
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
g0=(b1+a1*(1+gama^2))
drapeau=0
if (a0<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (b0<=0){drapeau=1}
if (c<=0){drapeau=1}
if (c0<=0){drapeau=1}
if (a<=0){drapeau=1}
if (ro<=0){drapeau=1}
if (ro>=1){drapeau=1}
if (gama<=0){drapeau=1}
if (lambda<=0){drapeau=1}
if (g0<=0.7){drapeau=1}
if (g0>=9.997){drapeau=1}
if (is.na(b0)==TRUE){drapeau=1}else{
if (b0<=0){drapeau=1}
if (b0>=a){drapeau=1}
if (b0==Inf){drapeau=1}
if (1/b0==Inf){drapeau=1}
}
if (is.na(h0)==TRUE){drapeau=1}else{
if (h0<=0){drapeau=1}
if (abs(h0)==Inf){drapeau=1}
if (1/abs(h0)==Inf){drapeau=1}
}
if (is.na(c)==TRUE){drapeau=1}else{
if (c<=0){drapeau=1}
if (abs(c)==Inf){drapeau=1}
if (1/abs(c)==Inf){drapeau=1}
}
if (is.na(h)==TRUE){drapeau=1}else{
if (h<=0){drapeau=1}
if (abs(h)==Inf){drapeau=1}
if (1/abs(h)==Inf){drapeau=1}
}
if (drapeau==0){
resultat= a0 +b1*h+a1*h*(((ret-rt+K_eps(sqrt(h),a,b,c,d))/(sqrt(h)))-lambda-gama)^2
}else{
resultat=NA
}
return(resultat)
}
##############################################################
###### Conditional variance with risk-neutral probability  ##
##############################################################
# Builds the full conditional-variance path h*_t of the NGARCH model by
# iterating the one-step recursion h_vol() (defined earlier in this
# file) over the observed return series.
#
# Arguments:
#   para_h       : numeric vector (a0, b1, a1, gama, lambda, a, b, ro);
#                  same layout as used by VIX_Q() and
#                  NGARCH_likelihood_vix() below.
#   Data.returns : list/data frame with components ret (returns) and
#                  rt (interest rate; divided by 250 here, i.e. an
#                  annual rate converted to a per-trading-day rate).
#
# Returns: numeric vector of length(ret) holding the variance path, or
# rep(NA, length(ret)) when any parameter constraint below is violated.
h<-function(para_h,Data.returns){
rt=Data.returns$rt/250 #### daily interest rate (annual rate / 250)
ret=Data.returns$ret #### returns
Z1=length(ret)
# Unpack the parameter vector (positions documented above).
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8] ## ; c=para_h[5]; d=para_h[6]
## Innovation-distribution quantities rescaled to mean 0 / variance 1;
## computed here so they can be range-checked below.
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Quantities under the physical probability: h0 is the stationary
# variance and g0 the persistence b1 + a1*(1 + gama^2).
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
g0=(b1+a1*(1+gama^2))
h_star = c() #### variance path; grown inside the loop
h_star[1]=a0/(1- (b1+a1*(1+gama^2))) #### initialized at the stationary variance
# One-step recursion via h_vol().  NOTE(review): assumes Z1 >= 2; with
# Z1 < 2 the sequence 2:Z1 counts downward -- confirm inputs always
# contain at least two returns.
for (i in 2:Z1){
h_star[i]=h_vol(para_h,h_star[i-1],ret[i-1],rt[i-1])
# a0 +b1*h_star[i-1]+a1*h_star[i-1]*(((ret[i-1]-rt[i-1]+K_eps(sqrt(h_star[i-1]),a,b,c,d))/(sqrt(h_star[i-1])))-lambda-gama)^2
}
# Parameter validation: drapeau ("flag") is set to 1 as soon as any
# constraint fails.  The is.na()-guarded blocks prevent an NA condition
# from reaching `if` (which would raise an error) when b0/h0/c are not
# finite.  The persistence g0 is restricted to (0.7, 9.997) exactly as
# in the companion functions.
drapeau=0
if (a0<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (b0<=0){drapeau=1}
if (c<=0){drapeau=1}
if (c0<=0){drapeau=1}
if (a<=0){drapeau=1}
if (ro<=0){drapeau=1}
if (ro>=1){drapeau=1}
if (gama<=0){drapeau=1}
if (lambda<=0){drapeau=1}
if (g0<=0.7){drapeau=1}
if (g0>=9.997){drapeau=1}
if (is.na(b0)==TRUE){drapeau=1}else{
if (b0<=0){drapeau=1}
if (b0>=a){drapeau=1}
if (b0==Inf){drapeau=1}
if (1/b0==Inf){drapeau=1}
}
if (is.na(h0)==TRUE){drapeau=1}else{
if (h0<=0){drapeau=1}
if (abs(h0)==Inf){drapeau=1}
if (1/abs(h0)==Inf){drapeau=1}
}
if (is.na(c)==TRUE){drapeau=1}else{
if (c<=0){drapeau=1}
if (abs(c)==Inf){drapeau=1}
if (1/abs(c)==Inf){drapeau=1}
}
# Return the path when all constraints hold; otherwise an all-NA vector
# of the same length (callers such as the likelihood treat NA as an
# invalid parameter draw).
if (drapeau==0){
resultat=h_star
}else{
resultat=rep(NA, Z1)
}
return(resultat)
}
######################
######    VIX       ##
######################
# Model-implied VIX from the current conditional variance h under the
# risk-neutral measure, expressed in annualized VIX points
# (tau = 250 trading days per year, T_0 = 22-day horizon, scaled x100).
#
# Arguments:
#   para_h : numeric vector (a0, b1, a1, gama, lambda, a, b, ro); same
#            layout as in h() above.
#   h      : current conditional (daily) variance h_t.
#   Ret, r : accepted for interface symmetry with the other functions.
#            NOTE(review): both are unused in this function body.
#
# Returns: scalar VIX value, or NA when any parameter constraint fails.
VIX_Q<-function(para_h,h,Ret,r){
tau = 250
T_0=22
# Unpack the parameter vector.
a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8]## ; c=para_h[5]; d=para_h[6]
## Innovation-distribution quantities rescaled to mean 0 / variance 1;
## used only in the validation block below (d is computed but unused).
c0=a^2 - b^2
c=((sqrt(a^2 - b^2))^(3/2))/a
d=(-b/a)*(sqrt(a^2 - b^2))^(1/2)
# Stationary variance and |b| under the physical probability.
h0=a0/(1- (b1+a1*(1+gama^2)))
b0=abs(b)
# Parameter validation, mirroring h(): drapeau flags any violated
# constraint; NA-guarded blocks keep NA out of `if` conditions.
drapeau=0
if (a0<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (b0<=0){drapeau=1}
if (c<=0){drapeau=1}
if (c0<=0){drapeau=1}
if (a<=0){drapeau=1}
if (ro<=0){drapeau=1}
if (ro>=1){drapeau=1}
if (gama<=0){drapeau=1}
if (is.na(b0)==TRUE){drapeau=1}else{
if (b0<=0){drapeau=1}
if (b0>=a){drapeau=1}
if (b0==Inf){drapeau=1}
if (1/b0==Inf){drapeau=1}
}
if (is.na(h0)==TRUE){drapeau=1}else{
if (h0<=0){drapeau=1}
if (abs(h0)==Inf){drapeau=1}
if (1/abs(h0)==Inf){drapeau=1}
}
if (is.na(c)==TRUE){drapeau=1}else{
if (c<=0){drapeau=1}
if (abs(c)==Inf){drapeau=1}
if (1/abs(c)==Inf){drapeau=1}
}
# Risk-neutral variance persistence.
Psy = b1+a1*(1+(lambda+gama)^2)
# VIX formula: multi-period expected variance over T_0 days, annualized.
# NOTE(review): the literal 22 inside the formula duplicates T_0 --
# presumably it should read T_0; confirm before changing.
if (drapeau==0){
resultat= 100*sqrt(tau/T_0)*sqrt(a0*((22/(1-Psy))- ((1-(Psy^T_0))/(1-Psy)^2))+ h*((1-(Psy^T_0))/(1-Psy)))
}else{
resultat=NA
}
return(resultat)
}
###########################################################
#####  Log-likelihood of the VIX measurement equation  ####
###########################################################
# Quasi log-likelihood of the observed VIX series under the NGARCH
# model, assuming the measurement errors (market VIX - model VIX)
# follow an AR(1) process with coefficient `ro`.
#
# Arguments:
#   para_h       : numeric vector (a0, b1, a1, gama, lambda, a, b, ro);
#                  same layout as h() and VIX_Q() above.  Only ro is
#                  used directly here -- the full vector is forwarded
#                  to h() and VIX_Q(), which unpack it themselves.
#                  (The original also unpacked and derived c0/c/d/h0/b0
#                  locally; those values were never used and have been
#                  removed.)
#   Data.returns : list/data frame with components VIX (market VIX),
#                  ret (returns) and rt (annual rate; divided by 250
#                  here to get a per-day rate).
#
# Returns: scalar log-likelihood value.
NGARCH_likelihood_vix <- function(para_h,Data.returns) {
Vix=Data.returns$VIX
ret =Data.returns$ret #### Returns
rt=Data.returns$rt/250 #### daily interest rate
ro=para_h[8]
VIX_Market<-Vix
Nvix=length(Vix)
# Conditional-variance path.  The local name is h_path (the original
# used `h`, shadowing the variance function h() defined above).
h_path = h(para_h,Data.returns)
# Model-implied VIX series.  NOTE(review): the i+1 offsets require
# length(ret) >= Nvix + 1 (one more return than VIX observation) --
# confirm against the data layout.
VIX_Model <- rep(NA, Nvix)
for (i in seq_len(Nvix)){
VIX_Model[i]= VIX_Q(para_h,h_path[i+1],ret[i+1],rt[i+1])
}
# Measurement errors; the last one is pinned at 0 as in the original
# specification.  (The original loop ran over 1:Nvix-1, which by
# operator precedence is 0:(Nvix-1); the i = 0 pass was a silent
# no-op, fixed here with seq_len.)
error <- rep(NA, Nvix)
error[Nvix]=0
for (i in seq_len(Nvix - 1)){
error[i]= VIX_Market[i] - VIX_Model[i]
}
# AR(1)-whitened squared errors: (e_t - ro * e_{t-1})^2 / (1 - ro^2).
error_2 <- rep(NA, Nvix)
error_2[1]=0
for (i in 2:Nvix){
error_2[i]= ((error[i]-ro*error[i-1])^2)/(1-ro^2)
}
sigma=mean(error^2)
# Gaussian AR(1) log-likelihood.  `error[Nvix]` makes explicit the
# original `error[i]`, whose i was the leftover loop index Nvix; that
# term is 0 by construction above -- flagged for review.
log_like=-1/2*sum(log(sigma)+((error^2)/sigma))-(Nvix/2)*(log(2*pi)+log(sigma*(1-(ro^2))))+ (1/2)*(log(sigma*(1-(ro^2)))-log(sigma))-(1/(2*sigma))*(error[Nvix]^2+sum(error_2))
return(log_like)
}
|
# ----------------------------------------------------------------------
# Corn-yield regression: workspace setup and yield detrending.
# NOTE(review): rm(list=ls()) clears the caller's workspace when this
# file is sourced -- hostile in scripts; kept as-is.
rm(list=ls())
# NOTE(review): library(splines) is preferred; require() only returns
# FALSE on failure instead of stopping.
require(splines)
# install.packages("splines")
# #Set values for State ANSI
# IA <- 19;IL <- 17;IN <- 18;MN <- 27;OH <- 39;WI <- 55;MO <- 29;NB <- 31;KS <- 20;SD <- 46
# #select 10 states region
# ten_states <- c(IA, IL, IN, MN, OH, WI, MO, NB, KS, SD)
# # setwd("../Albert_Crop")
# climate.EVI <- read.csv("../Albert_Crop/climate.EVImonthly.csv")
# #set mydata as a subset of the dataset which has percent harvest > 2%
# mydata = climate.EVI[which(climate.EVI$PercentHarvest > 0.02),]
# #set FIPS and State ANSI as factors
# mydata$FIPS = as.factor(mydata$FIPS); mydata$State.ANSI = as.factor(mydata$State.ANSI)
# #rainfed subset, select only counties that have Rainfed Area > 10000 (or whatever number you want)
# rainfed.subset <- mydata[which(mydata$Area.rain > 10000), ]
# climate.EVI <- read.csv("../Albert_Crop/climate.EVImonthly.csv")
# State ANSI codes and study regions.
IA <- 19;IL <- 17;IN <- 18;MN <- 27;OH <- 39;WI <- 55;MO <- 29;NB <- 31;KS <- 20;SD <- 46
three_states = c(IL,IN,IA)
seven_states <- c(IA, IL, IN, MN, OH, WI, MO)
ten_states <- c(IA, IL, IN, MN, OH, WI, MO, NB, KS, SD)
# Keep county-years with >1% harvested area; FIPS and state as factors.
# NOTE(review): relies on `climate.EVI` already being in the workspace
# (the read.csv above is commented out) -- confirm loading order.
mydata = climate.EVI[which(climate.EVI$PercentHarvest > 0.01),];mydata$FIPS = as.factor(mydata$FIPS); mydata$State.ANSI = as.factor(mydata$State.ANSI)
#mydata = mydata[mydata$State.ANSI %in% seven_states,]
# year.fit <- lm(yield ~ poly(year,2,raw=T), data=mydata)
# Remove the technology trend: fit a linear year trend and work with
# the residual yield.cor = yield - predicted trend.
year.fit <- lm(yield ~ year, data=mydata) # Use linear trends
yearly.means <- predict(year.fit,mydata)
yc <- mydata
yc$yield.cor <- yc$yield - yearly.means
# NOTE(review): `subset` shadows base::subset(); later subset(...) calls
# still resolve to the base function, but the name is a hazard.
subset = yc
# IRR.lm.rmse.test = rep(NA,15);IRR.test.r2.cor2 = rep(NA,15)
# rain.lm.rmse.test = rep(NA,15);rain.test.r2.cor2 = rep(NA,15)
# Actual.yield = rep(NA, 15)
# Predicted.yield = rep(NA,15)
# sequential = matrix(nrow=20,ncol=6)
# Train/test split.  `year` covers 1981-2015; `pred.year` holds the
# indices (21..35) of the 2001-2015 evaluation years.
year = seq(from = 1981, to = 2015);pred.year = seq(from=21, to=35)
train_region <- ten_states
test_region <- ten_states
# i fixed at 15 -> test.year = year[35] = 2015.
i=15
test.year = year[pred.year[i]]
#train.year = year[-pred.year[i]]
# NOTE(review): training window 1981..2015 includes the test year; the
# leave-one-out alternative is commented out above -- confirm intended.
train.year = year[1:(pred.year[i])]
# Column 1 of `subset` is presumably the year -- confirm against data.
data.train = subset[which(subset[, 1] %in% train.year & subset$State.ANSI %in% train_region),]
data.test = subset[which(subset[, 1] %in% test.year & subset$State.ANSI %in% test_region),]
# original.test = subset[which(subset[,1] %in% test.year & subset$State.ANSI %in% test_region),]
# lm.fit <- lm(yield.cor ~
# # + bs(degree.day5, knots = c(200,250,300,350,400), degree = 1)
# # + bs(degree.day6, knots = c(300,350,400,450,500), degree = 1)
# + bs(degree.day7, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day8, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day9, knots = c(200,250,300,350,400), degree = 1)
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# # + bs(mvpd6, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# # + bs(mvpd9, knots = c(6,7,8,9,10), degree=1)
# + bs(precip5, knots = c(100,200,300), degree = 1)
# + bs(precip6, knots = c(100,200,300), degree = 1)
# + bs(precip7, knots = c(100,200,300), degree = 1)
# + bs(precip8, knots = c(100,200,300), degree = 1)
# + FIPS
# ,data = data.train)
# This can basically reproduce the heavry rainfall impact
# lm.fit <- lm(yield.cor ~
# + bs(degree.day7, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day8, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day9, knots = c(200,250,300,350,400), degree = 1)
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# + bs(precip5, knots = c(100,200,300), degree = 1)
# + bs(precip6, knots = c(100,200,300), degree = 1)
# + bs(precip7, knots = c(100,200,300), degree = 1)
# + bs(precip8, knots = c(100,200,300), degree = 1)
# + FIPS
# ,data = data.train)
# ~ degree.day7 + degree.day8 + degree.day9
# + mvpd5 + mvpd7 + mvpd8
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# Yield regression on the detrended yield: degree-day and mean-temp
# terms plus piecewise-linear (degree-1 B-spline) precipitation terms
# with knots at 100/200/300, and county (FIPS) fixed effects.
# The numeric suffixes (5..9) presumably index months May..September,
# and precip is presumably in mm -- confirm against the data dictionary.
lm.fit <- lm(yield.cor ~
             degree.day7 + degree.day8 + degree.day9
             + mt5 + mt6 + mt7 + mt8
             + bs(precip5, knots = c(100,200,300), degree = 1)
             + bs(precip6, knots = c(100,200,300), degree = 1)
             + bs(precip7, knots = c(100,200,300), degree = 1)
             + bs(precip8, knots = c(100,200,300), degree = 1)
             + FIPS
             ,data = data.train)
# NOTE(review): prefer library(car); require() does not stop on failure.
require(car)
# Type-II ANOVA table for the fitted model (echoes when run interactively).
Anova(lm.fit)
# Per-year predictions and output ---------------------------------------
#
# The original created 70 global variables via assign()/get()
# (data.testYYYY / lm.predYYYY) and then concatenated all 35 of each by
# hand.  Equivalent behavior with two pre-allocated lists filled in
# year order (1981..2015), so the row order of the bound result is
# unchanged.
test_sets <- vector("list", length(year))
pred_sets <- vector("list", length(year))
for (i in seq_along(year)) {
  test.year <- year[i]
  # `subset` is the detrended data frame built above (it shadows
  # base::subset); column 1 is presumably the year -- confirm.
  year_data <- subset[which(subset[, 1] %in% test.year & subset$State.ANSI %in% test_region), ]
  test_sets[[i]] <- year_data
  pred_sets[[i]] <- predict(lm.fit, year_data)
}
# Stack per-year test sets and predictions (same order/values as the
# original hand-written rbind.data.frame(...) and c(...) calls).
Yan <- do.call(rbind.data.frame, test_sets)
predictions <- do.call(c, pred_sets)
Yan.data <- data.frame(Yan, predictions)
# Interactive echoes kept from the original script.
lm.fit
data.train
# Predictions for one example county (FIPS 17019).
Yan.data$predictions[which(Yan.data$FIPS=='17019')]
Yan.data
# Write outputs; the four files are identical copies under different
# names (the "predicition" typo is kept so existing paths still match).
yield_prediction <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction, "../data/yield_predicition_reproduced.csv")
yield_prediction_ten <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_ten, "../data/yield_predicition_reproduced_tenstates.csv")
yield_prediction_ten_linear_trend <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_ten_linear_trend, "../data/yield_predicition_reproduced_tenstates_linear_trend.csv")
yield_prediction_test <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_test, "../data/yield_predicition_reproduced_test.csv")
| /predictionCode/old_Albert_model.R | no_license | yogabbagabb/Corn | R | false | false | 7,555 | r |
# ----------------------------------------------------------------------
# Corn-yield regression: workspace setup and yield detrending.
# NOTE(review): rm(list=ls()) clears the caller's workspace when this
# file is sourced -- hostile in scripts; kept as-is.
rm(list=ls())
# NOTE(review): library(splines) is preferred; require() only returns
# FALSE on failure instead of stopping.
require(splines)
# install.packages("splines")
# #Set values for State ANSI
# IA <- 19;IL <- 17;IN <- 18;MN <- 27;OH <- 39;WI <- 55;MO <- 29;NB <- 31;KS <- 20;SD <- 46
# #select 10 states region
# ten_states <- c(IA, IL, IN, MN, OH, WI, MO, NB, KS, SD)
# # setwd("../Albert_Crop")
# climate.EVI <- read.csv("../Albert_Crop/climate.EVImonthly.csv")
# #set mydata as a subset of the dataset which has percent harvest > 2%
# mydata = climate.EVI[which(climate.EVI$PercentHarvest > 0.02),]
# #set FIPS and State ANSI as factors
# mydata$FIPS = as.factor(mydata$FIPS); mydata$State.ANSI = as.factor(mydata$State.ANSI)
# #rainfed subset, select only counties that have Rainfed Area > 10000 (or whatever number you want)
# rainfed.subset <- mydata[which(mydata$Area.rain > 10000), ]
# climate.EVI <- read.csv("../Albert_Crop/climate.EVImonthly.csv")
# State ANSI codes and study regions.
IA <- 19;IL <- 17;IN <- 18;MN <- 27;OH <- 39;WI <- 55;MO <- 29;NB <- 31;KS <- 20;SD <- 46
three_states = c(IL,IN,IA)
seven_states <- c(IA, IL, IN, MN, OH, WI, MO)
ten_states <- c(IA, IL, IN, MN, OH, WI, MO, NB, KS, SD)
# Keep county-years with >1% harvested area; FIPS and state as factors.
# NOTE(review): relies on `climate.EVI` already being in the workspace
# (the read.csv above is commented out) -- confirm loading order.
mydata = climate.EVI[which(climate.EVI$PercentHarvest > 0.01),];mydata$FIPS = as.factor(mydata$FIPS); mydata$State.ANSI = as.factor(mydata$State.ANSI)
#mydata = mydata[mydata$State.ANSI %in% seven_states,]
# year.fit <- lm(yield ~ poly(year,2,raw=T), data=mydata)
# Remove the technology trend: fit a linear year trend and work with
# the residual yield.cor = yield - predicted trend.
year.fit <- lm(yield ~ year, data=mydata) # Use linear trends
yearly.means <- predict(year.fit,mydata)
yc <- mydata
yc$yield.cor <- yc$yield - yearly.means
# NOTE(review): `subset` shadows base::subset(); later subset(...) calls
# still resolve to the base function, but the name is a hazard.
subset = yc
# IRR.lm.rmse.test = rep(NA,15);IRR.test.r2.cor2 = rep(NA,15)
# rain.lm.rmse.test = rep(NA,15);rain.test.r2.cor2 = rep(NA,15)
# Actual.yield = rep(NA, 15)
# Predicted.yield = rep(NA,15)
# sequential = matrix(nrow=20,ncol=6)
# Train/test split.  `year` covers 1981-2015; `pred.year` holds the
# indices (21..35) of the 2001-2015 evaluation years.
year = seq(from = 1981, to = 2015);pred.year = seq(from=21, to=35)
train_region <- ten_states
test_region <- ten_states
# i fixed at 15 -> test.year = year[35] = 2015.
i=15
test.year = year[pred.year[i]]
#train.year = year[-pred.year[i]]
# NOTE(review): training window 1981..2015 includes the test year; the
# leave-one-out alternative is commented out above -- confirm intended.
train.year = year[1:(pred.year[i])]
# Column 1 of `subset` is presumably the year -- confirm against data.
data.train = subset[which(subset[, 1] %in% train.year & subset$State.ANSI %in% train_region),]
data.test = subset[which(subset[, 1] %in% test.year & subset$State.ANSI %in% test_region),]
# original.test = subset[which(subset[,1] %in% test.year & subset$State.ANSI %in% test_region),]
# lm.fit <- lm(yield.cor ~
# # + bs(degree.day5, knots = c(200,250,300,350,400), degree = 1)
# # + bs(degree.day6, knots = c(300,350,400,450,500), degree = 1)
# + bs(degree.day7, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day8, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day9, knots = c(200,250,300,350,400), degree = 1)
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# # + bs(mvpd6, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# # + bs(mvpd9, knots = c(6,7,8,9,10), degree=1)
# + bs(precip5, knots = c(100,200,300), degree = 1)
# + bs(precip6, knots = c(100,200,300), degree = 1)
# + bs(precip7, knots = c(100,200,300), degree = 1)
# + bs(precip8, knots = c(100,200,300), degree = 1)
# + FIPS
# ,data = data.train)
# This can basically reproduce the heavry rainfall impact
# lm.fit <- lm(yield.cor ~
# + bs(degree.day7, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day8, knots = c(350,400,450,500,550), degree = 1)
# + bs(degree.day9, knots = c(200,250,300,350,400), degree = 1)
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# + bs(precip5, knots = c(100,200,300), degree = 1)
# + bs(precip6, knots = c(100,200,300), degree = 1)
# + bs(precip7, knots = c(100,200,300), degree = 1)
# + bs(precip8, knots = c(100,200,300), degree = 1)
# + FIPS
# ,data = data.train)
# ~ degree.day7 + degree.day8 + degree.day9
# + mvpd5 + mvpd7 + mvpd8
# + bs(mvpd5, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd7, knots = c(6,7,8,9,10), degree=1)
# + bs(mvpd8, knots = c(6,7,8,9,10), degree=1)
# Yield regression on the detrended yield: degree-day and mean-temp
# terms plus piecewise-linear (degree-1 B-spline) precipitation terms
# with knots at 100/200/300, and county (FIPS) fixed effects.
# The numeric suffixes (5..9) presumably index months May..September,
# and precip is presumably in mm -- confirm against the data dictionary.
lm.fit <- lm(yield.cor ~
             degree.day7 + degree.day8 + degree.day9
             + mt5 + mt6 + mt7 + mt8
             + bs(precip5, knots = c(100,200,300), degree = 1)
             + bs(precip6, knots = c(100,200,300), degree = 1)
             + bs(precip7, knots = c(100,200,300), degree = 1)
             + bs(precip8, knots = c(100,200,300), degree = 1)
             + FIPS
             ,data = data.train)
# NOTE(review): prefer library(car); require() does not stop on failure.
require(car)
# Type-II ANOVA table for the fitted model (echoes when run interactively).
Anova(lm.fit)
# Per-year predictions and output ---------------------------------------
#
# The original created 70 global variables via assign()/get()
# (data.testYYYY / lm.predYYYY) and then concatenated all 35 of each by
# hand.  Equivalent behavior with two pre-allocated lists filled in
# year order (1981..2015), so the row order of the bound result is
# unchanged.
test_sets <- vector("list", length(year))
pred_sets <- vector("list", length(year))
for (i in seq_along(year)) {
  test.year <- year[i]
  # `subset` is the detrended data frame built above (it shadows
  # base::subset); column 1 is presumably the year -- confirm.
  year_data <- subset[which(subset[, 1] %in% test.year & subset$State.ANSI %in% test_region), ]
  test_sets[[i]] <- year_data
  pred_sets[[i]] <- predict(lm.fit, year_data)
}
# Stack per-year test sets and predictions (same order/values as the
# original hand-written rbind.data.frame(...) and c(...) calls).
Yan <- do.call(rbind.data.frame, test_sets)
predictions <- do.call(c, pred_sets)
Yan.data <- data.frame(Yan, predictions)
# Interactive echoes kept from the original script.
lm.fit
data.train
# Predictions for one example county (FIPS 17019).
Yan.data$predictions[which(Yan.data$FIPS=='17019')]
Yan.data
# Write outputs; the four files are identical copies under different
# names (the "predicition" typo is kept so existing paths still match).
yield_prediction <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction, "../data/yield_predicition_reproduced.csv")
yield_prediction_ten <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_ten, "../data/yield_predicition_reproduced_tenstates.csv")
yield_prediction_ten_linear_trend <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_ten_linear_trend, "../data/yield_predicition_reproduced_tenstates_linear_trend.csv")
yield_prediction_test <- subset(Yan.data, select = c("year","FIPS","yield.cor", "predictions"))
write.csv(yield_prediction_test, "../data/yield_predicition_reproduced_test.csv")
|
A.B. Olshen, E.S. Venkatraman (aka Venkatraman E. Seshan), R. Lucito and M. Wigler, \emph{Circular binary segmentation for the analysis of array-based DNA copy number data}, Biostatistics, 2004
| /incl/OlshenVenkatraman_2004.Rd | no_license | HenrikBengtsson/PSCBS | R | false | false | 194 | rd | A.B. Olshen, E.S. Venkatraman (aka Venkatraman E. Seshan), R. Lucito and M. Wigler, \emph{Circular binary segmentation for the analysis of array-based DNA copy number data}, Biostatistics, 2004
|
#######################################
# WASH Benefits STH finished floor analysis - Bangladesh
# Table 1 summary stats
# Sensitivity analysis trimming by
# extreme propensity score values
#######################################
# configure directories, load libraries and base functions
# (0-config.R presumably defines clean_bdata_path, bpred_path, tab_path
# and helper functions such as mean_se_strat/pt.est.f -- confirm.)
source(paste0(here::here(), "/0-config.R"))
library(dplyr)
library(data.table)
# load the cleaned Bangladesh analysis data
bd <- data.table((readRDS(clean_bdata_path)))
# Trim observations with extreme propensity scores: keep only ids whose
# predicted score lies within [min + 0.05, max - 0.05] of the observed
# score range (potential positivity violations are dropped).
bpred = readRDS(bpred_path)
bpred$dataid = as.character(bpred$dataid)
btrim_lower = min(bpred$pred, na.rm=TRUE) + 0.05
btrim_upper = max(bpred$pred, na.rm=TRUE) - 0.05
bpred_drop = bpred %>% filter(pred<btrim_lower | pred>btrim_upper) %>%
  dplyr::select(dataid) %>% pull()
# Echo row counts before/after trimming (interactive check).
nrow(bd)
bd = bd %>% filter(!dataid %in% bpred_drop)
nrow(bd)
#---------------------------
# List and label covariates
#---------------------------
# Work on a data.table copy of the trimmed data.
dt <- as.data.table(bd)
# Household level characteristics ----------------------------------------
#
# Each `row_*` object below is one summary-table row produced by
# mean_se_strat() (from 0-config.R): the summary of one covariate
# stratified by floor type (floor == 1 vs floor == 0; 1 presumably
# marks a finished floor -- confirm).  The original repeated the same
# five-argument call verbatim for ~30 covariates; that boilerplate is
# factored into the helper below.  Labels, derived columns, asserts and
# their ordering are unchanged.

# Compute one floor-stratified summary row.
#   var   : name of the column in `dt` to summarise
#   label : row label passed through as Yname (kept byte-identical to
#           the strings matched downstream in Outcome_list)
#   id    : clustering-id column ("hhid" for household/maternal rows,
#           "personid" for child rows)
# Reads `dt` from the enclosing environment at call time, so rows that
# depend on derived columns must be computed after the matching `:=`.
floor_strat_row <- function(var, label, id = "hhid") {
  mean_se_strat(
    Y_0 = dt[[var]][dt$floor == 0],
    Y_1 = dt[[var]][dt$floor == 1],
    Yname = label,
    id_0 = dt[[id]][dt$floor == 0],
    id_1 = dt[[id]][dt$floor == 1]
  )
}

# mom's age
row_momage = floor_strat_row("momage", "Mother's age, years")

# mom's educational attainment: indicator columns derived from momedu
# (data.table `:=` mutates dt in place)
dt = dt[, edu_none := ifelse(momedu=="No education", 1, 0)]
dt = dt[, edu_primary := ifelse(momedu=="Primary (1-5y)", 1, 0)]
dt = dt[, edu_secondary := ifelse(momedu=="Secondary (>5y)", 1, 0)]
row_momedu_none = floor_strat_row("edu_none", "No education")
row_momedu_primary = floor_strat_row("edu_primary", "At least some primary education")
row_momedu_secondary = floor_strat_row("edu_secondary", "At least some secondary education")

# mom's height (cm)
row_momheight = floor_strat_row("momheight", "Mother's height, cm")

# compound size
row_Nind18 = floor_strat_row("Nlt18", "# individuals living in compound <=18 yrs")
row_Nind = floor_strat_row("Ncomp", "Total individuals living in compound")

# HFIAS category (binary secure/insecure); 999 flags unexpected levels.
# The assert errors if any hfiacat is NA.
assert_that(names(table(is.na(dt$hfiacat)))=="FALSE")
dt = dt[, foodsecure := ifelse(hfiacat == "Food Secure", 1,
                        ifelse(hfiacat == "Food Insecure", 0, 999))]
row_foodsecure = floor_strat_row("foodsecure", "Food secure")

# housing quality and electricity
row_electricity = floor_strat_row("elec", "Has electricity")
row_walls = floor_strat_row("walls", "Has improved wall materials")
row_roof = floor_strat_row("roof", "Has improved roof material")

# asset ownership indicators
row_wardrobe = floor_strat_row("asset_wardrobe", "Owns >=1 wardrobe")
row_table = floor_strat_row("asset_table", "Owns >=1 table")
row_chair = floor_strat_row("asset_chair", "Owns >=1 chair")
row_khat = floor_strat_row("asset_khat", "Owns >=1 khat")
row_chouki = floor_strat_row("asset_chouki", "Owns >=1 chouki")
row_tv = floor_strat_row("asset_tv", "Owns >=1 tv")
row_refrigerator = floor_strat_row("asset_refrig", "Owns >=1 fridge")
row_bike = floor_strat_row("asset_bike", "Owns >=1 bicycle")
row_moto = floor_strat_row("asset_moto", "Owns >=1 motorcycle")
row_sewmach = floor_strat_row("asset_sewmach", "Owns >=1 sewing machine")
row_mobile = floor_strat_row("asset_mobile", "Owns >=1 mobile phone")

# intervention arm indicators (computed as in the original; the final
# table below does not include an arm section)
assert_that(names(table(is.na(dt$tr)))=="FALSE")
dt = dt[, arm_control := ifelse(tr=="Control", 1, 0)]
dt = dt[, arm_HW := ifelse(tr=="Handwashing", 1, 0)]
dt = dt[, arm_N := ifelse(tr=="Nutrition", 1, 0)]
dt = dt[, arm_NWSH := ifelse(tr=="Nutrition + WSH", 1, 0)]
dt = dt[, arm_S := ifelse(tr=="Sanitation", 1, 0)]
dt = dt[, arm_W := ifelse(tr=="Water", 1, 0)]
dt = dt[, arm_WSH := ifelse(tr=="WSH", 1, 0)]
dt = dt[, arm_missing := ifelse(tr=="", 1, 0)]
row_arm_control = floor_strat_row("arm_control", "Control")
row_arm_HW = floor_strat_row("arm_HW", "Handwashing")
row_arm_N = floor_strat_row("arm_N", "Nutrition")
row_arm_NWSH = floor_strat_row("arm_NWSH", "Nutrition + WSH")
row_arm_S = floor_strat_row("arm_S", "Sanitation")
row_arm_W = floor_strat_row("arm_W", "Water")
row_arm_WSH = floor_strat_row("arm_WSH", "Combined WSH")

# child age category (clustered on personid, not hhid)
assert_that(names(table(is.na(dt$agecat)))=="FALSE") # Should output TRUE
dt = dt[, age_0to5 := ifelse(agecat=="0-5 years", 1, 0)]
dt = dt[, age_6to12 := ifelse(agecat =="6-12 years", 1, 0)]
row_child0to5 = floor_strat_row("age_0to5", "Child's age 0-5 years", id = "personid")
# Note KE data has 6-15 years category
row_child6to12 = floor_strat_row("age_6to12", "Child's age 6-12 years", id = "personid")

# child mean age, years
row_childageyr = floor_strat_row("agey", "Child age, years", id = "personid")

# survey month -- move this to table 2
dt$svymonth <- substr(dt$svydate, 3, 5)

# child sex
assert_that(names(table(is.na(dt$sex)))=="FALSE")
dt = dt[, sexmale := ifelse(sex=="male", 1, 0)]
row_childmale = floor_strat_row("sexmale", "Male, %", id = "personid")
########################################################
# Make tables
########################################################
# Assemble the household-characteristics table: bind the reported rows
# in presentation order, format point estimates, reshape wide by Site
# (fin/unf), then insert section header rows.

# Long table of the rows actually reported (arm rows are omitted, as in
# the original).  Named summary_tab so base::table is not shadowed.
summary_tab = bind_rows(
  row_momage, row_momheight, row_momedu_primary, row_momedu_secondary,
  row_Nind18, row_Nind,
  row_foodsecure,
  row_electricity, row_walls, row_roof, row_tv,
  row_bike, row_moto, row_mobile,
  row_childageyr, row_childmale
)
# Continuous variables are reported on their natural scale; all other
# rows are proportions and are scaled to percent (x100) by pt.est.f.
noscale <- c("Mother's age, years", "Mother's height, cm",
             "# individuals living in compound <=18 yrs",
             "Total individuals living in compound",
             "Child age, years"
)
summary_tab <- summary_tab %>%
  mutate(results = if_else(col %in% noscale,
                           pt.est.f(
                             mean = Mean,
                             digits = 1,
                             scale = 1
                           ),
                           pt.est.f(
                             mean = Mean,
                             digits = 1,
                             scale = 100
                           )))
# Presentation order of the rows (labels must match the Yname strings
# used above byte-for-byte).
Outcome_list <- c("Mother's age, years", "Mother's height, cm",
                  "At least some primary education", "At least some secondary education",
                  "# individuals living in compound <=18 yrs", "Total individuals living in compound",
                  "Food secure",
                  "Has electricity", "Has improved wall materials", "Has improved roof material", "Owns >=1 tv",
                  "Owns >=1 bicycle", "Owns >=1 motorcycle", "Owns >=1 mobile phone",
                  "Child age, years", "Male, %"
)
# (The original added an `obs = row_number()` column here "to fix" a
# spread() uniqueness error, but obs was dropped by the select() calls
# below and so had no effect; removed.)
table_wide = summary_tab %>% select(col, results, Site) %>%
  spread(Site, results)
table_wide_N = summary_tab %>% dplyr::select(col, N, Site) %>%
  spread(Site, N)
# Both spreads are keyed on `col`, so their rows come back in the same
# order; verify alignment before cbind-ing.  (The original compared the
# nonexistent `Outcome` column, which made the check vacuously TRUE.)
assert_that(all(table_wide$col == table_wide_N$col))
table_wide_all=data.frame(cbind(
  as.character(table_wide$col),
  table_wide_N$fin,
  table_wide$fin,
  table_wide_N$unf,
  table_wide$unf
))
colnames(table_wide_all) = c(
  "Variable",
  "N, finished", "Result, finished",
  "N, unfinished", "Results, unfinished"
)
# Reorder the rows to presentation order.
table_wide_all_ordered <- table_wide_all %>%
  arrange(match(Variable, Outcome_list))
# Section header rows: blank except for the section name.
maternal_header = c("Maternal", rep("", 4))
compound_header = c("Compound", rep("", 4))
household_header = c("Household", rep("", 4))
# arm_header = c("Intervention assignment", rep("", 4))
child_header = c("Child", rep("", 4))
names(maternal_header) = colnames(table_wide_all_ordered)
names(compound_header) = colnames(table_wide_all_ordered)
names(household_header) = colnames(table_wide_all_ordered)
# names(arm_header) = colnames(table_wide_all_ordered)
names(child_header) = colnames(table_wide_all_ordered)
# Interleave section headers with the corresponding row ranges.
table_wide_out = bind_rows(
  maternal_header, table_wide_all_ordered[1:4,],
  compound_header, table_wide_all_ordered[5:6,],
  household_header, table_wide_all_ordered[7:14,],
  child_header, table_wide_all_ordered[15:16,]
)
table_bd_full <- table_wide_out
########################################################
# Save tables
########################################################
write.csv(table_bd_full, file=paste0(tab_path, "/table-characteristics-pos-bd.txt"), row.names=FALSE)
write.csv(table_bd_full, file=paste0(tab_path, "/table-characteristics-pos-bd.csv"), row.names=FALSE)
| /3-table-scripts/3a-table-characteristics-pos-bd.R | no_license | gabbyrbh/washb-floors-public | R | false | false | 13,876 | r | #######################################
# WASH Benefits STH finished floor analysis - Bangladesh
# Table 1 summary stats
# Sensitivity analysis trimming by
# extreme propensity score values
#######################################
# configure directories, load libraries and base functions
# (0-config.R presumably defines clean_bdata_path, bpred_path, tab_path
# and helper functions such as mean_se_strat/pt.est.f -- confirm.)
source(paste0(here::here(), "/0-config.R"))
library(dplyr)
library(data.table)
# load the cleaned Bangladesh analysis data
bd <- data.table((readRDS(clean_bdata_path)))
# Trim observations with extreme propensity scores: keep only ids whose
# predicted score lies within [min + 0.05, max - 0.05] of the observed
# score range (potential positivity violations are dropped).
bpred = readRDS(bpred_path)
bpred$dataid = as.character(bpred$dataid)
btrim_lower = min(bpred$pred, na.rm=TRUE) + 0.05
btrim_upper = max(bpred$pred, na.rm=TRUE) - 0.05
bpred_drop = bpred %>% filter(pred<btrim_lower | pred>btrim_upper) %>%
  dplyr::select(dataid) %>% pull()
# Echo row counts before/after trimming (interactive check).
nrow(bd)
bd = bd %>% filter(!dataid %in% bpred_drop)
nrow(bd)
#---------------------------
# List and label covariates
#---------------------------
# Work on a data.table copy of the trimmed data.
dt <- as.data.table(bd)
# Household level characteristics
#----------------------------------------
# mom's age
#----------------------------------------
row_momage = mean_se_strat(
Y_0 = dt$momage[dt$floor == 0],
Y_1 = dt$momage[dt$floor == 1],
Yname = "Mother's age, years",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# mom's educ attainment (years)
#----------------------------------------
dt = dt[, edu_none := ifelse(momedu=="No education", 1, 0)]
dt = dt[, edu_primary := ifelse(momedu=="Primary (1-5y)", 1, 0)]
dt = dt[, edu_secondary := ifelse(momedu=="Secondary (>5y)", 1, 0)]
row_momedu_none = mean_se_strat(
Y_0 = dt$edu_none[dt$floor == 0],
Y_1 = dt$edu_none[dt$floor == 1],
Yname = "No education",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_momedu_primary = mean_se_strat(
Y_0 = dt$edu_primary[dt$floor == 0],
Y_1 = dt$edu_primary[dt$floor == 1],
Yname = "At least some primary education",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_momedu_secondary = mean_se_strat(
Y_0 = dt$edu_secondary[dt$floor == 0],
Y_1 = dt$edu_secondary[dt$floor == 1],
Yname = "At least some secondary education",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# mom's height (cm)
#----------------------------------------
row_momheight = mean_se_strat(
Y_0 = dt$momheight[dt$floor == 0],
Y_1 = dt$momheight[dt$floor == 1],
Yname = "Mother's height, cm",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# N individuals in HH <=18 yrs
#----------------------------------------
row_Nind18 = mean_se_strat(
Y_0 = dt$Nlt18[dt$floor == 0],
Y_1 = dt$Nlt18[dt$floor == 1],
Yname = "# individuals living in compound <=18 yrs",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# N individuals in compound
#----------------------------------------
row_Nind = mean_se_strat(
Y_0 = dt$Ncomp[dt$floor == 0],
Y_1 = dt$Ncomp[dt$floor == 1],
Yname = "Total individuals living in compound",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# HFIAS category (binary secure/insecure)
#----------------------------------------
# Guard: hfiacat must have no missing values before recoding.
assert_that(names(table(is.na(dt$hfiacat)))=="FALSE")
# Recode to a 0/1 indicator; 999 is a sentinel for unexpected levels.
# NOTE(review): any level other than exactly "Food Secure"/"Food Insecure"
# (e.g. "Mildly/Moderately/Severely Food Insecure") would be coded 999 and
# inflate the mean computed below -- confirm the levels of hfiacat here.
dt = dt[, foodsecure := ifelse(hfiacat == "Food Secure", 1,
                               ifelse(hfiacat == "Food Insecure", 0, 999))]
row_foodsecure = mean_se_strat(
  Y_0 = dt$foodsecure[dt$floor == 0],
  Y_1 = dt$foodsecure[dt$floor == 1],
  Yname = "Food secure",
  id_0 = dt$hhid[dt$floor == 0],
  id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# has electricity
#----------------------------------------
row_electricity = mean_se_strat(
Y_0 = dt$elec[dt$floor == 0],
Y_1 = dt$elec[dt$floor == 1],
Yname = "Has electricity",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# has improved wall materials
#----------------------------------------
row_walls = mean_se_strat(
Y_0 = dt$walls[dt$floor == 0],
Y_1 = dt$walls[dt$floor == 1],
Yname = "Has improved wall materials",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# has improved roof material
#----------------------------------------
row_roof = mean_se_strat(
Y_0 = dt$roof[dt$floor == 0],
Y_1 = dt$roof[dt$floor == 1],
Yname = "Has improved roof material",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# assets
#----------------------------------------
row_wardrobe = mean_se_strat(
Y_0 = dt$asset_wardrobe[dt$floor == 0],
Y_1 = dt$asset_wardrobe[dt$floor == 1],
Yname = "Owns >=1 wardrobe",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_table = mean_se_strat(
Y_0 = dt$asset_table[dt$floor == 0],
Y_1 = dt$asset_table[dt$floor == 1],
Yname = "Owns >=1 table",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_chair = mean_se_strat(
Y_0 = dt$asset_chair[dt$floor == 0],
Y_1 = dt$asset_chair[dt$floor == 1],
Yname = "Owns >=1 chair",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_khat = mean_se_strat(
Y_0 = dt$asset_khat[dt$floor == 0],
Y_1 = dt$asset_khat[dt$floor == 1],
Yname = "Owns >=1 khat",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_chouki = mean_se_strat(
Y_0 = dt$asset_chouki[dt$floor == 0],
Y_1 = dt$asset_chouki[dt$floor == 1],
Yname = "Owns >=1 chouki",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_tv = mean_se_strat(
Y_0 = dt$asset_tv[dt$floor == 0],
Y_1 = dt$asset_tv[dt$floor == 1],
Yname = "Owns >=1 tv",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_refrigerator = mean_se_strat(
Y_0 = dt$asset_refrig[dt$floor == 0],
Y_1 = dt$asset_refrig[dt$floor == 1],
Yname = "Owns >=1 fridge",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_bike = mean_se_strat(
Y_0 = dt$asset_bike[dt$floor == 0],
Y_1 = dt$asset_bike[dt$floor == 1],
Yname = "Owns >=1 bicycle",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_moto = mean_se_strat(
Y_0 = dt$asset_moto[dt$floor == 0],
Y_1 = dt$asset_moto[dt$floor == 1],
Yname = "Owns >=1 motorcycle",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_sewmach = mean_se_strat(
Y_0 = dt$asset_sewmach[dt$floor == 0],
Y_1 = dt$asset_sewmach[dt$floor == 1],
Yname = "Owns >=1 sewing machine",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_mobile = mean_se_strat(
Y_0 = dt$asset_mobile[dt$floor == 0],
Y_1 = dt$asset_mobile[dt$floor == 1],
Yname = "Owns >=1 mobile phone",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# intervention arm
#----------------------------------------
assert_that(names(table(is.na(dt$tr)))=="FALSE")
dt = dt[, arm_control := ifelse(tr=="Control", 1, 0)]
dt = dt[, arm_HW := ifelse(tr=="Handwashing", 1, 0)]
dt = dt[, arm_N := ifelse(tr=="Nutrition", 1, 0)]
dt = dt[, arm_NWSH := ifelse(tr=="Nutrition + WSH", 1, 0)]
dt = dt[, arm_S := ifelse(tr=="Sanitation", 1, 0)]
dt = dt[, arm_W := ifelse(tr=="Water", 1, 0)]
dt = dt[, arm_WSH := ifelse(tr=="WSH", 1, 0)]
dt = dt[, arm_missing := ifelse(tr=="", 1, 0)]
row_arm_control = mean_se_strat(
Y_0 = dt$arm_control[dt$floor == 0],
Y_1 = dt$arm_control[dt$floor == 1],
Yname = "Control",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_HW = mean_se_strat(
Y_0 = dt$arm_HW[dt$floor == 0],
Y_1 = dt$arm_HW[dt$floor == 1],
Yname = "Handwashing",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_N = mean_se_strat(
Y_0 = dt$arm_N[dt$floor == 0],
Y_1 = dt$arm_N[dt$floor == 1],
Yname = "Nutrition",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_NWSH = mean_se_strat(
Y_0 = dt$arm_NWSH[dt$floor == 0],
Y_1 = dt$arm_NWSH[dt$floor == 1],
Yname = "Nutrition + WSH",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_S = mean_se_strat(
Y_0 = dt$arm_S[dt$floor == 0],
Y_1 = dt$arm_S[dt$floor == 1],
Yname = "Sanitation",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_W = mean_se_strat(
Y_0 = dt$arm_W[dt$floor == 0],
Y_1 = dt$arm_W[dt$floor == 1],
Yname = "Water",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
row_arm_WSH = mean_se_strat(
Y_0 = dt$arm_WSH[dt$floor == 0],
Y_1 = dt$arm_WSH[dt$floor == 1],
Yname = "Combined WSH",
id_0 = dt$hhid[dt$floor == 0],
id_1 = dt$hhid[dt$floor == 1]
)
#----------------------------------------
# child agecat
#----------------------------------------
assert_that(names(table(is.na(dt$agecat)))=="FALSE") # Should output TRUE
dt = dt[, age_0to5 := ifelse(agecat=="0-5 years", 1, 0)]
dt = dt[, age_6to12 := ifelse(agecat =="6-12 years", 1, 0)]
row_child0to5 = mean_se_strat(
Y_0 = dt$age_0to5[dt$floor == 0],
Y_1 = dt$age_0to5[dt$floor == 1],
Yname = "Child's age 0-5 years",
id_0 = dt$personid[dt$floor == 0],
id_1 = dt$personid[dt$floor == 1]
)
# Note KE data has 6-15 years category
row_child6to12 = mean_se_strat(
Y_0 = dt$age_6to12[dt$floor == 0],
Y_1 = dt$age_6to12[dt$floor == 1],
Yname = "Child's age 6-12 years",
id_0 = dt$personid[dt$floor == 0],
id_1 = dt$personid[dt$floor == 1]
)
#----------------------------------------
# child mean age, years
#----------------------------------------
row_childageyr = mean_se_strat(
Y_0 = dt$agey[dt$floor == 0],
Y_1 = dt$agey[dt$floor == 1],
Yname = "Child age, years",
id_0 = dt$personid[dt$floor == 0],
id_1 = dt$personid[dt$floor == 1]
)
#----------------------------------------
# month -- move this to table 2
#----------------------------------------
dt$svymonth <- substr(dt$svydate, 3, 5)
#----------------------------------------
# child sex
#----------------------------------------
assert_that(names(table(is.na(dt$sex)))=="FALSE")
dt = dt[, sexmale := ifelse(sex=="male", 1, 0)]
row_childmale = mean_se_strat(
Y_0 = dt$sexmale[dt$floor == 0],
Y_1 = dt$sexmale[dt$floor == 1],
Yname = "Male, %",
id_0 = dt$personid[dt$floor == 0],
id_1 = dt$personid[dt$floor == 1]
)
########################################################
# Make tables
########################################################
# Table of household characteristics
table = bind_rows(
row_momage, row_momheight, row_momedu_primary, row_momedu_secondary,
row_Nind18, row_Nind,
row_foodsecure,
row_electricity, row_walls, row_roof, row_tv,
row_bike, row_moto, row_mobile,
row_childageyr, row_childmale
)
noscale <- c("Mother's age, years", "Mother's height, cm",
"# individuals living in compound <=18 yrs",
"Total individuals living in compound",
"Child age, years"
)
table <- table %>%
mutate(results = if_else(col %in% noscale,
pt.est.f(
mean = Mean,
digits = 1,
scale = 1
),
pt.est.f(
mean = Mean,
digits = 1,
scale = 100
)))
Outcome_list <- c("Mother's age, years", "Mother's height, cm",
"At least some primary education", "At least some secondary education",
"# individuals living in compound <=18 yrs", "Total individuals living in compound",
"Food secure",
"Has electricity", "Has improved wall materials", "Has improved roof material", "Owns >=1 tv",
"Owns >=1 bicycle", "Owns >=1 motorcycle", "Owns >=1 mobile phone",
"Child age, years", "Male, %"
)
# To fix error: Each row of output must be identified by a unique combination of keys.
table <- table %>%
  dplyr::mutate(obs = row_number())
# Reshape to one row per variable, with one column per Site level.
table_wide = table %>% select(col, results, Site) %>%
  spread(Site, results)
table_wide_N = table %>% dplyr::select(col, N, Site) %>%
  spread(Site, N)
# Sanity check that both wide tables list the variables in the same order.
# BUG FIX: the original compared table_wide$Outcome, but neither data frame
# has an "Outcome" column (the key column is "col"), so NULL == NULL made
# all(logical(0)) vacuously TRUE. Compare the actual key column instead.
assert_that(all(table_wide$col == table_wide_N$col))
# Interleave Ns and results for the "fin" (finished) and "unf" (unfinished)
# site columns produced by spread().
table_wide_all=data.frame(cbind(
  as.character(table_wide$col),
  table_wide_N$fin,
  table_wide$fin,
  table_wide_N$unf,
  table_wide$unf
))
colnames(table_wide_all) = c(
  "Variable",
  "N, finished", "Result, finished",
  "N, unfinished", "Results, unfinished"
)
# Reorder the rows to match the display order defined in Outcome_list.
table_wide_all_ordered <- table_wide_all %>%
  arrange(match(Variable, Outcome_list))
maternal_header = c("Maternal", rep("", 4))
compound_header = c("Compound", rep("", 4))
household_header = c("Household", rep("", 4))
# arm_header = c("Intervention assignment", rep("", 4))
child_header = c("Child", rep("", 4))
names(maternal_header) = colnames(table_wide_all_ordered)
names(compound_header) = colnames(table_wide_all_ordered)
names(household_header) = colnames(table_wide_all_ordered)
# names(arm_header) = colnames(table_wide_all_ordered)
names(child_header) = colnames(table_wide_all_ordered)
table_wide_out = bind_rows(
maternal_header, table_wide_all_ordered[1:4,],
compound_header, table_wide_all_ordered[5:6,],
household_header, table_wide_all_ordered[7:14,],
child_header, table_wide_all_ordered[15:16,]
)
table_bd_full <- table_wide_out
########################################################
# Save tables
########################################################
write.csv(table_bd_full, file=paste0(tab_path, "/table-characteristics-pos-bd.txt"), row.names=FALSE)
write.csv(table_bd_full, file=paste0(tab_path, "/table-characteristics-pos-bd.csv"), row.names=FALSE)
|
#source('style.R')
other_randIndex <- tabItem(tabName = 'other_randIndex',
fluidRow(
column(
width = 10,
offset = 1,
style = 'padding-left: 0px; padding-right: -5px;',
box(
id = 'randIndexDocum',
width = NULL,
style = measure_title_style,
h3("The Adjusted Rand Index")
),
hidden(
div(id = 'randIndexDocumBox',
fluidRow(class = 'documRow',
column(width = 12,
offset = 0,
box(title = randIntex_docum_text,
width = NULL,
style = measure_title_style)
)
)
)
)
)
),
fluidRow(
column(
width = 5,
offset = 1,
fluidRow(
box(
width = NULL,
p(file_upload_text),
style = 'text-align: center;'
)
),
fluidRow(
box(
width = NULL,
p(file_struct_text),
look_down,
style = 'text-align: center;'
)
)
)
,
column(width = 5,
box(
width = NULL,
fileInput(inputId = 'randIndexInput',
label = 'Browse for .csv files'),
actionButton(
inputId = 'randIndexRun',
label = 'calculate'
)
),
style = "text-align: center;"
)
),
fluidRow(class = 'tabStyle',
column(
width = 5,
offset = 1,
style = 'padding: 0px;',
uiOutput('ui_randIndex')
),
column(width = 5,
shinyWidgets::dropMenu(
div(id = 'randIndexDrop',
fluidRow(class = 'style_valuebox_OUTPUT_cyan',
column(
width = 12,
valueBoxOutput(outputId = 'randIndex', width = NULL)
)
)
),
HTML(kableExtra::kable(t(randIndex_output_description)) %>%
kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri')),
trigger = 'mouseenter',
theme = 'translucent',
placement = 'left-start')
)
))
# Run the adjusted Rand index computation on `data` and render the result
# into the 'randIndex' value box, including a download button for the full
# results and a warning badge when the computation raised warnings.
#
# Args:
#   input:  shiny input object (unused here; kept for the standard
#           module-style signature).
#   output: shiny output object the value box is written to.
#   data:   uploaded data set forwarded to randMain().
#
# Side effects: assigns the full result table to the global `l_randIndex`
# (via <<-) so the 'randIndexFullDown' download handler defined elsewhere
# can access it.
randIndexOut <- function(input, output, data) {
  tryCatch({
    # warning_handler() evaluates the expression and (presumably) records
    # any warning message in the global `msg` -- see its definition.
    vals_randIndex <- list('vals' = warning_handler(randMain(data)),
                           'warn' = msg)
    d_randIndex <- warning_handler(t(data.frame(t(vals_randIndex$vals))))
    # Global assignment consumed by the download handler.
    l_randIndex <<- warning_handler(as.data.frame(d_randIndex))
    output$randIndex <- renderValueBox({
      valueBox(
        subtitle = p(HTML(paste0(
          kableExtra::kable(d_randIndex, format = 'html') %>%
            kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri'),
          # Show a warning badge only when the computation produced warnings.
          if (!is.null(vals_randIndex$warn)) {
            circleButton(inputId = 'warningButton',
                         icon = icon("exclamation"),
                         size = 's')
          }
        )),
        div(
          downloadButton(outputId = 'randIndexFullDown',
                         label = 'Full Results'),
          style = 'text-align: center;'
        )),
        value = ''
      )
    })
  }, error = function(e) {
    # On failure, show the invalid-data placeholder instead of the result.
    invalid_data(output, 'randIndex')
    print(paste('randIndex error happened: ', e))  # typo "happend" fixed
  }, warning = function(w) {
    invalid_data(output, 'randIndex')
    print(paste('randIndex warning happened: ', w))
  })
}
| /s_randIndex.R | no_license | Carnuntum/agree | R | false | false | 5,323 | r | #source('style.R')
other_randIndex <- tabItem(tabName = 'other_randIndex',
fluidRow(
column(
width = 10,
offset = 1,
style = 'padding-left: 0px; padding-right: -5px;',
box(
id = 'randIndexDocum',
width = NULL,
style = measure_title_style,
h3("The Adjusted Rand Index")
),
hidden(
div(id = 'randIndexDocumBox',
fluidRow(class = 'documRow',
column(width = 12,
offset = 0,
box(title = randIntex_docum_text,
width = NULL,
style = measure_title_style)
)
)
)
)
)
),
fluidRow(
column(
width = 5,
offset = 1,
fluidRow(
box(
width = NULL,
p(file_upload_text),
style = 'text-align: center;'
)
),
fluidRow(
box(
width = NULL,
p(file_struct_text),
look_down,
style = 'text-align: center;'
)
)
)
,
column(width = 5,
box(
width = NULL,
fileInput(inputId = 'randIndexInput',
label = 'Browse for .csv files'),
actionButton(
inputId = 'randIndexRun',
label = 'calculate'
)
),
style = "text-align: center;"
)
),
fluidRow(class = 'tabStyle',
column(
width = 5,
offset = 1,
style = 'padding: 0px;',
uiOutput('ui_randIndex')
),
column(width = 5,
shinyWidgets::dropMenu(
div(id = 'randIndexDrop',
fluidRow(class = 'style_valuebox_OUTPUT_cyan',
column(
width = 12,
valueBoxOutput(outputId = 'randIndex', width = NULL)
)
)
),
HTML(kableExtra::kable(t(randIndex_output_description)) %>%
kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri')),
trigger = 'mouseenter',
theme = 'translucent',
placement = 'left-start')
)
))
# Run the adjusted Rand index computation on `data` and render the result
# into the 'randIndex' value box, including a download button for the full
# results and a warning badge when the computation raised warnings.
#
# Args:
#   input:  shiny input object (unused here; kept for the standard
#           module-style signature).
#   output: shiny output object the value box is written to.
#   data:   uploaded data set forwarded to randMain().
#
# Side effects: assigns the full result table to the global `l_randIndex`
# (via <<-) so the 'randIndexFullDown' download handler defined elsewhere
# can access it.
randIndexOut <- function(input, output, data) {
  tryCatch({
    # warning_handler() evaluates the expression and (presumably) records
    # any warning message in the global `msg` -- see its definition.
    vals_randIndex <- list('vals' = warning_handler(randMain(data)),
                           'warn' = msg)
    d_randIndex <- warning_handler(t(data.frame(t(vals_randIndex$vals))))
    # Global assignment consumed by the download handler.
    l_randIndex <<- warning_handler(as.data.frame(d_randIndex))
    output$randIndex <- renderValueBox({
      valueBox(
        subtitle = p(HTML(paste0(
          kableExtra::kable(d_randIndex, format = 'html') %>%
            kableExtra::kable_styling('basic', font_size = 15, html_font = 'calibri'),
          # Show a warning badge only when the computation produced warnings.
          if (!is.null(vals_randIndex$warn)) {
            circleButton(inputId = 'warningButton',
                         icon = icon("exclamation"),
                         size = 's')
          }
        )),
        div(
          downloadButton(outputId = 'randIndexFullDown',
                         label = 'Full Results'),
          style = 'text-align: center;'
        )),
        value = ''
      )
    })
  }, error = function(e) {
    # On failure, show the invalid-data placeholder instead of the result.
    invalid_data(output, 'randIndex')
    print(paste('randIndex error happened: ', e))  # typo "happend" fixed
  }, warning = function(w) {
    invalid_data(output, 'randIndex')
    print(paste('randIndex warning happened: ', w))
  })
}
|
library(ggplot2)
library(tidyverse)
library(lubridate)
#FRANCE
# Estimate severe cases from the recent trend in cumulative deaths: fit a
# linear model of total deaths over a trailing window, lag the fitted slope
# back a few days, and scale it for comparison against reported severe cases.
france = read.csv("~/.../Data/france_cases_deaths_severe_recovered_ministresante.csv")
france$date = ymd(france$date)
france$dateind = as.numeric(rownames(france))
france_deaths = france %>% dplyr::select(dateind, deaths_ttl) %>% na.omit()
france_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
# Scaling factor applied to the death slope to approximate severe cases.
# (Renamed from `c`, which masked base::c.)
severe_scale = 15
for(i in window_size:nrow(france_deaths)) {
  # Fit deaths ~ time on the trailing window ending at day i.
  # (Renamed from `lm`, which masked stats::lm.)
  fit = lm(deaths_ttl ~ dateind, data = france_deaths[(i-(window_size-1)):i,])
  # Store the window's slope, shifted back by lag_days.
  france_deaths$death_slope[i-lag_days] = fit$coeff[2]
}
france = france %>% left_join(france_deaths)
france %>%
  ggplot() +
  geom_line(aes(x = date, y = severe_ttl), color = "red") +
  geom_line(aes(x = date, y = deaths_ttl), color = "blue") +
  geom_line(aes(x = date, y = death_slope*severe_scale), color = "green") +
  labs(title = "Severe Case Prediction from Deaths: France",
       subtitle = "Blue = Ttl Deaths, Red = Ttl Severe, Green = Predicted Severe",
       x = "Date", y = "Cases") +
  theme(plot.title = element_text(hjust=0.5),
        plot.subtitle = element_text(hjust=0.5))
#NETHERLANDS
neth_hosp = read.csv("~/Documents/School/Grad/Q3/CS472/COVID19_projections/Data/netherlands_hospitalization_statista.csv")
neth_deaths = read.csv("~/Documents/School/Grad/Q3/CS472/COVID19_projections/Data/world_cases_deaths_recovered_JHU.csv")
neth = neth_deaths[(neth_deaths$country == "Netherlands" & neth_deaths$province == ""),] %>%
left_join(neth_hosp) %>%
dplyr::select(date, deaths, hospitalizations_ttl)
neth$date = ymd(neth$date)
neth$dateind = as.numeric(rownames(neth))
neth_deaths = neth %>% dplyr::select(dateind, deaths) %>% na.omit()
neth_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
c = 50
for(i in window_size:nrow(neth_deaths)) {
lm = lm(deaths ~ dateind, data = neth_deaths[(i-(window_size-1)):i,])
neth_deaths$death_slope[i-lag_days] = lm$coeff[2]
}
neth = neth %>% left_join(neth_deaths)
neth %>%
ggplot() +
geom_line(aes(x = date, y = deaths), color = "blue") +
geom_line(aes(x = date, y = hospitalizations_ttl), color = "red") +
geom_line(aes(x = date, y = death_slope*c), color = "green") +
labs(title = "Severe Case Prediction from Deaths: Netherlands",
subtitle = "Blue = Ttl Deaths, Red = Ttl Hospitalizations, Green = Predicted Hospitalizations",
x = "Date", y = "Cases") +
theme(plot.title = element_text(hjust=0.5),
plot.subtitle = element_text(hjust=0.5))
#Note: Model seems to be able to predict total # in hospital per day pretty well -- but it is not fitted for total hospitalization/severe cases cumulative
#Impossible to derive one from the other since if hospital counts remain constant over two days, cannot tell if one in one out or same people stayed in
#Note: severe cases not same as hospitalizations!
#CHINA
china = read.csv("~/.../COVID19_projections/Data/china_cases_severe_deaths_nhc.csv") %>%
dplyr::select(date, severe_ttl, deaths_ttl)
china = china[1:80,]
china$date = ymd(china$date)
china$dateind = as.numeric(rownames(china))
china_deaths = china %>% dplyr::select(dateind, deaths_ttl) %>% na.omit()
china_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
c = 100
for(i in window_size:nrow(china_deaths)) {
lm = lm(deaths_ttl ~ dateind, data = china_deaths[(i-(window_size-1)):i,])
china_deaths$death_slope[i-lag_days] = lm$coeff[2]
}
china = china %>% left_join(china_deaths)
china %>%
ggplot() +
geom_line(aes(x = date, y = deaths_ttl), color = "blue") +
geom_line(aes(x = date, y = severe_ttl), color = "red") +
geom_line(aes(x = date, y = death_slope*c), color = "green") +
labs(title = "Severe Case Prediction from Deaths: China",
subtitle = "Blue = Ttl Deaths, Red = Ttl Severe, Green = Predicted Severe",
x = "Date", y = "Cases") +
theme(plot.title = element_text(hjust=0.5),
plot.subtitle = element_text(hjust=0.5))
| /Archive/predict_severe_from_deaths_vR.R | no_license | jiyingz/COVID19_projections | R | false | false | 4,015 | r | library(ggplot2)
library(tidyverse)
library(lubridate)
#FRANCE
france = read.csv("~/.../Data/france_cases_deaths_severe_recovered_ministresante.csv")
france$date = ymd(france$date)
france$dateind = as.numeric(rownames(france))
france_deaths = france %>% dplyr::select(dateind, deaths_ttl) %>% na.omit()
france_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
c = 15
for(i in window_size:nrow(france_deaths)) {
lm = lm(deaths_ttl ~ dateind, data = france_deaths[(i-(window_size-1)):i,])
france_deaths$death_slope[i-lag_days] = lm$coeff[2]
}
france = france %>% left_join(france_deaths)
france %>%
ggplot() +
geom_line(aes(x = date, y = severe_ttl), color = "red") +
geom_line(aes(x = date, y = deaths_ttl), color = "blue") +
geom_line(aes(x = date, y = death_slope*c), color = "green") +
labs(title = "Severe Case Prediction from Deaths: France",
subtitle = "Blue = Ttl Deaths, Red = Ttl Severe, Green = Predicted Severe",
x = "Date", y = "Cases") +
theme(plot.title = element_text(hjust=0.5),
plot.subtitle = element_text(hjust=0.5))
#NETHERLANDS
neth_hosp = read.csv("~/Documents/School/Grad/Q3/CS472/COVID19_projections/Data/netherlands_hospitalization_statista.csv")
neth_deaths = read.csv("~/Documents/School/Grad/Q3/CS472/COVID19_projections/Data/world_cases_deaths_recovered_JHU.csv")
neth = neth_deaths[(neth_deaths$country == "Netherlands" & neth_deaths$province == ""),] %>%
left_join(neth_hosp) %>%
dplyr::select(date, deaths, hospitalizations_ttl)
neth$date = ymd(neth$date)
neth$dateind = as.numeric(rownames(neth))
neth_deaths = neth %>% dplyr::select(dateind, deaths) %>% na.omit()
neth_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
c = 50
for(i in window_size:nrow(neth_deaths)) {
lm = lm(deaths ~ dateind, data = neth_deaths[(i-(window_size-1)):i,])
neth_deaths$death_slope[i-lag_days] = lm$coeff[2]
}
neth = neth %>% left_join(neth_deaths)
neth %>%
ggplot() +
geom_line(aes(x = date, y = deaths), color = "blue") +
geom_line(aes(x = date, y = hospitalizations_ttl), color = "red") +
geom_line(aes(x = date, y = death_slope*c), color = "green") +
labs(title = "Severe Case Prediction from Deaths: Netherlands",
subtitle = "Blue = Ttl Deaths, Red = Ttl Hospitalizations, Green = Predicted Hospitalizations",
x = "Date", y = "Cases") +
theme(plot.title = element_text(hjust=0.5),
plot.subtitle = element_text(hjust=0.5))
#Note: Model seems to be able to predict total # in hospital per day pretty well -- but it is not fitted for total hospitalization/severe cases cumulative
#Impossible to derive one from the other since if hospital counts remain constant over two days, cannot tell if one in one out or same people stayed in
#Note: severe cases not same as hospitalizations!
#CHINA
china = read.csv("~/.../COVID19_projections/Data/china_cases_severe_deaths_nhc.csv") %>%
dplyr::select(date, severe_ttl, deaths_ttl)
china = china[1:80,]
china$date = ymd(china$date)
china$dateind = as.numeric(rownames(china))
china_deaths = china %>% dplyr::select(dateind, deaths_ttl) %>% na.omit()
china_deaths$death_slope = NA
window_size = 15 #num days to slide moving window over
lag_days = 3
c = 100
for(i in window_size:nrow(china_deaths)) {
lm = lm(deaths_ttl ~ dateind, data = china_deaths[(i-(window_size-1)):i,])
china_deaths$death_slope[i-lag_days] = lm$coeff[2]
}
china = china %>% left_join(china_deaths)
china %>%
ggplot() +
geom_line(aes(x = date, y = deaths_ttl), color = "blue") +
geom_line(aes(x = date, y = severe_ttl), color = "red") +
geom_line(aes(x = date, y = death_slope*c), color = "green") +
labs(title = "Severe Case Prediction from Deaths: China",
subtitle = "Blue = Ttl Deaths, Red = Ttl Severe, Green = Predicted Severe",
x = "Date", y = "Cases") +
theme(plot.title = element_text(hjust=0.5),
plot.subtitle = element_text(hjust=0.5))
|
#' @title coefficient of efficiency
#' @description Nash Sutcliffe 1970 model efficiency coefficient is used to assess the predictive power of hydrological models.
#' @param observados vector of values observed.
#' @param estimados vector of regression model data.
#' @references ( Nash and Sutcliffe, 1970) \url{https://en.wikipedia.org/wiki/Nash-Sutcliffe_model_efficiency_coefficient} for more details.
#' @export
ce <- function(observados, estimados) {
  # Nash-Sutcliffe model efficiency: 1 - SS_residual / SS_total.
  # BUG FIX: the original computed sum(observados-estimados)^2 (square of
  # the sum, not sum of squares) and sum(observados-mean(observados)^2)
  # (squared the mean, not the deviations), which is not the
  # Nash-Sutcliffe formula.
  ss_res <- sum((observados - estimados)^2)
  ss_tot <- sum((observados - mean(observados))^2)
  1 - (ss_res / ss_tot)
}
| /Fgmutils/R/ce.R | no_license | ingted/R-Examples | R | false | false | 543 | r | #' @title coefficient of efficiency
#' @description Nash Sutcliffe 1970 model efficiency coefficient is used to assess the predictive power of hydrological models.
#' @param observados vector of values observed.
#' @param estimados vector of regression model data.
#' @references ( Nash and Sutcliffe, 1970) \url{https://en.wikipedia.org/wiki/Nash-Sutcliffe_model_efficiency_coefficient} for more details.
#' @export
ce <- function(observados, estimados) {
  # Nash-Sutcliffe model efficiency: 1 - SS_residual / SS_total.
  # BUG FIX: the original computed sum(observados-estimados)^2 (square of
  # the sum, not sum of squares) and sum(observados-mean(observados)^2)
  # (squared the mean, not the deviations), which is not the
  # Nash-Sutcliffe formula.
  ss_res <- sum((observados - estimados)^2)
  ss_tot <- sum((observados - mean(observados))^2)
  1 - (ss_res / ss_tot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect.R
\name{pub_connect}
\alias{pub_connect}
\title{pub_connect}
\usage{
pub_connect(s3dir = Sys.getenv("S3_DIR"), schema = Sys.getenv("SCHEMA"))
}
\arguments{
\item{s3dir}{The S3 route to save the query results}
\item{schema}{The existing schema in Athena}
}
\description{
This function connects to an Athena schema and sets the saving
directory in AWS' S3, so that queries automatically run against the
preventivadb database. The connection must be saved into a variable
in order to use it with the other functions in this package.
}
\examples{
con <- pub_connect()
}
| /man/pub_connect.Rd | no_license | plataformapreventiva/dbrsocial | R | false | true | 612 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect.R
\name{pub_connect}
\alias{pub_connect}
\title{pub_connect}
\usage{
pub_connect(s3dir = Sys.getenv("S3_DIR"), schema = Sys.getenv("SCHEMA"))
}
\arguments{
\item{s3dir}{The S3 route to save the query results}
\item{schema}{The existing schema in Athena}
}
\description{
This function connects to an Athena schema and sets the saving
directory in AWS' S3, so that queries automatically run against the
preventivadb database. The connection must be saved into a variable
in order to use it with the other functions in this package.
}
\examples{
con <- pub_connect()
}
|
# Backend/server of R app
# Performs sentiment analysis of tweets based on emotion and polarity classification
# The visualization of the sentiment class distributions is performed using ggplot2 package
# required packages
library(twitteR)
library(sentiment)
library(plyr)
library(ggplot2)
library(RColorBrewer)
# loading twitter credentials
#load("twitteR_credentials")
#registerTwitterOAuth(twitCred)
# SECURITY NOTE(review): Twitter API credentials are hard-coded in source.
# They should be moved to environment variables or a git-ignored config
# file, and the keys below revoked/rotated since they have been committed
# to version control.
api_key <- "Epoyc1VfIrHOyD6K5DIV4QlN3"
api_secret <- "STu3FNc87kimdiMV6Nw4ouWBKKo9Qbxgi6ultTvDVPCWSa0Kgs"
access_token <- "469047588-S8cXA0zY01AlcZUAQMm5afRszhkML3xR9QkKrXxe"
access_token_secret <- "zct6ERd2AkJhJwyXRUOf4x0pIjKlF0wIkPrc9ClDSFE4i"
# Authenticate with the Twitter API using the credentials above.
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# loading the helper functions
# (presumably these define the *SentimentAnal, *SentimentAnal1 and
# *SentimentAnal2 functions used below -- confirm in helpers*.R)
source('helpers.R')
source('helpers1.R')
source('helpers2.R')
shinyServer(function(input, output) {
# Step 1: Getting the tweets based on search terms
# cainfo="cacert.pem" is required for data access
# tweets <- reactive ({ searchTwitter(input$searchTerm,n=1000, lang="en") })
tweets <- reactive ({ searchTwitter(input$searchTerm,n=1000, lang="en") })
#tweets <- renderText({ input$searchTerm })
# Step2: Preprocessing to clean up the tweets
txtTweets <- reactive ({ preprocess_tweet (tweets()) })
output$plot_emotion <- renderPlot({
# Step 3: Emotion sentiment analysis
emotion <- emotionSentimentAnal(txtTweets())
# Step 4: Polarity sentiment analysis
polarity <- polaritySentimentAnal(txtTweets())
# Step 5: Store results in dataframe
results_df <- data.frame(text=txtTweets(), emotion=emotion, polarity=polarity)
# Step 6: Plot distribution of tweet sentiments
if (input$plot_opt == 'emotion') {
ggplot(results_df) +
geom_bar(aes(x=emotion, y=..count.., fill=emotion)) +
ggtitle(paste('Using Bayes Method Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Emotion Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
} else {
ggplot(results_df, aes()) +
geom_bar(aes(x=polarity, y=..count.., fill=polarity), width=0.6) +
ggtitle(paste('Using Bayes Method Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Polarity Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
}
})
output$plot_emotion1 <- renderPlot({
# Step 3: Emotion sentiment analysis
emotion <- emotionSentimentAnal1(txtTweets())
# Step 4: Polarity sentiment analysis
polarity <- polaritySentimentAnal1(txtTweets())
# Step 5: Store results in dataframe
results_df <- data.frame(text=txtTweets(), emotion=emotion, polarity=polarity)
# Step 6: Plot distribution of tweet sentiments
if (input$plot_opt == 'emotion') {
ggplot(results_df) +
geom_bar(aes(x=emotion, y=..count.., fill=emotion)) +
ggtitle(paste('Using MAXENT Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Emotion Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
} else {
ggplot(results_df, aes()) +
geom_bar(aes(x=polarity, y=..count.., fill=polarity), width=0.6) +
ggtitle(paste('Using MAXENT Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Polarity Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
}
})
output$plot_emotion2 <- renderPlot({
# Step 3: Emotion sentiment analysis
emotion <- emotionSentimentAnal2(txtTweets())
# Step 4: Polarity sentiment analysis
polarity <- polaritySentimentAnal2(txtTweets())
# Step 5: Store results in dataframe
results_df <- data.frame(text=txtTweets(), emotion=emotion, polarity=polarity)
# Step 6: Plot distribution of tweet sentiments
if (input$plot_opt == 'emotion') {
ggplot(results_df) +
geom_bar(aes(x=emotion, y=..count.., fill=emotion)) +
ggtitle(paste('Using HMM Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Emotion Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
} else {
ggplot(results_df, aes()) +
geom_bar(aes(x=polarity, y=..count.., fill=polarity), width=0.6) +
ggtitle(paste('Using HMM Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Polarity Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
}
})
}) | /server.R | no_license | VarshaDolas/Intention-Mining- | R | false | false | 9,187 | r | # Backend/server of R app
# Performs sentiment analysis of tweets based on emotion and polarity classification
# The visualization of the sentiment class distributions is performed using ggplot2 package
# required pakacges
library(twitteR)
library(sentiment)
library(plyr)
library(ggplot2)
library(RColorBrewer)
# loading twitter credentials
#load("twitteR_credentials")
#registerTwitterOAuth(twitCred)
api_key <- "Epoyc1VfIrHOyD6K5DIV4QlN3"
api_secret <- "STu3FNc87kimdiMV6Nw4ouWBKKo9Qbxgi6ultTvDVPCWSa0Kgs"
access_token <- "469047588-S8cXA0zY01AlcZUAQMm5afRszhkML3xR9QkKrXxe"
access_token_secret <- "zct6ERd2AkJhJwyXRUOf4x0pIjKlF0wIkPrc9ClDSFE4i"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# loading the helper functions
source('helpers.R')
source('helpers1.R')
source('helpers2.R')
shinyServer(function(input, output) {
# Step 1: Getting the tweets based on search terms
# cainfo="cacert.pem" is required for data access
# tweets <- reactive ({ searchTwitter(input$searchTerm,n=1000, lang="en") })
# Fetch up to 1000 English tweets for the current search term; re-runs
# whenever input$searchTerm changes (requires the OAuth session above).
tweets <- reactive ({ searchTwitter(input$searchTerm,n=1000, lang="en") })
#tweets <- renderText({ input$searchTerm })
# Step2: Preprocessing to clean up the tweets
# preprocess_tweet() comes from the sourced helpers.R -- presumably strips
# URLs/mentions/punctuation; verify against that file.
txtTweets <- reactive ({ preprocess_tweet (tweets()) })
# Render the sentiment-distribution bar chart for the naive Bayes
# classifier. Reactive deps: txtTweets() (cleaned tweets) and
# input$plot_opt ('emotion' -> emotion classes, otherwise polarity).
output$plot_emotion <- renderPlot({
  # Step 3/4: classify each tweet by emotion and polarity.
  emotion <- emotionSentimentAnal(txtTweets())
  polarity <- polaritySentimentAnal(txtTweets())
  # Step 5: store results in a dataframe.
  results_df <- data.frame(text = txtTweets(), emotion = emotion, polarity = polarity)
  # Shared look-and-feel: identical in both branches, so build it once
  # instead of duplicating ~10 theme() calls per branch (ggplot2 accepts a
  # list of layers added with `+`).
  shared_layers <- list(
    scale_fill_brewer(palette = "Set1"),
    theme_bw(),
    theme(axis.text.y = element_text(colour = "black", size = 18, face = 'plain')),
    theme(axis.title.y = element_text(colour = "black", size = 18, face = 'plain', vjust = 2)),
    theme(axis.text.x = element_text(colour = "black", size = 18, face = 'plain', angle = 90, hjust = 1)),
    theme(axis.title.x = element_text(colour = "black", size = 18, face = 'plain')),
    theme(plot.title = element_text(colour = "black", size = 20, face = 'plain', vjust = 2.5)),
    theme(legend.text = element_text(colour = "black", size = 16, face = 'plain')),
    theme(legend.title = element_text(colour = "black", size = 18, face = 'plain')),
    guides(fill = guide_legend(keywidth = 2, keyheight = 2))
  )
  plot_title <- ggtitle(paste('Using Bayes Method Intention Mining of Search Term "',
                              input$searchTerm, '"', sep = ''))
  # Step 6: plot the distribution of the selected sentiment dimension.
  if (input$plot_opt == 'emotion') {
    ggplot(results_df) +
      geom_bar(aes(x = emotion, y = ..count.., fill = emotion)) +
      plot_title +
      xlab("Emotion Class") + ylab("No of Tweets") +
      shared_layers
  } else {
    ggplot(results_df) +
      geom_bar(aes(x = polarity, y = ..count.., fill = polarity), width = 0.6) +
      plot_title +
      xlab("Polarity Class") + ylab("No of Tweets") +
      shared_layers
  }
})
# Render the MAXENT-classifier sentiment distribution. Behaviour matches
# the original block exactly: classify the cleaned tweets, then draw either
# the emotion or the polarity bar chart depending on input$plot_opt.
output$plot_emotion1 <- renderPlot({
  tweet_text <- txtTweets()
  sentiment_df <- data.frame(text = tweet_text,
                             emotion = emotionSentimentAnal1(tweet_text),
                             polarity = polaritySentimentAnal1(tweet_text))
  chart_title <- ggtitle(paste('Using MAXENT Intention Mining of Search Term "',
                               input$searchTerm, '"', sep = ''))
  # Formatting layers common to both chart variants.
  styling <- list(
    scale_fill_brewer(palette = "Set1"),
    theme_bw(),
    theme(axis.text.y = element_text(colour = "black", size = 18, face = 'plain')),
    theme(axis.title.y = element_text(colour = "black", size = 18, face = 'plain', vjust = 2)),
    theme(axis.text.x = element_text(colour = "black", size = 18, face = 'plain', angle = 90, hjust = 1)),
    theme(axis.title.x = element_text(colour = "black", size = 18, face = 'plain')),
    theme(plot.title = element_text(colour = "black", size = 20, face = 'plain', vjust = 2.5)),
    theme(legend.text = element_text(colour = "black", size = 16, face = 'plain')),
    theme(legend.title = element_text(colour = "black", size = 18, face = 'plain')),
    guides(fill = guide_legend(keywidth = 2, keyheight = 2))
  )
  if (input$plot_opt == 'emotion') {
    ggplot(sentiment_df) +
      geom_bar(aes(x = emotion, y = ..count.., fill = emotion)) +
      chart_title +
      xlab("Emotion Class") + ylab("No of Tweets") +
      styling
  } else {
    ggplot(sentiment_df) +
      geom_bar(aes(x = polarity, y = ..count.., fill = polarity), width = 0.6) +
      chart_title +
      xlab("Polarity Class") + ylab("No of Tweets") +
      styling
  }
})
# Render the HMM-classifier sentiment distribution. Same layout as the
# Bayes and MAXENT panels above; only the classifier functions (*Anal2)
# and the chart title differ.
output$plot_emotion2 <- renderPlot({
# Step 3: Emotion sentiment analysis
emotion <- emotionSentimentAnal2(txtTweets())
# Step 4: Polarity sentiment analysis
polarity <- polaritySentimentAnal2(txtTweets())
# Step 5: Store results in dataframe
results_df <- data.frame(text=txtTweets(), emotion=emotion, polarity=polarity)
# Step 6: Plot distribution of tweet sentiments
# (input$plot_opt selects the emotion or the polarity view)
if (input$plot_opt == 'emotion') {
ggplot(results_df) +
geom_bar(aes(x=emotion, y=..count.., fill=emotion)) +
ggtitle(paste('Using HMM Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Emotion Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
} else {
# Polarity view: narrower bars (width=0.6), otherwise identical styling.
ggplot(results_df, aes()) +
geom_bar(aes(x=polarity, y=..count.., fill=polarity), width=0.6) +
ggtitle(paste('Using HMM Intention Mining of Search Term "', input$searchTerm, '"', sep='')) +
xlab("Polarity Class") + ylab("No of Tweets") +
scale_fill_brewer(palette="Set1") +
theme_bw() +
theme(axis.text.y = element_text(colour="black", size=18, face='plain')) +
theme(axis.title.y = element_text(colour="black", size=18, face='plain', vjust=2)) +
theme(axis.text.x = element_text(colour="black", size=18, face='plain', angle=90, hjust=1)) +
theme(axis.title.x = element_text(colour="black", size=18, face='plain')) +
theme(plot.title = element_text(colour="black", size=20, face='plain', vjust=2.5)) +
theme(legend.text = element_text(colour="black", size=16, face='plain')) +
theme(legend.title = element_text(colour="black", size=18, face='plain')) +
guides(fill = guide_legend(keywidth = 2, keyheight = 2))
}
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/office_get_levels.R
\name{office_get_levels}
\alias{office_get_levels}
\title{Get office levels}
\usage{
office_get_levels()
}
\value{
A dataframe with the columns \code{office_level_id} and \code{name}.
}
\description{
These are currently: F for Federal, S for State, and L for Local.
}
\examples{
\dontrun{
office_get_levels()
}
}
| /man/office_get_levels.Rd | no_license | cran/votesmart | R | false | true | 411 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/office_get_levels.R
\name{office_get_levels}
\alias{office_get_levels}
\title{Get office levels}
\usage{
office_get_levels()
}
\value{
A dataframe with the columns \code{office_level_id} and \code{name}.
}
\description{
These are currently: F for Federal, S for State, and L for Local.
}
\examples{
\dontrun{
office_get_levels()
}
}
|
# Auto-generated (AFL fuzzing) regression input for the unexported
# metacoder:::euclid wrapper: a 4-element zero vector vs an empty vector.
testlist <- list(a = c(0, 0, 0, 0), b = numeric(0))
# ::: reaches an unexported function -- intentional for internal fuzz tests.
result <- do.call(metacoder:::euclid,testlist)
str(result) | /metacoder/inst/testfiles/euclid/AFL_euclid/euclid_valgrind_files/1615762777-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 110 | r | testlist <- list(a = c(0, 0, 0, 0), b = numeric(0))
result <- do.call(metacoder:::euclid,testlist)
str(result) |
library(ggplot2)
library(plyr)
library(ROCR)
# Pull the UCI "Adult" census dataset (raw file has no header row).
# `fill`/`strip.white` now use TRUE/FALSE instead of the reassignable T/F
# shortcuts; behaviour is unchanged.
adult <- read.table('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
                    sep = ',', fill = FALSE, strip.white = TRUE)
# NOTE: the 'educatoin'/'educatoin_num' misspellings are kept deliberately
# because later code refers to adult$educatoin_num.
colnames(adult) <- c('age', 'workclass', 'fnlwgt', 'educatoin',
                     'educatoin_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex',
                     'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income')
# Drop columns not used in the rest of the analysis.
adult$educatoin <- NULL
adult$fnlwgt <- NULL
adult$relationship <- NULL
# histogram of age by income group (1-year bins, stacked by income class)
ggplot(adult) + aes(x=as.numeric(age), group=income, fill=income) +
geom_histogram(binwidth=1, color='black')
# histogram of age by gender group
# NOTE(review): as.numeric(age) assumes age was not read as a factor --
# with strip.white=TRUE it should already be integer; verify.
ggplot(adult) + aes(x=as.numeric(age), group=sex, fill=sex) +
geom_histogram(binwidth=1, color='black')
# Collapse the raw workclass levels into four buckets:
# Government, Self-Employed, Private, Other/Unknown.
# The gsub chain is order-dependent: 'Other' and 'Unknown' are first used
# as intermediate labels, then merged into 'Other/Unknown' at the end.
summary(adult$workclass)
# First raw level (the '?' placeholder in the UCI file, per levels order)
# is relabelled 'Unknown' -- TODO confirm against summary() output above.
levels(adult$workclass)[1] <- 'Unknown'
# combine into Government job
adult$workclass <- gsub('^Federal-gov', 'Government', adult$workclass)
adult$workclass <- gsub('^Local-gov', 'Government', adult$workclass)
adult$workclass <- gsub('^State-gov', 'Government', adult$workclass)
# combine into Self-Employed job
adult$workclass <- gsub('^Self-emp-inc', 'Self-Employed', adult$workclass)
adult$workclass <- gsub('^Self-emp-not-inc', 'Self-Employed', adult$workclass)
# combine into Other/Unknown
adult$workclass <- gsub('^Never-worked', 'Other', adult$workclass)
adult$workclass <- gsub('^Without-pay', 'Other', adult$workclass)
adult$workclass <- gsub('^Other', 'Other/Unknown', adult$workclass)
adult$workclass <- gsub('^Unknown', 'Other/Unknown', adult$workclass)
# gsub returns character; restore factor type for later table()/levels().
adult$workclass <- as.factor(adult$workclass)
summary(adult$workclass)
# barplot of job type by income group
# Counts of (income x workclass). Flattening the contingency table with
# as.numeric() is column-major: for each workclass level in sorted order
# (Government, Other/Unknown, Private, Self-Employed) we get the '<=50K'
# then '>50K' count -- exactly the ordering the original built with eight
# separate table() lookups.
count <- as.numeric(table(adult$income, adult$workclass))
# create a dataframe: one row per (industry, income) pair
industry <- rep(levels(adult$workclass), each = 2)
income <- rep(c('<=50K', '>50K'), 4)
df <- data.frame(industry, income, count)
df
# calculate the percentages within each industry
df <- ddply(df, .(industry), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions (segment mid-points)
df <- ddply(df, .(industry), transform, pos = (cumsum(count) - 0.5 * count))
df$label <- paste0(sprintf("%.0f", df$percent), "%")
# bar plot of counts by industry with in group proportions
ggplot(df, aes(x = industry, y = count, fill = income)) +
  geom_bar(stat = "identity") +
  geom_text(aes(y = pos, label = label), size = 2) +
  ggtitle('Income by Industry')
# create a dataframe of counts per (income, years-of-education) pair
df1 <- data.frame(table(adult$income, adult$educatoin_num))
names(df1) <- c('income', 'education_num', 'count')
df1
# calculate the percentages within each education level
df1 <- ddply(df1, .(education_num), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions (segment mid-points)
df1 <- ddply(df1, .(education_num), transform, pos = (cumsum(count) - 0.5 * count))
df1$label <- paste0(sprintf("%.0f", df1$percent), "%")
# remove some in group percentage to avoid overlapped text
df1$label[which(df1$percent < 5)] <- NA
# bar plot of counts by years of education with in group proportions
ggplot(df1, aes(x = education_num, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Years of Education')
# Collapse raw occupation levels into five buckets: White-Collar,
# Blue-Collar, Professional, Service, Other/Unknown. Order-dependent gsub
# chain, same pattern as the workclass recode above.
summary(adult$occupation)
# First raw level (the '?' placeholder) becomes 'Unknown'.
levels(adult$occupation)[1] <- 'Unknown'
adult$occupation <- gsub('Adm-clerical', 'White-Collar', adult$occupation)
adult$occupation <- gsub('Craft-repair', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Exec-managerial', 'White-Collar', adult$occupation)
adult$occupation <- gsub('Farming-fishing', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Handlers-cleaners', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Machine-op-inspct', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Other-service', 'Service', adult$occupation)
adult$occupation <- gsub('Priv-house-serv', 'Service', adult$occupation)
adult$occupation <- gsub('Prof-specialty', 'Professional', adult$occupation)
adult$occupation <- gsub('Protective-serv', 'Service', adult$occupation)
adult$occupation <- gsub('Tech-support', 'Service', adult$occupation)
adult$occupation <- gsub('Transport-moving', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Unknown', 'Other/Unknown', adult$occupation)
adult$occupation <- gsub('Armed-Forces', 'Other/Unknown', adult$occupation)
adult$occupation <- as.factor(adult$occupation)
summary(adult$occupation)
# create a dataframe of counts per (income, occupation) pair
df2 <- data.frame(table(adult$income, adult$occupation))
names(df2) <- c('income', 'occupation', 'count')
df2
# calculate the percentages within each occupation
df2 <- ddply(df2, .(occupation), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions (segment mid-points)
df2 <- ddply(df2, .(occupation), transform, pos = (cumsum(count) - 0.5 * count))
df2$label <- paste0(sprintf("%.0f", df2$percent), "%")
# bar plot of counts by occupation with in group proportions
ggplot(df2, aes(x = occupation, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Different Occupations')
# Collapse marital-status levels: all Married-* variants -> 'Married',
# Never-married -> 'Single'; Divorced/Separated/Widowed pass through.
summary(adult$marital_status)
adult$marital_status <- gsub('Married-AF-spouse', 'Married', adult$marital_status)
adult$marital_status <- gsub('Married-civ-spouse', 'Married', adult$marital_status)
adult$marital_status <- gsub('Married-spouse-absent', 'Married', adult$marital_status)
adult$marital_status <- gsub('Never-married', 'Single', adult$marital_status)
adult$marital_status <- as.factor(adult$marital_status)
summary(adult$marital_status)
# counts per (income, marital_status) pair
df3 <- data.frame(table(adult$income, adult$marital_status))
names(df3) <- c('income', 'marital_status', 'count')
df3
# calculate the percentages within each marital status
df3 <- ddply(df3, .(marital_status), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions (segment mid-points)
df3 <- ddply(df3, .(marital_status), transform, pos = (cumsum(count) - 0.5 * count))
df3$label <- paste0(sprintf("%.0f", df3$percent), "%")
# bar plot of counts by marital status with in group proportions
ggplot(df3, aes(x = marital_status, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Marital Status')
# histogram of capital_gain by income group
ggplot(adult) + aes(x=as.numeric(capital_gain), group=income, fill=income) +
geom_histogram(bins=10, color='black') + ggtitle('Histogram of Capital Gain')
# histogram of capital_loss by income group
ggplot(adult) + aes(x=as.numeric(capital_loss), group=income, fill=income) +
geom_histogram(bins=10, color='black') + ggtitle('Histogram of Capital Loss')
# percentage of observations with no capital gain or loss
sum(adult$capital_gain == 0)/length(adult$capital_gain)
sum(adult$capital_loss == 0)/length(adult$capital_loss)
# Drop sparse/high-cardinality columns before modelling: both capital
# columns are mostly zero (per the fractions above) and native_country is
# excluded from the predictors.
adult$capital_gain <- NULL
adult$capital_loss <- NULL
adult$native_country <- NULL
# counts per (income, race) pair
df4 <- data.frame(table(adult$income, adult$race))
names(df4) <- c('income', 'race', 'count')
df4
# calculate the percentages within each race group
df4 <- ddply(df4, .(race), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions (segment mid-points)
df4 <- ddply(df4, .(race), transform, pos = (cumsum(count) - 0.5 * count))
df4$label <- paste0(sprintf("%.0f", df4$percent), "%")
# do not display percentage for low counts categories
df4$label[df4$race == 'Other'] <- NA
df4$label[df4$race == 'Amer-Indian-Eskimo'] <- NA
# bar plot of counts by race with in group proportions
ggplot(df4, aes(x = race, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level by Race')
# Final overview of the cleaned dataset before modelling.
summary(adult)
# Deterministic 80/20 split: first 80% of rows train, remainder test.
# NOTE(review): rows are taken in file order, not sampled -- confirm the
# source file is not ordered by any outcome-correlated field.
sz <- round(0.8 * nrow(adult)) # training set size
train_idx <- seq_len(sz)
training_set <- adult[train_idx, ]
testing_set <- adult[-train_idx, ]
# Fit the full logistic regression model (all remaining predictors).
m1 <- glm(income ~ ., data = training_set, family = binomial('logit'))
summary(m1)
confint(m1)
m_full <- m1 # full model is the model just fitted
# Intercept-only model bounding the stepwise search space.
m_null <- glm(income ~ 1, data = training_set, family = binomial('logit'))
# Stepwise AIC selection in both directions. `trace = FALSE` replaces the
# reassignable `F` shortcut. NOTE(review): results are only printed, never
# assigned -- capture the return value if the reduced model is needed later.
# backward selection
step(m_full, trace = FALSE, scope = list(lower = formula(m_null), upper = formula(m_full)),
     direction = 'backward')
# forward selection
step(m_null, trace = FALSE, scope = list(lower = formula(m_null), upper = formula(m_full)),
     direction = 'forward')
# Deviance-residual diagnostic for m1: points outside +/-3 flag poorly
# fitted observations.
index <- seq_len(nrow(training_set))
dev_resid <- residuals(m1)
income <- training_set$income
dff <- data.frame(index, dev_resid, income)
ggplot(dff, aes(x = index, y = dev_resid, color = income)) +
  geom_point() +
  geom_hline(yintercept = 3, linetype = 'dashed', color = 'blue') +
  geom_hline(yintercept = -3, linetype = 'dashed', color = 'blue') +
  # BUG FIX: ggtitle() was on its own line without a preceding '+', so it
  # was evaluated as a separate expression and the title never appeared.
  ggtitle('Plot of Deviance Residuals')
### PREDICTION
# Training-set accuracy of the logistic model m1.
prob1 <- predict(m1, training_set, type = 'response')
pred1 <- rep('<=50K', length(prob1))
# BUG FIX: the original indexed with `prob` (the *test-set* probabilities,
# defined further down -- undefined at this point in a fresh session) and
# used lowercase labels '<=50k'/'>50k' that do not match the income
# factor levels '<=50K'/'>50K'.
pred1[prob1 >= .5] <- '>50K'
tb1 <- table(pred1, training_set$income)
tb1
accuracy1 <- sum(diag(tb1)) / sum(tb1)
accuracy1
# Held-out test-set accuracy.
prob <- predict(m1, testing_set, type = 'response')
pred <- rep('<=50K', length(prob))
pred[prob >= .5] <- '>50K'
# confusion matrix
tb <- table(pred, testing_set$income)
tb
accuracy2 <- sum(diag(tb)) / sum(tb)
accuracy2
## Classification tree (CART) on the same train/test split.
library(rpart)
tree2 <- rpart(income ~ ., data = training_set, method = 'class', cp = 1e-3)
tree2.pred.prob <- predict(tree2, newdata = testing_set, type = 'prob')
tree2.pred <- predict(tree2, newdata = testing_set, type = 'class')
# confusion matrix
tb2 <- table(tree2.pred, testing_set$income)
tb2
### Random forest.
# NOTE(review): randomForest is stochastic and no set.seed() is called,
# so results differ between runs -- confirm whether reproducibility matters.
library(randomForest)
rf3 <- randomForest(income ~ ., data = training_set, ntree = 1000)
rf3.pred.prob <- predict(rf3, newdata = testing_set, type = 'prob')
rf3.pred <- predict(rf3, newdata = testing_set, type = 'class')
# confusion matrix
tb3 <- table(rf3.pred, testing_set$income)
tb3
### SVM (kernlab defaults: presumably RBF kernel -- verify ksvm docs).
library(kernlab)
svm4 <- ksvm(income ~ ., data = training_set)
# 'decision' gives the margin values used for the ROC curve later;
# 'response' gives hard class labels for the confusion matrix.
svm4.pred.prob <- predict(svm4, newdata = testing_set, type = 'decision')
svm4.pred <- predict(svm4, newdata = testing_set, type = 'response')
# confusion matrix
tb4 <- table(svm4.pred, testing_set$income)
tb4
# ROC comparison of all four classifiers (ROCR package).
# create a prediction object for the logistic model
pr <- prediction(prob, testing_set$income)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
# create a data frame for TP and FP rates
dd <- data.frame(FP = prf@x.values[[1]], TP = prf@y.values[[1]])
# CART (column 2 of the prob matrix = P('>50K'))
pr2 <- prediction(tree2.pred.prob[,2], testing_set$income)
prf2 <- performance(pr2, measure = "tpr", x.measure = "fpr")
dd2 <- data.frame(FP = prf2@x.values[[1]], TP = prf2@y.values[[1]])
# RF
pr3 <- prediction(rf3.pred.prob[,2], testing_set$income)
prf3 <- performance(pr3, measure = "tpr", x.measure = "fpr")
dd3 <- data.frame(FP = prf3@x.values[[1]], TP = prf3@y.values[[1]])
# SVM (decision values, not probabilities -- ROCR accepts any score)
pr4 <- prediction(svm4.pred.prob, testing_set$income)
prf4 <- performance(pr4, measure = "tpr", x.measure = "fpr")
dd4 <- data.frame(FP = prf4@x.values[[1]], TP = prf4@y.values[[1]])
# plot all four ROC curves plus the chance diagonal
g <- ggplot() +
geom_line(data = dd, aes(x = FP, y = TP, color = 'Logistic Regression')) +
geom_line(data = dd2, aes(x = FP, y = TP, color = 'CART')) +
geom_line(data = dd3, aes(x = FP, y = TP, color = 'Random Forest')) +
geom_line(data = dd4, aes(x = FP, y = TP, color = 'Support Vector Machine')) +
geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1)) +
ggtitle('ROC Curve') +
labs(x = 'False Positive Rate', y = 'True Positive Rate')
# Manual colour scale doubles as the legend mapping.
g + scale_colour_manual(name = 'Classifier', values = c('Logistic Regression'='#E69F00',
'CART'='#009E73',
'Random Forest'='#D55E00', 'Support Vector Machine'='#0072B2'))
| /project_harsha_adult.r | no_license | HarshaKosuru/CENSUS | R | false | false | 12,640 | r | library(ggplot2)
library(plyr)
library(ROCR)
adult <- read.table('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
sep = ',', fill = F, strip.white = T)
colnames(adult) <- c('age', 'workclass', 'fnlwgt', 'educatoin',
'educatoin_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'income')
adult$educatoin <- NULL
adult$fnlwgt <- NULL
adult$relationship <- NULL
# histogram of age by income group
ggplot(adult) + aes(x=as.numeric(age), group=income, fill=income) +
geom_histogram(binwidth=1, color='black')
# histogram of age by gender group
ggplot(adult) + aes(x=as.numeric(age), group=sex, fill=sex) +
geom_histogram(binwidth=1, color='black')
summary(adult$workclass)
levels(adult$workclass)[1] <- 'Unknown'
# combine into Government job
adult$workclass <- gsub('^Federal-gov', 'Government', adult$workclass)
adult$workclass <- gsub('^Local-gov', 'Government', adult$workclass)
adult$workclass <- gsub('^State-gov', 'Government', adult$workclass)
# combine into Sele-Employed job
adult$workclass <- gsub('^Self-emp-inc', 'Self-Employed', adult$workclass)
adult$workclass <- gsub('^Self-emp-not-inc', 'Self-Employed', adult$workclass)
# combine into Other/Unknown
adult$workclass <- gsub('^Never-worked', 'Other', adult$workclass)
adult$workclass <- gsub('^Without-pay', 'Other', adult$workclass)
adult$workclass <- gsub('^Other', 'Other/Unknown', adult$workclass)
adult$workclass <- gsub('^Unknown', 'Other/Unknown', adult$workclass)
adult$workclass <- as.factor(adult$workclass)
summary(adult$workclass)
# barplot of job type by income group
# get the counts by industry and income group
count <- table(adult[adult$workclass == 'Government',]$income)["<=50K"]
count <- c(count, table(adult[adult$workclass == 'Government',]$income)[">50K"])
count <- c(count, table(adult[adult$workclass == 'Other/Unknown',]$income)["<=50K"])
count <- c(count, table(adult[adult$workclass == 'Other/Unknown',]$income)[">50K"])
count <- c(count, table(adult[adult$workclass == 'Private',]$income)["<=50K"])
count <- c(count, table(adult[adult$workclass == 'Private',]$income)[">50K"])
count <- c(count, table(adult[adult$workclass == 'Self-Employed',]$income)["<=50K"])
count <- c(count, table(adult[adult$workclass == 'Self-Employed',]$income)[">50K"])
count <- as.numeric(count)
# create a dataframe
industry <- rep(levels(adult$workclass), each = 2)
income <- rep(c('<=50K', '>50K'), 4)
df <- data.frame(industry, income, count)
df
# calculate the percentages
df <- ddply(df, .(industry), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions
df <- ddply(df, .(industry), transform, pos = (cumsum(count) - 0.5 * count))
df$label <- paste0(sprintf("%.0f", df$percent), "%")
# bar plot of counts by industry with in group proportions
ggplot(df, aes(x = industry, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income by Industry')
# create a dataframe
df1 <- data.frame(table(adult$income, adult$educatoin_num))
names(df1) <- c('income', 'education_num', 'count')
df1
# calculate the percentages
df1 <- ddply(df1, .(education_num), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions
df1 <- ddply(df1, .(education_num), transform, pos = (cumsum(count) - 0.5 * count))
df1$label <- paste0(sprintf("%.0f", df1$percent), "%")
# remove some in group percentage to avoid overlapped text
df1$label[which(df1$percent < 5)] <- NA
# bar plot of counts by years of education with in group proportions
ggplot(df1, aes(x = education_num, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Years of Education')
summary(adult$occupation)
levels(adult$occupation)[1] <- 'Unknown'
adult$occupation <- gsub('Adm-clerical', 'White-Collar', adult$occupation)
adult$occupation <- gsub('Craft-repair', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Exec-managerial', 'White-Collar', adult$occupation)
adult$occupation <- gsub('Farming-fishing', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Handlers-cleaners', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Machine-op-inspct', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Other-service', 'Service', adult$occupation)
adult$occupation <- gsub('Priv-house-serv', 'Service', adult$occupation)
adult$occupation <- gsub('Prof-specialty', 'Professional', adult$occupation)
adult$occupation <- gsub('Protective-serv', 'Service', adult$occupation)
adult$occupation <- gsub('Tech-support', 'Service', adult$occupation)
adult$occupation <- gsub('Transport-moving', 'Blue-Collar', adult$occupation)
adult$occupation <- gsub('Unknown', 'Other/Unknown', adult$occupation)
adult$occupation <- gsub('Armed-Forces', 'Other/Unknown', adult$occupation)
adult$occupation <- as.factor(adult$occupation)
summary(adult$occupation)
# create a dataframe
df2 <- data.frame(table(adult$income, adult$occupation))
names(df2) <- c('income', 'occupation', 'count')
df2
# calculate the percentages
df2 <- ddply(df2, .(occupation), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions
df2 <- ddply(df2, .(occupation), transform, pos = (cumsum(count) - 0.5 * count))
df2$label <- paste0(sprintf("%.0f", df2$percent), "%")
# bar plot of counts by occupation with in group proportions
ggplot(df2, aes(x = occupation, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Different Occupations')
summary(adult$marital_status)
adult$marital_status <- gsub('Married-AF-spouse', 'Married', adult$marital_status)
adult$marital_status <- gsub('Married-civ-spouse', 'Married', adult$marital_status)
adult$marital_status <- gsub('Married-spouse-absent', 'Married', adult$marital_status)
adult$marital_status <- gsub('Never-married', 'Single', adult$marital_status)
adult$marital_status <- as.factor(adult$marital_status)
summary(adult$marital_status)
df3 <- data.frame(table(adult$income, adult$marital_status))
names(df3) <- c('income', 'marital_status', 'count')
df3
# calculate the percentages
df3 <- ddply(df3, .(marital_status), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions
df3 <- ddply(df3, .(marital_status), transform, pos = (cumsum(count) - 0.5 * count))
df3$label <- paste0(sprintf("%.0f", df3$percent), "%")
# bar plot of counts by marital status with in group proportions
ggplot(df3, aes(x = marital_status, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level with Marital Status')
# histogram of capital_gain
ggplot(adult) + aes(x=as.numeric(capital_gain), group=income, fill=income) +
geom_histogram(bins=10, color='black') + ggtitle('Histogram of Capital Gain')
# histogram of capital_loss
ggplot(adult) + aes(x=as.numeric(capital_loss), group=income, fill=income) +
geom_histogram(bins=10, color='black') + ggtitle('Histogram of Capital Loss')
# percentage of observatiosn with no capital gain or loss
sum(adult$capital_gain == 0)/length(adult$capital_gain)
sum(adult$capital_loss == 0)/length(adult$capital_loss)
###
adult$capital_gain <- NULL
adult$capital_loss <- NULL
adult$native_country <- NULL
df4 <- data.frame(table(adult$income, adult$race))
names(df4) <- c('income', 'race', 'count')
df4
# calculate the percentages
df4 <- ddply(df4, .(race), transform, percent = count/sum(count) * 100)
# format the labels and calculate their positions
df4 <- ddply(df4, .(race), transform, pos = (cumsum(count) - 0.5 * count))
df4$label <- paste0(sprintf("%.0f", df4$percent), "%")
# do not display percentage for low counts categories
df4$label[df4$race == 'Other'] <- NA
df4$label[df4$race == 'Amer-Indian-Eskimo'] <- NA
# bar plot of counts by marital status with in group proportions
ggplot(df4, aes(x = race, y = count, fill = income)) +
geom_bar(stat = "identity") +
geom_text(aes(y = pos, label = label), size = 2) +
ggtitle('Income Level by Race')
summary(adult)
sz <- round(.8 * dim(adult)[1]) # training set size
training_set <- adult[1:sz,]
testing_set <- adult[-(1:sz),]
m1 <- glm(income ~ ., data = training_set, family = binomial('logit'))
summary(m1)
confint(m1)
m_full <- m1 # full model is the model just fitted
m_null <- glm(income ~ 1, data = training_set, family = binomial('logit'))
# backward selection
step(m_full, trace = F, scope = list(lower=formula(m_null), upper=formula(m_full)),
direction = 'backward')
# forward selection
step(m_null, trace = F, scope = list(lower=formula(m_null), upper=formula(m_full)),
direction = 'forward')
# create a data frame to store information regarding deviance residuals
index <- 1:dim(training_set)[1]
dev_resid <- residuals(m1)
income <- training_set$income
dff <- data.frame(index, dev_resid, income)
ggplot(dff, aes(x = index, y = dev_resid, color = income)) +
geom_point() +
geom_hline(yintercept = 3, linetype = 'dashed', color = 'blue') +
geom_hline(yintercept = -3, linetype = 'dashed', color = 'blue')
ggtitle('Plot of Deviance Residuals')
###PREDICTION
prob1<-predict(m1,training_set,type='response')
pred1<-rep('<=50k',length(prob1))
pred1[prob>=.5]<-'>50k'
tb1<-table(pred1,training_set$income)
tb1
accuracy1<-sum(diag(tb1))/sum(tb1)
accuracy1
prob <- predict(m1, testing_set, type = 'response')
pred <- rep('<=50K', length(prob))
pred[prob>=.5] <- '>50K'
# confusion matrix
tb <- table(pred, testing_set$income)
tb
accuracy2<-sum(diag(tb))/sum(tb)
accuracy2
##regression tree
library(rpart)
tree2 <- rpart(income ~ ., data = training_set, method = 'class', cp = 1e-3)
tree2.pred.prob <- predict(tree2, newdata = testing_set, type = 'prob')
tree2.pred <- predict(tree2, newdata = testing_set, type = 'class')
# confusion matrix
tb2 <- table(tree2.pred, testing_set$income)
tb2
###random forest
library(randomForest)
rf3 <- randomForest(income ~ ., data = training_set, ntree = 1000)
rf3.pred.prob <- predict(rf3, newdata = testing_set, type = 'prob')
rf3.pred <- predict(rf3, newdata = testing_set, type = 'class')
# confusion matrix
tb3 <- table(rf3.pred, testing_set$income)
tb3
###SVM
library(kernlab)
svm4 <- ksvm(income ~ ., data = training_set)
svm4.pred.prob <- predict(svm4, newdata = testing_set, type = 'decision')
svm4.pred <- predict(svm4, newdata = testing_set, type = 'response')
# confusion matrix
tb4 <- table(svm4.pred, testing_set$income)
tb4
# create a prediction object
pr <- prediction(prob, testing_set$income)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
# create a data frame for TP and FP rates
dd <- data.frame(FP = prf@x.values[[1]], TP = prf@y.values[[1]])
# CART
pr2 <- prediction(tree2.pred.prob[,2], testing_set$income)
prf2 <- performance(pr2, measure = "tpr", x.measure = "fpr")
dd2 <- data.frame(FP = prf2@x.values[[1]], TP = prf2@y.values[[1]])
# RF
pr3 <- prediction(rf3.pred.prob[,2], testing_set$income)
prf3 <- performance(pr3, measure = "tpr", x.measure = "fpr")
dd3 <- data.frame(FP = prf3@x.values[[1]], TP = prf3@y.values[[1]])
# SVM
pr4 <- prediction(svm4.pred.prob, testing_set$income)
prf4 <- performance(pr4, measure = "tpr", x.measure = "fpr")
dd4 <- data.frame(FP = prf4@x.values[[1]], TP = prf4@y.values[[1]])
# plot ROC curve for logistic regression
g <- ggplot() +
geom_line(data = dd, aes(x = FP, y = TP, color = 'Logistic Regression')) +
geom_line(data = dd2, aes(x = FP, y = TP, color = 'CART')) +
geom_line(data = dd3, aes(x = FP, y = TP, color = 'Random Forest')) +
geom_line(data = dd4, aes(x = FP, y = TP, color = 'Support Vector Machine')) +
geom_segment(aes(x = 0, xend = 1, y = 0, yend = 1)) +
ggtitle('ROC Curve') +
labs(x = 'False Positive Rate', y = 'True Positive Rate')
g + scale_colour_manual(name = 'Classifier', values = c('Logistic Regression'='#E69F00',
'CART'='#009E73',
'Random Forest'='#D55E00', 'Support Vector Machine'='#0072B2'))
|
# Bootstrap: install pacman once, then let p_load() install/attach every
# other package the app needs.
if (!("pacman" %in% rownames(installed.packages()))) {install.packages("pacman")}
pacman::p_load(shiny,
tidytable,
tidyverse,
data.table,
htmlwidgets,
DT,
kableExtra,
reshape2,
dtplyr,
rio,
janitor)
# Raise Shiny's upload limit to 50 MB (default is 5 MB).
options(shiny.maxRequestSize=50*1024^2)
# Negated %in%: TRUE for each element of x that is absent from y.
'%!in%' <- function(x, y) !(x %in% y)
# Find DNA segments that overlap on the same chromosome.
#
# dataset: data.table of match segments with columns NAME, MATCHNAME,
#          CHROMOSOME, START LOCATION, END LOCATION, CENTIMORGANS,
#          MATCHING SNPS, Shared cM, Longest Block.
# cM:      minimum segment length to keep (centimorgans, exclusive).
# name:    optional MATCHNAME(s); when given, only overlaps involving
#          these matches are returned (from both sides of the join).
# exclude: optional MATCHNAME(s) to drop before searching.
# Returns a data.table of overlapping segment pairs, ordered by
# chromosome (1..22, X) then start position.
findoverlapping_segments <- function(dataset, cM = 7, name = NULL, exclude=NULL){
# Filter to segments above the cM threshold, minus excluded matches.
dataset %>%
lazy_dt() %>%
filter(CENTIMORGANS > cM) %>%
filter(MATCHNAME %!in% exclude) %>%
as.data.table() ->
dataset
# foverlaps() requires a key ending in the interval columns.
setkey(dataset, CHROMOSOME, `START LOCATION`, `END LOCATION`)
if((is_empty(name)) == T){
# No focal match: self-join the whole table, keep cross-match pairs,
# and retain only the left-hand side's columns (1:9).
olaps = foverlaps(dataset, dataset, type="any", which=FALSE) %>%
lazy_dt() %>%
filter(MATCHNAME != i.MATCHNAME) %>%
select(1:9) %>%
distinct() %>%
as.data.table()
}else{
# Focal match given: overlap only that match's segments against all.
dataset_name <- dataset %>%
lazy_dt() %>%
filter(MATCHNAME %in% name) %>%
as.data.table()
olaps = foverlaps(dataset_name, dataset, type="any", which=FALSE)
olaps %>%
lazy_dt() %>%
filter(MATCHNAME != i.MATCHNAME) %>%
as.data.table() -> olaps
# Split the join result into its two halves (cols 1:9 vs 1,10:17) and
# stack them so both sides of each overlap appear as rows.
olaps[,c(1:9)] -> olaps1
olaps[,c(1,10:17)] -> olaps2
colnames(olaps2) <- colnames(olaps1)
olaps2 %>% lazy_dt() %>% full_join(olaps1) %>% distinct() %>%
#mutate(sorter = !(MATCHNAME %in% name)) %>%
as.data.table -> olaps
setkey(olaps, CHROMOSOME, `START LOCATION`, #sorter,
`END LOCATION`)
olaps %>% #select.(-sorter) %>%
as.data.table() -> olaps
}
# Order chromosomes 1..22 then X (as an ordered factor), sort, and
# project to the canonical output columns.
olaps %>%
lazy_dt() %>%
mutate(CHROMOSOME = CHROMOSOME %>% factor(labels = c(1:22, "X") %>% as.character(),
levels = c(1:22, "X") %>% as.character(),
ordered = T)) %>%
arrange(CHROMOSOME,
`START LOCATION`) %>%
select(NAME,
MATCHNAME,
CHROMOSOME,
`START LOCATION`,
`END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`,
`Shared cM`,
`Longest Block`) %>%
as.data.table() -> output
return(output)
}
# overlap_in_lists <- function(out){if(length(unique(out$NAME)) > 1){
# out %>%
# lazy_dt() %>%
# distinct(MATCHNAME, NAME) %>%
# group_by(MATCHNAME) %>%
# summarise(n = n()) %>%
# ungroup() %>%
# filter(n > 1) %>%
# as.data.table() %>%
# pull(MATCHNAME) %>%
# c(unique(out$NAME)) ->
# uniques_shared_matches
#
# out <- out %>% lazy_dt() %>% filter(MATCHNAME %in% uniques_shared_matches) %>% as.data.table()
# out}
# else{out}}
# Import one chromosome-browser export file and normalize it to the canonical
# seven-column layout (NAME, MATCHNAME, CHROMOSOME, START/END LOCATION,
# CENTIMORGANS, MATCHING SNPS). The vendor format is guessed from the column
# count of the raw import.
import_custom <- function(x){
  # 10 cols = MyHeritage
  # 6 cols = FTDNA
  # ? cols = DNAGedcom FTDNA
  # ? cols = DNAGedcom MyHeritage
  # ? cols = Gedmatch
  imported <- import(x, encoding = "UTF-8", setclass="data.table", blank.lines.skip = TRUE)
  if(ncol(imported) == 6) {
    # FTDNA export: no tester-name column, so NAME is filled with NA and the
    # remaining columns are renamed to the canonical headers.
    imported %>%
      lazy_dt() %>%
      mutate_at(1:2, trimws) %>%
      mutate(Name = NA_character_) %>%
      transmute(NAME = Name,
                MATCHNAME = `Match Name`,
                CHROMOSOME = Chromosome,
                `START LOCATION` = `Start Location`,
                `END LOCATION` = `End Location`,
                CENTIMORGANS = Centimorgans,
                `MATCHING SNPS` = `Matching SNPs`) %>%
      as.data.table() ->
      imported
  }
  if(ncol(imported) == 10){
    # MyHeritage export: keep 7 of the 10 columns and rename positionally.
    imported %>%
      lazy_dt() %>%
      mutate_at(1:3, trimws) %>%
      select(2, 3, 4, 5, 6, 9, 10) %>%
      distinct() %>%
      as.data.table() ->
      imported
    colnames(imported) <- c("NAME",
                            "MATCHNAME",
                            "CHROMOSOME",
                            "START LOCATION",
                            "END LOCATION",
                            "CENTIMORGANS",
                            "MATCHING SNPS")
    # Drop padding rows with no chromosome and force CHROMOSOME to character
    # so it binds cleanly with the FTDNA branch.
    imported %>%
      lazy_dt() %>%
      filter(!is.na(CHROMOSOME)) %>%
      mutate(CHROMOSOME = as.character(CHROMOSOME)) %>%
      as.data.table() ->
      imported
  }
  # NOTE(review): files with any other column count (DNAGedcom, Gedmatch --
  # the "?" entries above) are returned as imported, without normalization.
  return(imported)
}
shinyServer(function(input, output, session) {
inFile <- reactive({
if (is.null(input$file)) {
return(NULL)
} else {
input$file
}
})
inFile2 <- reactive({
if (is.null(input$file2)) {
return(NULL)
} else {
input$file2
}
})
  # Reads every uploaded file, normalizes each via import_custom(), collapses
  # whitespace variants of the same match name, and computes per-match totals
  # `Shared cM` and `Longest Block` over autosomes only -- the
  # (CHROMOSOME != "X") factor zeroes out X-chromosome segments before
  # sum()/max(). Returns NULL until a file is uploaded.
  importData <- reactive({
    if (is.null(inFile())) {
      return(NULL)
    } else {
      rbindlist(lapply(inFile()$datapath, import_custom)) ->
        dat1
      # Build a lookup of distinct match names with normalized spacing so the
      # same person exported with different spacing dedupes on join.
      # NOTE(review): as written these str_replace_all() calls replace a
      # single space with a single space (a no-op); the original likely used
      # multi-space patterns that were lost in transcription -- verify.
      data.table(MATCHNAME = unique(dat1$MATCHNAME)) %>%
        lazy_dt() %>%
        mutate(MATCHNAME2 = MATCHNAME %>% str_replace_all(" ", " ") %>% str_replace_all(" ", " ") %>% str_replace_all(" ", " ")) %>%
        full_join(., dat1) %>%
        select(NAME, everything()) %>%
        select(-MATCHNAME) %>%
        rename(MATCHNAME = MATCHNAME2) %>%
        as.data.table() %>%
        #nest_by.(NAME) %>%
        #lazy_dt() %>%
        #mutate(NAME = NAME %>% str_remove("\"")) %>%
        #as.data.table() %>%
        #unnest.() %>%
        lazy_dt() %>%
        group_by(#NAME,
          MATCHNAME) %>%
        mutate(`Shared cM`= sum(CENTIMORGANS*(CHROMOSOME != "X")),
               `Longest Block` = max(CENTIMORGANS*(CHROMOSOME != "X"))) %>%
        ungroup() %>%
        mutate(`Shared cM` = `Shared cM` %>% round(2),
               `Longest Block` = `Longest Block` %>% round(2)) %>%
        as.data.table() -> out
      # out <- overlap_in_lists(out)
      out}
  })
## DEPRECATED
# matchesData <- reactive({
# if (is.null(input$file2)) {
# return(NULL)
# } else {rbindlist(lapply(inFile2()$datapath, import,
# #`Match Date` = col_date(format = "%m/%d/%Y"),
# encoding = "UTF-8")) %>% #na = c("N/A","")
# # group_by(`Full Name`) %>% dplyr::filter(`Match Date` == min(`Match Date`)) %>% ungroup %>% #Unnecessary
# mutate(MATCHNAME=`Full Name` %>%
# gsub(" "," ", x = .) %>% gsub(" "," ", x = .) %>% gsub(" "," ", x = .)) %>%
# select(MATCHNAME, `Match Date`,`Relationship Range`,`Suggested Relationship`,`Shared cM`,`Longest Block`,`Email`,`Ancestral Surnames`,`Y-DNA Haplogroup`,`mtDNA Haplogroup`) %>%
# mutate(`Shared cM`=`Shared cM` %>% signif(digits=2),
# `Longest Block`=`Longest Block` %>% signif(digits=2))}})
#
# names <- reactive({
# uniques <- data.frame(names=unique(importData()$NAME))
# if(1<nrow(uniques))
# {uniques$names}
# else
# {NULL}
# }
# )
segments <- reactive({
if (is.null(inFile())) {
return(NULL)
} else {#if(is.null(matchesData())){
findoverlapping_segments(dataset = importData(),
cM=input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
lazy_dt() %>%
transmute(NAME,
MATCHNAME,
CHR=CHROMOSOME,
START = `START LOCATION`,
END = `END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`) %>%
as.data.table() ->
out
#out <- overlap_in_lists(out)
out#}
# else{
# findoverlapping_segments(dataset = importData(),
# cM = input$cM,
# name = input$name %>% as.vector(),
# exclude = input$exclude %>% as.vector()) %>%
# lazy_dt() %>%
# transmute(NAME,
# MATCHNAME,
# CHR=CHROMOSOME,
# START = `START LOCATION`,
# END = `END LOCATION`,
# CENTIMORGANS,
# `MATCHING SNPS`,
# `Shared cM`,
# `Longest Block`) %>%
# left_join(matchesData()) %>%
# select(-`Ancestral Surnames`,
# -`Y-DNA Haplogroup`,
# -`mtDNA Haplogroup`,
# -`Shared cM`,
# -`Longest Block`,
# -`Suggested Relationship`,
# -`Shared cM`,
# -`Longest Block`) %>%
# as.data.table() ->
# out
# out <- overlap_in_lists(out)
# out}
}
})
segments_out <- reactive({
if (is.null(inFile())) {
return(NULL)
} else {if (is.null(matchesData())) {
findoverlapping_segments(dataset = importData(),
cM = input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
transmute(NAME,
MATCHNAME,
CHROMOSOME,
`START LOCATION`,
`END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`)}else{
out <- importData() %>%
findoverlapping_segments(cM = input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
lazy_dt() %>%
left_join(matchesData()) %>%
select(-`Ancestral Surnames`,
-`Y-DNA Haplogroup`,
-`mtDNA Haplogroup`,
-`Shared cM`,
-`Longest Block`,
-`Suggested Relationship`) %>%
as.data.table() -> out
#out <- overlap_in_lists(out)
out}
}
})
observe({updateSelectizeInput(
session,
"name",
choices=importData()$MATCHNAME, #selected = names(),
server = TRUE)})
observe({updateSelectizeInput(
session,
"exclude",
choices=importData()$MATCHNAME, server = TRUE)})
observe({
output$table <- DT::renderDataTable({ if (is.null(inFile())) {
return(NULL)
} else {DT::datatable(
segments(),
filter = 'top', extensions = c('Buttons', 'Scroller'),
options = list(scrollY = 650,
scrollX = 500,
deferRender = TRUE,
scroller = TRUE,
buttons = list('excel', "csv"),
dom = 'lBfrtip',
fixedColumns = TRUE),
rownames = FALSE,
selection = list(mode="multiple", target = "row", selected = which(segments()$MATCHNAME %in% {input$name %>% as.vector()})))}}, server = TRUE)})
# observe({
# output$downloadData_csv <- downloadHandler(
# filename = "overlapping segments.csv",
# content = function(file) {
# write.csv(segments_out(),
# file,
# row.names =
# FALSE, eol = "\r\n")
# }
# )
# })
# observe({output$downloadData_xlsx <- downloadHandler(
# filename="overlapping segments.xlsx",
# content = function(file){
# xlsx::write.xlsx(segments_out(),
# file,
# sheetName = "Overlapping segments",
# row.names = FALSE)
# }
# )
# })
})
| /server.R | no_license | StaffanBetner/overlappingsegments | R | false | false | 11,446 | r | if (!("pacman" %in% rownames(installed.packages()))) {install.packages("pacman")}
pacman::p_load(shiny,
tidytable,
tidyverse,
data.table,
htmlwidgets,
DT,
kableExtra,
reshape2,
dtplyr,
rio,
janitor)
options(shiny.maxRequestSize=50*1024^2)
'%!in%' <- function(x,y)!('%in%'(x,y))
findoverlapping_segments <- function(dataset, cM = 7, name = NULL, exclude=NULL){
dataset %>%
lazy_dt() %>%
filter(CENTIMORGANS > cM) %>%
filter(MATCHNAME %!in% exclude) %>%
as.data.table() ->
dataset
setkey(dataset, CHROMOSOME, `START LOCATION`, `END LOCATION`)
if((is_empty(name)) == T){
olaps = foverlaps(dataset, dataset, type="any", which=FALSE) %>%
lazy_dt() %>%
filter(MATCHNAME != i.MATCHNAME) %>%
select(1:9) %>%
distinct() %>%
as.data.table()
}else{
dataset_name <- dataset %>%
lazy_dt() %>%
filter(MATCHNAME %in% name) %>%
as.data.table()
olaps = foverlaps(dataset_name, dataset, type="any", which=FALSE)
olaps %>%
lazy_dt() %>%
filter(MATCHNAME != i.MATCHNAME) %>%
as.data.table() -> olaps
olaps[,c(1:9)] -> olaps1
olaps[,c(1,10:17)] -> olaps2
colnames(olaps2) <- colnames(olaps1)
olaps2 %>% lazy_dt() %>% full_join(olaps1) %>% distinct() %>%
#mutate(sorter = !(MATCHNAME %in% name)) %>%
as.data.table -> olaps
setkey(olaps, CHROMOSOME, `START LOCATION`, #sorter,
`END LOCATION`)
olaps %>% #select.(-sorter) %>%
as.data.table() -> olaps
}
olaps %>%
lazy_dt() %>%
mutate(CHROMOSOME = CHROMOSOME %>% factor(labels = c(1:22, "X") %>% as.character(),
levels = c(1:22, "X") %>% as.character(),
ordered = T)) %>%
arrange(CHROMOSOME,
`START LOCATION`) %>%
select(NAME,
MATCHNAME,
CHROMOSOME,
`START LOCATION`,
`END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`,
`Shared cM`,
`Longest Block`) %>%
as.data.table() -> output
return(output)
}
# overlap_in_lists <- function(out){if(length(unique(out$NAME)) > 1){
# out %>%
# lazy_dt() %>%
# distinct(MATCHNAME, NAME) %>%
# group_by(MATCHNAME) %>%
# summarise(n = n()) %>%
# ungroup() %>%
# filter(n > 1) %>%
# as.data.table() %>%
# pull(MATCHNAME) %>%
# c(unique(out$NAME)) ->
# uniques_shared_matches
#
# out <- out %>% lazy_dt() %>% filter(MATCHNAME %in% uniques_shared_matches) %>% as.data.table()
# out}
# else{out}}
import_custom <- function(x){
# 10 cols = MyHeritage
# 6 cols = FTDNA
# ? cols = DNAGedcom FTDNA
# ? cols = DNAGedcom MyHeritage
# ? cols = Gedmatch
imported <- import(x, encoding = "UTF-8", setclass="data.table", blank.lines.skip = TRUE)
if(ncol(imported) == 6) {
imported %>%
lazy_dt() %>%
mutate_at(1:2, trimws) %>%
mutate(Name = NA_character_) %>%
transmute(NAME = Name,
MATCHNAME = `Match Name`,
CHROMOSOME = Chromosome,
`START LOCATION` = `Start Location`,
`END LOCATION` = `End Location`,
CENTIMORGANS = Centimorgans,
`MATCHING SNPS` = `Matching SNPs`) %>%
as.data.table() ->
imported
}
if(ncol(imported) == 10){
imported %>%
lazy_dt() %>%
mutate_at(1:3, trimws) %>%
select(2, 3, 4, 5, 6, 9, 10) %>%
distinct() %>%
as.data.table() ->
imported
colnames(imported) <- c("NAME",
"MATCHNAME",
"CHROMOSOME",
"START LOCATION",
"END LOCATION",
"CENTIMORGANS",
"MATCHING SNPS")
imported %>%
lazy_dt() %>%
filter(!is.na(CHROMOSOME)) %>%
mutate(CHROMOSOME = as.character(CHROMOSOME)) %>%
as.data.table() ->
imported
}
return(imported)
}
shinyServer(function(input, output, session) {
inFile <- reactive({
if (is.null(input$file)) {
return(NULL)
} else {
input$file
}
})
inFile2 <- reactive({
if (is.null(input$file2)) {
return(NULL)
} else {
input$file2
}
})
importData <- reactive({
if (is.null(inFile())) {
return(NULL)
} else {
rbindlist(lapply(inFile()$datapath, import_custom)) ->
dat1
data.table(MATCHNAME = unique(dat1$MATCHNAME)) %>%
lazy_dt() %>%
mutate(MATCHNAME2 = MATCHNAME %>% str_replace_all(" ", " ") %>% str_replace_all(" ", " ") %>% str_replace_all(" ", " ")) %>%
full_join(., dat1) %>%
select(NAME, everything()) %>%
select(-MATCHNAME) %>%
rename(MATCHNAME = MATCHNAME2) %>%
as.data.table() %>%
#nest_by.(NAME) %>%
#lazy_dt() %>%
#mutate(NAME = NAME %>% str_remove("\"")) %>%
#as.data.table() %>%
#unnest.() %>%
lazy_dt() %>%
group_by(#NAME,
MATCHNAME) %>%
mutate(`Shared cM`= sum(CENTIMORGANS*(CHROMOSOME != "X")),
`Longest Block` = max(CENTIMORGANS*(CHROMOSOME != "X"))) %>%
ungroup() %>%
mutate(`Shared cM` = `Shared cM` %>% round(2),
`Longest Block` = `Longest Block` %>% round(2)) %>%
as.data.table() -> out
# out <- overlap_in_lists(out)
out}
})
## DEPRECATED
# matchesData <- reactive({
# if (is.null(input$file2)) {
# return(NULL)
# } else {rbindlist(lapply(inFile2()$datapath, import,
# #`Match Date` = col_date(format = "%m/%d/%Y"),
# encoding = "UTF-8")) %>% #na = c("N/A","")
# # group_by(`Full Name`) %>% dplyr::filter(`Match Date` == min(`Match Date`)) %>% ungroup %>% #Unnecessary
# mutate(MATCHNAME=`Full Name` %>%
# gsub(" "," ", x = .) %>% gsub(" "," ", x = .) %>% gsub(" "," ", x = .)) %>%
# select(MATCHNAME, `Match Date`,`Relationship Range`,`Suggested Relationship`,`Shared cM`,`Longest Block`,`Email`,`Ancestral Surnames`,`Y-DNA Haplogroup`,`mtDNA Haplogroup`) %>%
# mutate(`Shared cM`=`Shared cM` %>% signif(digits=2),
# `Longest Block`=`Longest Block` %>% signif(digits=2))}})
#
# names <- reactive({
# uniques <- data.frame(names=unique(importData()$NAME))
# if(1<nrow(uniques))
# {uniques$names}
# else
# {NULL}
# }
# )
segments <- reactive({
if (is.null(inFile())) {
return(NULL)
} else {#if(is.null(matchesData())){
findoverlapping_segments(dataset = importData(),
cM=input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
lazy_dt() %>%
transmute(NAME,
MATCHNAME,
CHR=CHROMOSOME,
START = `START LOCATION`,
END = `END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`) %>%
as.data.table() ->
out
#out <- overlap_in_lists(out)
out#}
# else{
# findoverlapping_segments(dataset = importData(),
# cM = input$cM,
# name = input$name %>% as.vector(),
# exclude = input$exclude %>% as.vector()) %>%
# lazy_dt() %>%
# transmute(NAME,
# MATCHNAME,
# CHR=CHROMOSOME,
# START = `START LOCATION`,
# END = `END LOCATION`,
# CENTIMORGANS,
# `MATCHING SNPS`,
# `Shared cM`,
# `Longest Block`) %>%
# left_join(matchesData()) %>%
# select(-`Ancestral Surnames`,
# -`Y-DNA Haplogroup`,
# -`mtDNA Haplogroup`,
# -`Shared cM`,
# -`Longest Block`,
# -`Suggested Relationship`,
# -`Shared cM`,
# -`Longest Block`) %>%
# as.data.table() ->
# out
# out <- overlap_in_lists(out)
# out}
}
})
segments_out <- reactive({
if (is.null(inFile())) {
return(NULL)
} else {if (is.null(matchesData())) {
findoverlapping_segments(dataset = importData(),
cM = input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
transmute(NAME,
MATCHNAME,
CHROMOSOME,
`START LOCATION`,
`END LOCATION`,
CENTIMORGANS,
`MATCHING SNPS`)}else{
out <- importData() %>%
findoverlapping_segments(cM = input$cM,
name = input$name %>% as.vector(),
exclude = input$exclude %>% as.vector()) %>%
lazy_dt() %>%
left_join(matchesData()) %>%
select(-`Ancestral Surnames`,
-`Y-DNA Haplogroup`,
-`mtDNA Haplogroup`,
-`Shared cM`,
-`Longest Block`,
-`Suggested Relationship`) %>%
as.data.table() -> out
#out <- overlap_in_lists(out)
out}
}
})
observe({updateSelectizeInput(
session,
"name",
choices=importData()$MATCHNAME, #selected = names(),
server = TRUE)})
observe({updateSelectizeInput(
session,
"exclude",
choices=importData()$MATCHNAME, server = TRUE)})
observe({
output$table <- DT::renderDataTable({ if (is.null(inFile())) {
return(NULL)
} else {DT::datatable(
segments(),
filter = 'top', extensions = c('Buttons', 'Scroller'),
options = list(scrollY = 650,
scrollX = 500,
deferRender = TRUE,
scroller = TRUE,
buttons = list('excel', "csv"),
dom = 'lBfrtip',
fixedColumns = TRUE),
rownames = FALSE,
selection = list(mode="multiple", target = "row", selected = which(segments()$MATCHNAME %in% {input$name %>% as.vector()})))}}, server = TRUE)})
# observe({
# output$downloadData_csv <- downloadHandler(
# filename = "overlapping segments.csv",
# content = function(file) {
# write.csv(segments_out(),
# file,
# row.names =
# FALSE, eol = "\r\n")
# }
# )
# })
# observe({output$downloadData_xlsx <- downloadHandler(
# filename="overlapping segments.xlsx",
# content = function(file){
# xlsx::write.xlsx(segments_out(),
# file,
# sheetName = "Overlapping segments",
# row.names = FALSE)
# }
# )
# })
})
|
================================================================================
Book - Big Data Analytics with R and Hadoop
Book URL - https://www.packtpub.com/big-data-analytics-with-r-and-hadoop/book
Chapter - 6 Understanding Big Data Analysis with Machine Learning
Author - Vignesh Prajapati
Contact - a. email -> vignesh2066@gmail.com
b. LinkedIn -> http://www.linkedin.com/in/vigneshprajapati
================================================================================
# distance calculation function
# Squared Euclidean distances between every data point and every center.
# C: k x d matrix of cluster centers (one per row).
# P: n x d matrix of data points (one per row).
# Returns an n x k matrix whose [i, j] entry is ||P[i, ] - C[j, ]||^2
# (apply() collapses it to a length-n vector when k == 1).
dist.fun = function(C, P) {
  apply(C, 1, function(center) rowSums(sweep(P, 2, center)^2))
}
# k-Means Mapper
# k-Means mapper for rmr2's mapreduce(): assigns each row of the point chunk
# P to a cluster and emits (cluster id, point) pairs via keyval().
# NOTE(review): C, num.clusters, combine and in.memory.combine are free
# variables resolved from the enclosing environment at run time (rmr2
# serializes the calling environment to the workers) -- confirm they are in
# scope when this runs inside kmeans.mr().
kmeans.map = function(., P) {
      # First iteration (no centers yet): assign points to clusters at random.
      # Later iterations: pick the nearest center by squared distance
      # (max.col of the negated distance matrix = argmin per row).
      nearest = {
        if(is.null(C))
          sample(
            1:num.clusters,
            nrow(P),
            replace = T)
        else {
          D = dist.fun(C, P)
          nearest = max.col(-D)}}
      # When combining, prepend a count column of 1s so the reducer can sum
      # counts and coordinates together.
      if(!(combine || in.memory.combine))
        keyval(nearest, P)
      else
        keyval(nearest, cbind(1, P))}
# k-Means Reducer
# k-Means reducer, chosen once at definition time based on the combine flags:
# without combining, average the points of each cluster (new center); with
# combining, emit column sums (count column included) so the driver can divide
# sums by counts afterwards.
# NOTE(review): this is a plain { } block, so combine / in.memory.combine must
# already be defined when this line is evaluated -- confirm script order.
kmeans.reduce = {
      if (!(combine || in.memory.combine) )
        function(., P)
          t(as.matrix(apply(P, 2, mean)))
      else
        function(k, P)
          keyval(
            k,
            t(as.matrix(apply(P, 2, sum))))}
# k-Means driver: runs num.iter map-reduce passes over the points in dfs
# object P and returns the final matrix of cluster centers (one per row).
# NOTE(review): kmeans.map reads C and num.clusters lexically; whether it sees
# the values local to this function depends on rmr2 capturing the calling
# environment -- verify against the rmr2 tutorial this example comes from.
kmeans.mr = function( P, num.clusters, num.iter, combine, in.memory.combine) {
      C = NULL
      for(i in 1:num.iter ) {
        C = values(from.dfs(mapreduce(P,
                                      map = kmeans.map,
                                      reduce = kmeans.reduce)))
        # Combined output carries a count column: divide sums by counts to
        # recover the centroids.
        if(combine || in.memory.combine)
          C = C[, -1]/C[, 1]
        # If some clusters lost all their points, reseed the missing centers
        # as random linear combinations of the surviving ones.
        if(nrow(C) < num.clusters) {
          C = rbind(C, matrix(rnorm((num.clusters - nrow(C)) * nrow(C)), ncol = nrow(C)) %*% C) }}
      C
    }
P = do.call(rbind,rep(list(matrix(rnorm(10, sd = 10),
ncol=2)),
20)) + matrix(rnorm(200),
ncol =2)
kmeans.mr(to.dfs(P),
num.clusters = 12,
num.iter = 5,
combine = FALSE,
in.memory.combine = FALSE) | /book/packt/Big.Data.Analytics.with.R.and.Hadoop/3282OS_06_Code/Clustering/Clustering_RHadoop.R | no_license | xenron/sandbox-da-hadoop | R | false | false | 2,028 | r | ================================================================================
Book - Big Data Analytics with R and Hadoop
Book URL - https://www.packtpub.com/big-data-analytics-with-r-and-hadoop/book
Chapter - 6 Understanding Big Data Analysis with Machine Learning
Author - Vignesh Prajapati
Contact - a. email -> vignesh2066@gmail.com
b. LinkedIn -> http://www.linkedin.com/in/vigneshprajapati
================================================================================
# distance calculation function
dist.fun = function(C, P) {
apply(C,1, function(x) colSums((t(P) - x)^2))}
# k-Means Mapper
kmeans.map = function(., P) {
nearest = {
if(is.null(C))
sample(
1:num.clusters,
nrow(P),
replace = T)
else {
D = dist.fun(C, P)
nearest = max.col(-D)}}
if(!(combine || in.memory.combine))
keyval(nearest, P)
else
keyval(nearest, cbind(1, P))}
# k-Means Reducer
kmeans.reduce = {
if (!(combine || in.memory.combine) )
function(., P)
t(as.matrix(apply(P, 2, mean)))
else
function(k, P)
keyval(
k,
t(as.matrix(apply(P, 2, sum))))}
kmeans.mr = function( P, num.clusters, num.iter, combine, in.memory.combine) {
C = NULL
for(i in 1:num.iter ) {
C = values(from.dfs(mapreduce(P,
map = kmeans.map,
reduce = kmeans.reduce)))
if(combine || in.memory.combine)
C = C[, -1]/C[, 1]
if(nrow(C) < num.clusters) {
C = rbind(C, matrix(rnorm((num.clusters - nrow(C)) * nrow(C)), ncol = nrow(C)) %*% C) }}
C
}
P = do.call(rbind,rep(list(matrix(rnorm(10, sd = 10),
ncol=2)),
20)) + matrix(rnorm(200),
ncol =2)
kmeans.mr(to.dfs(P),
num.clusters = 12,
num.iter = 5,
combine = FALSE,
in.memory.combine = FALSE) |
library(rvest)
library(tidyverse)
# wyciągamy linki do poszczególnych kategorii
kategorie_page <- read_html("https://fixly.pl/kategorie")
linki <- kategorie_page %>%
html_nodes("a") %>%
html_attr("href")
kategorie <- linki[sapply(linki, function(x) grepl("https://fixly.pl/kategoria/",x)) == TRUE]
kategorie_ile <- length(kategorie)
#nazwy kategorii
kategorie_nazwy <- map_chr(kategorie, function(x) gsub("https://fixly.pl/kategoria/", "", x))
# wyciągamy wykonawców z poszczególnych kategorii
wykonawcy_wg_kategorii <- vector(mode = "list", length = kategorie_ile)
names(wykonawcy_wg_kategorii) <- kategorie_nazwy
for (i in 1:kategorie_ile) {
podstrona <- read_html(kategorie[i])
wykonawcy_w_kategorii_page <- podstrona %>%
html_nodes("a") %>%
html_attr("href")
wykonawcy_w_kategorii_page <- wykonawcy_w_kategorii_page[sapply(wykonawcy_w_kategorii_page, function(x) grepl("https://fixly.pl/profil/",x)) == TRUE] %>%
map_chr(function(x) gsub("#feedback", "", x))
wykonawcy_wg_kategorii[[i]] <- data.frame(wykonawca_link = wykonawcy_w_kategorii_page, kategoria = kategorie_nazwy[i])
Sys.sleep(sample(seq(0.1,1,0.1),1))
}
wykonawcy_i_kategorie_df <- bind_rows(wykonawcy_wg_kategorii)
wykonawcy_i_kategorie_df$wykonawca_nazwa <- map_chr(wykonawcy_i_kategorie_df$wykonawca, function(x) gsub("https://fixly.pl/profil/", "", x))
wykonawcy_i_kategorie_df <- unique(wykonawcy_i_kategorie_df)
print(
wykonawcy_i_kategorie_df %>%
group_by(wykonawca_nazwa) %>%
count() %>%
arrange(desc(n))
)
wykonawcy <- unique(wykonawcy_i_kategorie_df$wykonawca_link)
wykonawca_ile <- length(wykonawcy)
# Extract every node matching an XPath expression from a parsed HTML page.
# (Polish: "scrape by xpath".)
wyscrapuj_po_xpath <- function(podstrona, xpath) {
  html_nodes(podstrona, xpath = xpath)
}
# Returns TRUE iff an HTTP HEAD request to `url` answers with status 200;
# any error (network failure, bad URL) yields FALSE.
# BUG FIX: HEAD() and status_code() live in httr, which is never attached
# (library(rvest)/library(tidyverse) do not attach it), so the unqualified
# calls errored and tryCatch() made this function return FALSE for every
# URL. Namespace-qualifying the calls restores the intended check; httr is
# already installed as a dependency of rvest.
czy_url_dziala <- function(url){
  tryCatch(
    identical(httr::status_code(httr::HEAD(url)), 200L),
    error = function(e){
      FALSE
    })
}
# Normalize "nothing found" scraping results for data.frame construction:
# a zero-length result (e.g. character(0) from html_text() on zero nodes)
# becomes the literal string "NA" so the profile row can still be built;
# anything else passes through untouched.
# BUG FIX: the original ifelse(identical(value, character(0)), "NA", value)
# silently truncated multi-element results to their first element (ifelse()
# returns a result as long as its test, which is length 1 here) and only
# recognized character(0) as empty. A plain if/else on length() fixes both.
czy_jest_puste <- function(value) {
  if (length(value) == 0) "NA" else value
}
# teraz trzeba by wyciągnąć dane o każdym wykonawcy
wykonawcy_opis<- vector(mode = "list", length = wykonawca_ile)
for(i in 1:wykonawca_ile) {
if(!czy_url_dziala(wykonawcy[i])) {
next }
podstrona <- read_html(wykonawcy[i])
liczba_gwiazdek <- wyscrapuj_po_xpath(podstrona, '//*[contains(concat( " ", @class, " " ), concat( " ", "fa-star", " " ))]') %>%
length()
lokalizacja <- wyscrapuj_po_xpath(podstrona, '//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__address", " " ))]') %>%
html_text() %>%
str_replace_all("\n", "") %>%
trimws(which="both")
liczba_kategorii_na_profilu <- sapply(podstrona, function (x) wyscrapuj_po_xpath(podstrona,
'//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__to-category", " " ))]'),
simplify = FALSE)$doc %>%
html_text() %>%
length()
wykonawcy_opis_na_profilu <- wyscrapuj_po_xpath(podstrona,
'//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__details", " " ))]') %>%
html_text() %>%
str_replace_all("\n|\r", "") %>%
trimws(which = "both")
liczba_gwiazdek <- czy_jest_puste(liczba_gwiazdek)
wykonawcy_opis_na_profilu <- czy_jest_puste(wykonawcy_opis_na_profilu)
liczba_kategorii_na_profilu <- czy_jest_puste(liczba_kategorii_na_profilu)
lokalizacja <- czy_jest_puste(lokalizacja)
opis <- data.frame(Wykonawca = wykonawcy[i],
Liczba_gwiazdek = liczba_gwiazdek,
Opis_na_profilu = wykonawcy_opis_na_profilu,
Liczba_kategorii = liczba_kategorii_na_profilu,
Lokalizacja = lokalizacja)
wykonawcy_opis[[i]] <- opis
Sys.sleep(sample(seq(0.1,1,0.1),1))
}
wykonawcy_opis_df <- bind_rows(wykonawcy_opis)
wykonawcy_grupowani <- wykonawcy_i_kategorie_df %>%
group_by(wykonawca_link) %>%
count()
names(wykonawcy_grupowani)[(names(wykonawcy_grupowani)) =="n"] <- "Liczba_kategorii_z_opinia"
wykonawcy_polaczeni <- merge(wykonawcy_opis_df, wykonawcy_grupowani, by.x="Wykonawca", by.y="wykonawca_link")
write_csv(wykonawcy_polaczeni, "wykonawcy.csv") | /fixly.R | no_license | mkmozgawa/learning-ml | R | false | false | 4,220 | r | library(rvest)
library(tidyverse)
# wyciągamy linki do poszczególnych kategorii
kategorie_page <- read_html("https://fixly.pl/kategorie")
linki <- kategorie_page %>%
html_nodes("a") %>%
html_attr("href")
kategorie <- linki[sapply(linki, function(x) grepl("https://fixly.pl/kategoria/",x)) == TRUE]
kategorie_ile <- length(kategorie)
#nazwy kategorii
kategorie_nazwy <- map_chr(kategorie, function(x) gsub("https://fixly.pl/kategoria/", "", x))
# wyciągamy wykonawców z poszczególnych kategorii
wykonawcy_wg_kategorii <- vector(mode = "list", length = kategorie_ile)
names(wykonawcy_wg_kategorii) <- kategorie_nazwy
for (i in 1:kategorie_ile) {
podstrona <- read_html(kategorie[i])
wykonawcy_w_kategorii_page <- podstrona %>%
html_nodes("a") %>%
html_attr("href")
wykonawcy_w_kategorii_page <- wykonawcy_w_kategorii_page[sapply(wykonawcy_w_kategorii_page, function(x) grepl("https://fixly.pl/profil/",x)) == TRUE] %>%
map_chr(function(x) gsub("#feedback", "", x))
wykonawcy_wg_kategorii[[i]] <- data.frame(wykonawca_link = wykonawcy_w_kategorii_page, kategoria = kategorie_nazwy[i])
Sys.sleep(sample(seq(0.1,1,0.1),1))
}
wykonawcy_i_kategorie_df <- bind_rows(wykonawcy_wg_kategorii)
wykonawcy_i_kategorie_df$wykonawca_nazwa <- map_chr(wykonawcy_i_kategorie_df$wykonawca, function(x) gsub("https://fixly.pl/profil/", "", x))
wykonawcy_i_kategorie_df <- unique(wykonawcy_i_kategorie_df)
print(
wykonawcy_i_kategorie_df %>%
group_by(wykonawca_nazwa) %>%
count() %>%
arrange(desc(n))
)
wykonawcy <- unique(wykonawcy_i_kategorie_df$wykonawca_link)
wykonawca_ile <- length(wykonawcy)
wyscrapuj_po_xpath <- function(podstrona, xpath) {
return(podstrona %>% html_nodes(xpath=xpath))
}
czy_url_dziala <- function(url){
tryCatch(
identical(status_code(HEAD(url)),200L),
error = function(e){
FALSE
})
}
czy_jest_puste <- function(value) {
return(ifelse(identical(value,character(0)),"NA", value))
}
# teraz trzeba by wyciągnąć dane o każdym wykonawcy
wykonawcy_opis<- vector(mode = "list", length = wykonawca_ile)
for(i in 1:wykonawca_ile) {
if(!czy_url_dziala(wykonawcy[i])) {
next }
podstrona <- read_html(wykonawcy[i])
liczba_gwiazdek <- wyscrapuj_po_xpath(podstrona, '//*[contains(concat( " ", @class, " " ), concat( " ", "fa-star", " " ))]') %>%
length()
lokalizacja <- wyscrapuj_po_xpath(podstrona, '//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__address", " " ))]') %>%
html_text() %>%
str_replace_all("\n", "") %>%
trimws(which="both")
liczba_kategorii_na_profilu <- sapply(podstrona, function (x) wyscrapuj_po_xpath(podstrona,
'//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__to-category", " " ))]'),
simplify = FALSE)$doc %>%
html_text() %>%
length()
wykonawcy_opis_na_profilu <- wyscrapuj_po_xpath(podstrona,
'//*[contains(concat( " ", @class, " " ), concat( " ", "publicProfile__details", " " ))]') %>%
html_text() %>%
str_replace_all("\n|\r", "") %>%
trimws(which = "both")
liczba_gwiazdek <- czy_jest_puste(liczba_gwiazdek)
wykonawcy_opis_na_profilu <- czy_jest_puste(wykonawcy_opis_na_profilu)
liczba_kategorii_na_profilu <- czy_jest_puste(liczba_kategorii_na_profilu)
lokalizacja <- czy_jest_puste(lokalizacja)
opis <- data.frame(Wykonawca = wykonawcy[i],
Liczba_gwiazdek = liczba_gwiazdek,
Opis_na_profilu = wykonawcy_opis_na_profilu,
Liczba_kategorii = liczba_kategorii_na_profilu,
Lokalizacja = lokalizacja)
wykonawcy_opis[[i]] <- opis
Sys.sleep(sample(seq(0.1,1,0.1),1))
}
wykonawcy_opis_df <- bind_rows(wykonawcy_opis)
wykonawcy_grupowani <- wykonawcy_i_kategorie_df %>%
group_by(wykonawca_link) %>%
count()
names(wykonawcy_grupowani)[(names(wykonawcy_grupowani)) =="n"] <- "Liczba_kategorii_z_opinia"
wykonawcy_polaczeni <- merge(wykonawcy_opis_df, wykonawcy_grupowani, by.x="Wykonawca", by.y="wykonawca_link")
write_csv(wykonawcy_polaczeni, "wykonawcy.csv") |
library(partykit);library(strucchange);library(sandwich);library(mvtnorm)
source("ERAfunction_simulation.r")
#---------------------
# Random ERA data generate: Becker, Rai, & Rigdon (2013)
# "Predictive validity and formative measurement in structural equation modeling: Embracing practical relevance."
#---------------------
ncomp <- 2 # num. of components
r <- 0 # corr(F1,F2)
B1 <- 0.3 # reg coeff for F1
R2 <- 0.4 # R2 of endo- composite
B2 <- 0.5568 # obtained reg coeff for F2 based on (R2, b1, r)
corstr <- 1 # corr structure of predictors (1= nearby pair)
npred <- 4 # num. of predictors per comp
corX <- 0 # corr among predictors
w1 <- c(0.7, 0.6, 0.5, 0.4) # initial (i.e., unstandardized) values for F1 weights
w2 <- c(0.6, 0.5, 0.4, 0.3) # initial values for F2 weights
Nvar <- npred*ncomp # total num. of predictors
nvar_t <- Nvar+1 # total num. of observed variables
nrep <- 1000 # num. of replications
#---------------------
# Design factors
#---------------------
corX <- 0.4 # corr among predictors; standardized weights will vary
gsize <- c(1, 2, 3) # 1=equal/balanced group size, 2=G1,50%, 3=G1,66.67%
delta <- 0 # (0=heterogeniety, 1=homogeneity, by 0.2)
startN <- c(60,90,120,180,300,500) # sample sizes
#---------------------
# Combinations? for each senario
#---------------------
ERA_senario <- expand.grid(F1coef = B1, F2coef = B2, weightCoef = corX)
Tree_senario <- expand.grid(N = startN, GroupN = gsize)
lmfun <- function(y, x, start=NULL, weights=NULL, offset=NULL, ...){lm(y~0+x,...)}
Result <- vector("list", nrow(Tree_senario))
names(Result) <- paste("Tree_senario", 1:nrow(Tree_senario), sep="")
for (sn in 1:nrow(Tree_senario)) {
startN <- Tree_senario[sn,"N"]
gsize <- Tree_senario[sn,"GroupN"]
# Create covariates (Z1, Z2, Z3) with true group memberships in "G_id"
Zdata <- Zgen(N0=startN, delt=delta, GN=gsize)
G1 <- Zdata[Zdata$G_id==1,]
G2 <- Zdata[Zdata$G_id==2,]
G3 <- Zdata[Zdata$G_id==3,]
b10 <- ERA_senario[,"F1coef"]
b20 <- ERA_senario[,"F2coef"]
corrX <- ERA_senario[,"weightCoef"]
# Save space for the outcome measures
Result[[sn]]$Zsplit1 <- matrix(,ncol=nrep)
Result[[sn]]$Power <- matrix(,ncol=nrep)
Result[[sn]]$Accuracy <- matrix(,ncol=nrep)
for (np in 1:nrep) { # 1000 replications
# Group1
ERAG1 <- ERAgen(corX=corrX, b1=b10, b2=b20)
G1d <- MASS::mvrnorm(n = nrow(G1), mu = rep(0, nrow(ERAG1$COV)), Sigma = ERAG1$COV)
G1y <- G1d[,9] # in COV or ERAgen(), the last column is for Y
G1X <- G1d[,1:8]
nvar <- matrix(c(4,4), ncol=2)
G1ERA <- ERA_simul(y=G1y, X=G1X, nvar=nvar)
G1Y <- G1ERA$adj.DV
G1F <- G1ERA$F
G1D <- cbind(G1Y,G1F) # test: lm(G1Y~0+F1+F2, data=G1D) & G1ERA$A
colnames(G1D)[1] <- "Y"
# Group2
ERAG2 <- ERAgen(corX=corrX, b1=b10, b2=-b20)
G2d <- MASS::mvrnorm(n = nrow(G2), mu = rep(0, nrow(ERAG2$COV)), Sigma = ERAG2$COV)
G2y <- G2d[,9] # in COV or ERAgen(), the last column is for Y
G2X <- G2d[,1:8]
G2ERA <- ERA_simul(y=G2y, X=G2X, nvar=nvar)
G2Y <- G2ERA$adj.DV
G2F <- G2ERA$F
G2D <- cbind(G2Y,G2F) # test: lm(G2Y~0+F1+F2, data=G2D) & G2ERA$A
colnames(G2D)[1] <- "Y"
# Group3
ERAG3 <- ERAgen(corX=corrX, b1=-b10, b2=b20)
G3d <- MASS::mvrnorm(n = nrow(G3), mu = rep(0, nrow(ERAG3$COV)), Sigma = ERAG3$COV)
G3y <- G3d[,9] # in COV or ERAgen(), the last column is for Y
G3X <- G3d[,1:8]
G3ERA <- ERA_simul(y=G3y, X=G3X, nvar=nvar)
G3Y <- G3ERA$adj.DV
G3F <- G3ERA$F
G3D <- cbind(G3Y,G3F) # test: lm(G3Y~0+F1+F2, data=G3D) & G3ERA$A
colnames(G3D)[1] <- "Y"
# Final data and ERA
ERAd <- data.frame(rbind(G1D, G2D, G3D))
Data <- cbind(Zdata, ERAd)
# MOB with maxdept=3
MOBf <- mob(Y ~ 0+F1+F2 | Z1+Z2+Z3, data=Data, fit=lmfun,
control = mob_control(ordinal = "L2", alpha=0.1, prune="AIC")
)
nodeP <- predict(MOBf, type="node")
MemP <- length(unique(nodeP)) # predicted memberships
MemT <- length(unique(Data$G_id)) # true memberships
if (MemP == 1) { # no split, 1 group
divided <- FALSE
firstsplit <- NA
CramV <- NA # the "q" below would be 1, leading to the 0 value for denominator
} else {
divided <- TRUE
sct <- sctest(MOBf)$"1"
sct <- sct[,complete.cases(sct["p.value",]),drop=FALSE]
firstsplit <- colnames(sct)[ min(sct["p.value",]) == sct["p.value",]]
if( length(firstsplit) != 1) firstsplit <- firstsplit[1]
# Cramer's V
for (i in 1:length(unique(nodeP))) nodeP[nodeP == unique(nodeP)[i]] <- (i+99)
for (i in 1:length(unique(nodeP))) nodeP[nodeP == unique(nodeP)[i]] <- i
crossT <- table(Data$G_id, nodeP)
n <- sum(crossT)
q <- min( nrow(crossT), ncol(crossT) )
chis <- unname(chisq.test(crossT, correct=FALSE)$statistic)
CramV <- sqrt( chis / (n*(q-1)))
}
# Outcomes
Result[[sn]]$Zsplit1[1,np] <- firstsplit
Result[[sn]]$Power[1,np] <- divided
Result[[sn]]$Accuracy[1,np] <- CramV
} # replications end
}
# Summarize the simulation into one row per tree scenario: the modal first
# split variable (whichZ), how often it was chosen (howmany), the split rate
# over replications (POWER), and the mean Cramer's V (CramerV).
comp <- Result
saveM <- matrix(, nrow = nrow(Tree_senario), ncol = 4,
                dimnames = list(NULL, c("whichZ", "howmany", "POWER", "CramerV")))
saveM <- cbind(Tree_senario, saveM)
for (i in seq_len(nrow(saveM))) {
  # Frequency table of the covariate chosen for the first split (NA entries
  # from non-splitting replications are dropped by table()).
  ttt <- table(comp[[i]]$Zsplit1)
  if (length(ttt) == 0) {
    # No replication ever split: no modal covariate, no count.
    # BUG FIX: the original fell through to as.numeric(tt) here, reusing a
    # stale `tt` from a previous iteration (or erroring on the first one).
    saveM[i, "whichZ"] <- NA
    saveM[i, "howmany"] <- NA
  } else {
    tt <- as.matrix(ttt[ttt == max(ttt)])
    # BUG FIX: on ties the original assigned a length > 1 vector to a single
    # cell; keep the first modal covariate (same tie-break as in the
    # replication loop above).
    saveM[i, "whichZ"] <- rownames(tt)[1]
    saveM[i, "howmany"] <- as.numeric(tt)[1]
  }
  saveM[i, "POWER"] <- mean(as.vector(comp[[i]]$Power))
  saveM[i, "CramerV"] <- mean(comp[[i]]$Accuracy, na.rm = TRUE)
}
saveM
| /Simulation_Hetero_Power.r | no_license | QuantMM/generalizedERA | R | false | false | 5,754 | r | library(partykit);library(strucchange);library(sandwich);library(mvtnorm)
source("ERAfunction_simulation.r")
#---------------------
# Random ERA data generate: Becker, Rai, & Rigdon (2013)
# "Predictive validity and formative measurement in structural equation modeling: Embracing practical relevance."
#---------------------
ncomp <- 2 # num. of components
r <- 0 # corr(F1,F2)
B1 <- 0.3 # reg coeff for F1
R2 <- 0.4 # R2 of endo- composite
B2 <- 0.5568 # obtained reg coeff for F2 based on (R2, b1, r)
corstr <- 1 # corr structure of predictors (1= nearby pair)
npred <- 4 # num. of predictors per comp
corX <- 0 # corr among predictors
w1 <- c(0.7, 0.6, 0.5, 0.4) # initial (i.e., unstandardized) values for F1 weights
w2 <- c(0.6, 0.5, 0.4, 0.3) # initial values for F2 weights
Nvar <- npred*ncomp # total num. of predictors
nvar_t <- Nvar+1 # total num. of observed variables
nrep <- 1000 # num. of replications
#---------------------
# Design factors
#---------------------
corX <- 0.4 # corr among predictors; standardized weights will vary
gsize <- c(1, 2, 3) # 1=equal/balanced group size, 2=G1,50%, 3=G1,66.67%
delta <- 0 # (0=heterogeniety, 1=homogeneity, by 0.2)
startN <- c(60,90,120,180,300,500) # sample sizes
#---------------------
# Combinations? for each senario
#---------------------
ERA_senario <- expand.grid(F1coef = B1, F2coef = B2, weightCoef = corX)
Tree_senario <- expand.grid(N = startN, GroupN = gsize)
lmfun <- function(y, x, start=NULL, weights=NULL, offset=NULL, ...){lm(y~0+x,...)}
# --- Simulation: loop over (sample size x group-size) scenarios; for each,
# run nrep replications and record which covariate mob() splits on first
# (Zsplit1), whether it split at all (Power), and how well the recovered
# partition matches the true groups (Accuracy, Cramer's V).
Result <- vector("list", nrow(Tree_senario))
names(Result) <- paste("Tree_senario", 1:nrow(Tree_senario), sep="")
for (sn in 1:nrow(Tree_senario)) {
startN <- Tree_senario[sn,"N"]
gsize <- Tree_senario[sn,"GroupN"]
# Create covariates (Z1, Z2, Z3) with true group memberships in "G_id"
Zdata <- Zgen(N0=startN, delt=delta, GN=gsize)
G1 <- Zdata[Zdata$G_id==1,]
G2 <- Zdata[Zdata$G_id==2,]
G3 <- Zdata[Zdata$G_id==3,]
b10 <- ERA_senario[,"F1coef"]
b20 <- ERA_senario[,"F2coef"]
corrX <- ERA_senario[,"weightCoef"]
# Save space for the outcome measures
Result[[sn]]$Zsplit1 <- matrix(,ncol=nrep)
Result[[sn]]$Power <- matrix(,ncol=nrep)
Result[[sn]]$Accuracy <- matrix(,ncol=nrep)
for (np in 1:nrep) { # 1000 replications
# Group1: draw multivariate-normal data from the scenario covariance and
# compute the group's ERA composites. Groups differ only in the sign of
# one structural coefficient (G2 flips b2, G3 flips b1).
ERAG1 <- ERAgen(corX=corrX, b1=b10, b2=b20)
G1d <- MASS::mvrnorm(n = nrow(G1), mu = rep(0, nrow(ERAG1$COV)), Sigma = ERAG1$COV)
G1y <- G1d[,9] # in COV or ERAgen(), the last column is for Y
G1X <- G1d[,1:8]
nvar <- matrix(c(4,4), ncol=2)
G1ERA <- ERA_simul(y=G1y, X=G1X, nvar=nvar)
G1Y <- G1ERA$adj.DV
G1F <- G1ERA$F
G1D <- cbind(G1Y,G1F) # test: lm(G1Y~0+F1+F2, data=G1D) & G1ERA$A
colnames(G1D)[1] <- "Y"
# Group2
ERAG2 <- ERAgen(corX=corrX, b1=b10, b2=-b20)
G2d <- MASS::mvrnorm(n = nrow(G2), mu = rep(0, nrow(ERAG2$COV)), Sigma = ERAG2$COV)
G2y <- G2d[,9] # in COV or ERAgen(), the last column is for Y
G2X <- G2d[,1:8]
G2ERA <- ERA_simul(y=G2y, X=G2X, nvar=nvar)
G2Y <- G2ERA$adj.DV
G2F <- G2ERA$F
G2D <- cbind(G2Y,G2F) # test: lm(G2Y~0+F1+F2, data=G2D) & G2ERA$A
colnames(G2D)[1] <- "Y"
# Group3
ERAG3 <- ERAgen(corX=corrX, b1=-b10, b2=b20)
G3d <- MASS::mvrnorm(n = nrow(G3), mu = rep(0, nrow(ERAG3$COV)), Sigma = ERAG3$COV)
G3y <- G3d[,9] # in COV or ERAgen(), the last column is for Y
G3X <- G3d[,1:8]
G3ERA <- ERA_simul(y=G3y, X=G3X, nvar=nvar)
G3Y <- G3ERA$adj.DV
G3F <- G3ERA$F
G3D <- cbind(G3Y,G3F) # test: lm(G3Y~0+F1+F2, data=G3D) & G3ERA$A
colnames(G3D)[1] <- "Y"
# Final data and ERA
ERAd <- data.frame(rbind(G1D, G2D, G3D))
Data <- cbind(Zdata, ERAd)
# MOB with maxdept=3
MOBf <- mob(Y ~ 0+F1+F2 | Z1+Z2+Z3, data=Data, fit=lmfun,
control = mob_control(ordinal = "L2", alpha=0.1, prune="AIC")
)
nodeP <- predict(MOBf, type="node")
MemP <- length(unique(nodeP)) # predicted memberships
MemT <- length(unique(Data$G_id)) # true memberships
if (MemP == 1) { # no split, 1 group
divided <- FALSE
firstsplit <- NA
CramV <- NA # the "q" below would be 1, leading to the 0 value for denominator
} else {
divided <- TRUE
# First-split covariate = smallest parameter-instability p-value at root.
sct <- sctest(MOBf)$"1"
sct <- sct[,complete.cases(sct["p.value",]),drop=FALSE]
firstsplit <- colnames(sct)[ min(sct["p.value",]) == sct["p.value",]]
if( length(firstsplit) != 1) firstsplit <- firstsplit[1]
# Cramer's V
# Relabel predicted node ids to 1..k; the first pass shifts them by +99 so
# the second pass cannot collide with ids that are already small integers.
for (i in 1:length(unique(nodeP))) nodeP[nodeP == unique(nodeP)[i]] <- (i+99)
for (i in 1:length(unique(nodeP))) nodeP[nodeP == unique(nodeP)[i]] <- i
crossT <- table(Data$G_id, nodeP)
n <- sum(crossT)
q <- min( nrow(crossT), ncol(crossT) )
chis <- unname(chisq.test(crossT, correct=FALSE)$statistic)
CramV <- sqrt( chis / (n*(q-1)))
}
# Outcomes
Result[[sn]]$Zsplit1[1,np] <- firstsplit
Result[[sn]]$Power[1,np] <- divided
Result[[sn]]$Accuracy[1,np] <- CramV
} # replications end
}
# --- Summarize across replications: modal first-split covariate and its
# count, proportion of replications that split (POWER), mean Cramer's V.
comp <- Result
saveM <- matrix(,nrow=nrow(Tree_senario),ncol=4,dimnames=list(NULL,c("whichZ","howmany","POWER","CramerV")))
saveM <- cbind(Tree_senario, saveM)
for (i in 1:nrow(saveM)){
ttt <- table(comp[[i]]$Zsplit1)
if ( length(ttt) == 0 ) {saveM[i,"whichZ"] <- NA
} else {
tt <- as.matrix(ttt[ttt==max(ttt)])
saveM[i,"whichZ"] <- rownames(tt)
}
saveM[i,"howmany"] <- as.numeric(tt)
saveM[i,"POWER"] <- mean(as.vector(comp[[i]]$Power))
saveM[i,"CramerV"] <- mean(comp[[i]]$Accuracy, na.rm=T)
}
saveM
|
#' Patch Buffer Analysis
#'
#' This function allows you to measure the relative composition of the buffer of a distribution of patch. WARNING: You need the Python library GDAL to use this function. Go to: https://johnbaumgartner.wordpress.com/2012/07/26/getting-rasters-into-shape-from-r/ for more information on how to proceed.
#' @param X wether (i) the path of the file to analyse, (ii) a matrix where each cell contains a number representing the cell type (as outputed by the function \code{FromPictoRdata}).
#' @param Y if \code{X} the path of the file to analyse, \code{Y} the path to the folder containing named images of unique colors corresponding to a specific type of organism potentially present on the analysed image. If \code{Y=NA}, then \code{X} is considered being a matrix where each cell contains a number representing the cell type (as outputed by the function \code{FromPictoRdata}).
#' @param d the distance around each patch that define the buffer.
#' @param scale the length of one pixel edge on the image (default to 1).
#' @param minimum_size the minimum size of the patches to consider in the analysis (default to 1).
#' @return A dataframe containing the size and relative buffer composition of each patch considered in the analysis.
#' @keywords Mosaic, image analysis, buffer, cover, species, patch size, size distributions.
#' @export
#' @examples #working on it
#setwd('/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/')
#X='/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/MAI_2016.png'
#Y='/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/Legend/'
Buffer_Analysis=function(X,Y=NA,d,scale=1,minimum_size=1,pathtopython=NULL){
# --- Helper: polygonize a raster by shelling out to GDAL's
# gdal_polygonize.py (written by John Baumgartner, github.com/johnbaums).
# Requires the GDAL command-line tools; reads the result back with rgdal
# when readpoly=TRUE.
gdal_polygonizeR <- function(x, outshape=NULL, gdalformat = 'ESRI Shapefile',
pypath=pathtopython, readpoly=TRUE, quiet=TRUE) {
cat('I am using gdal_polygonizer, a function written by John Baumgartner see: https://github.com/johnbaums',fill=T)
if (isTRUE(readpoly)) require(rgdal)
# Locate gdal_polygonize.py on the PATH when no explicit path is given.
if (is.null(pypath)) {
pypath <- Sys.which('gdal_polygonize.py')
}
if (!file.exists(pypath)) stop("Can't find gdal_polygonize.py on your system. You need to install the GDAL library, see:
https://johnbaumgartner.wordpress.com/2012/07/26/getting-rasters-into-shape-from-r/
for more infos.")
# Work from the script's directory; restore the working directory on exit.
owd <- getwd()
on.exit(setwd(owd))
setwd(dirname(pypath))
if (!is.null(outshape)) {
# Refuse to overwrite an existing shapefile (.shp/.shx/.dbf triplet).
outshape <- sub('\\.shp$', '', outshape)
f.exists <- file.exists(paste(outshape, c('shp', 'shx', 'dbf'), sep='.'))
if (any(f.exists))
stop(sprintf('File already exists: %s',
toString(paste(outshape, c('shp', 'shx', 'dbf'),
sep='.')[f.exists])), call.=FALSE)
} else outshape <- tempfile()
# Accept either a Raster object (dumped to a temp GeoTIFF) or a file path.
if (is(x, 'Raster')) {
require(raster)
writeRaster(x, {f <- tempfile(fileext='.tif')})
rastpath <- normalizePath(f)
} else if (is.character(x)) {
rastpath <- normalizePath(x)
} else stop('x must be a file path (character string), or a Raster object.')
system2('python', args=(sprintf('"%1$s" "%2$s" -f "%3$s" "%4$s.shp"',
pypath, rastpath, gdalformat, outshape)))
if (isTRUE(readpoly)) {
shp <- readOGR(dirname(outshape), layer = basename(outshape), verbose=!quiet)
return(shp)
}
return(NULL)
}
# --- Input handling: if Y is NA, X is already a cell-type matrix and a
# generic legend is fabricated; otherwise X/Y are paths handed to
# FromPictoRdata(), which returns list(matrix, legend).
if(is.na(Y)){
X=X
legend=data.frame("Organism type"=paste('Organism',seq(1:length(X)),sep='_'),"ID"=seq(1:length(X)))
colnames(legend)[1]="Organism type"
}else{X=FromPictoRdata(X,Y)
legend=X[[2]]
X=X[[1]]
}
require(rgeos)
require(raster)
require(foreach)
# --- Polygonize each cell type (0 is excluded) and bind all patches into a
# single SpatialPolygonsDataFrame 'datap'; the DN field becomes
# "<organism type>_<patch number>".
to_bind=unique(array(X))[which(unique(array(X))!=0)]
foreach(i=1:length(to_bind))%do%{
cat(paste('binding ',legend$`Organism type`[i],sep=''),fill=T)
data1=matrix(0,nrow(X),ncol(X))
data1[which(X==to_bind[i])]=1
data1=clump(raster(data1,xmn=1,xmx=ncol(X),ymn=1,ymx=nrow(X)))
gc()
datap1=gdal_polygonizeR(data1)
datap1@bbox=matrix(c(0,0,ncol(X),nrow(X)),2,2)
#datap1 <- subset(datap1, gArea(datap1,byid = T)>=0.000001)
datap1$DN=paste(legend$`Organism type`[to_bind[i]],datap1$DN,sep='_')
if(i==1){
datap=datap1
}else{
datap=rbind(datap,datap1,makeUniqueIDs = T)}
rm(data1)
rm(datap1)
gc()
}
save(datap,file = 'Polygons.Rdata')
#load('/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/New_buffer_analysis/try_one.Rdata')
# Drop patches below the requested minimum size.
datap <- subset(datap, gArea(datap,byid = T)>scale^2*minimum_size*1/length(X))
tailles=gArea(datap,byid = T)
names(tailles)=datap$DN
buffer_size=d/scale
# --- Flag patches whose buffer would extend past the image edge; their
# buffer composition cannot be measured and their rows become NA below.
tokeep=foreach(i=1:length(datap),.combine=c)%do%{
to_test=datap@polygons[[i]]@Polygons[[1]]@coords
a=length(which(to_test[,1]<buffer_size|to_test[,1]>ncol(X)-buffer_size))
b=length(which(to_test[,2]<buffer_size|to_test[,2]>nrow(X)-buffer_size))
if(a!=0|b!=0){
todo=FALSE
}else{todo=TRUE}
todo
}
shp.sub <- subset(datap, tokeep)
#plot(shp.sub)
#plot(shp.sub)
# Buffered polygons (patch plus surrounding band of width buffer_size).
test=gBuffer(shp.sub,byid = T,width = buffer_size)
#par(mfrow=c(1,1))
#pdf(paste('Polygons_with_buffer_',buffer_size*scale,'.pdf',sep=''),height=50*nrow(X)/ncol(X),width=50*ncol(X)/nrow(X))
#plot(datap,col='dodgerblue')
#plot(test,border = 'red',add=T)
#dev.off()
#plot(datap, border="orange",col='dodgerblue')
#plot(test, border="blue", col='red',add=TRUE,lty=1,lwd=3)
gc()
save(test,file = paste('Polygon_buffer_',buffer_size*scale,'.Rdata',sep=''))
# Zero-width buffering repairs invalid geometries before intersection.
test=gBuffer(test,byid = T,width = 0) # anti bug device
datap=gBuffer(datap,byid = T,width = 0) #anti bug device
# Intersect every buffered polygon with every patch: what lies in each buffer?
kk=gIntersection(test,datap,byid = T,drop_lower_td = T)
#plot(kk,col='red')
#gArea(kk,byid = T)
save(kk,file = paste('Polygon_interaction_buffer_',buffer_size,'.Rdata',sep=''))
# Intersection names are "<buffer id> <patch id>"; drop self-intersections
# and group the remaining intersection areas by the focal (buffered) patch.
splitted_names=t(matrix(unlist(strsplit(names(gArea(kk,byid = T)),' ')),nrow=2))
broken_res=split(gArea(kk,byid = T)[-which(splitted_names[,1]==splitted_names[,2])],splitted_names[-which(splitted_names[,1]==splitted_names[,2]),1])
# Buffer band area = buffered area minus the patch's own area.
size_buffer=gArea(test,byid = T)-gArea(shp.sub,byid=T)
# Recover the organism-type name by stripping the trailing numeric token(s)
# from each "<organism type>_<patch number>" identifier.
species_names=suppressWarnings(sapply(strsplit(datap$DN,'_'),function(x) paste(x[-which(!is.na(as.numeric(x)==TRUE))],collapse = '_')))
colony_names=datap$DN
# --- Per patch: fraction of its buffer band covered by each organism type.
buffer_analysis=foreach(p=1:length(colony_names),.combine=rbind)%do%{
final_data=matrix(0,1,length(unique(species_names)))
j=which(names(broken_res)==rownames(data.frame(datap))[p])
if(length(j)){
colnames(final_data)=unique(species_names)
pure_data=broken_res[[j]]/size_buffer[which(names(size_buffer)==names(broken_res)[j])]
for(jj in 1:length(pure_data)){
names(pure_data)[jj]=species_names[which(rownames(data.frame(datap))==unlist(strsplit(names(pure_data)[jj],' '))[2])]
}
# Sum contributions of multiple neighbouring patches of the same type.
agg_f_data=aggregate(pure_data,list(names(pure_data)),sum)
pure_data=agg_f_data[,2]
names(pure_data)=agg_f_data[,1]
for(jj in 1:ncol(final_data)){
if(length(which(names(pure_data)==colnames(final_data)[jj]))!=0){
final_data[jj]=pure_data[which(names(pure_data)==colnames(final_data)[jj])]}
}
}
final_data
}
# Patches excluded by the edge test get NA ratios.
buffer_analysis[which(tokeep==FALSE),]=NA
#buffer_analysis
size_colony=gArea(datap,byid = T)
# Assemble the result table, write it to disk, and draw a visual check
# (patches coloured by organism type, buffer outlines in blue).
buffer_analysis=data.frame(species_names,colony_names,size_colony,buffer_analysis)
colnames(buffer_analysis)=paste('Ratio of buffer containing ',colnames(buffer_analysis),sep='')
colnames(buffer_analysis)[1:3]=c('Group name','Patch ID','Patch size')
write.csv(buffer_analysis, paste('Table_interaction_',buffer_size,'.csv',sep=''))
la_couleur=species_names
#la_couleur[which(tokeep==FALSE)]='out of buffer'
plot(datap,col=factor(la_couleur))
plot(test,border = 'dodgerblue',add=T)
return(buffer_analysis)
}
| /R/Buffer_Analysis.R | no_license | jherlan/MosaicAnalysis | R | false | false | 7,936 | r | #' Patch Buffer Analysis
#'
#' This function allows you to measure the relative composition of the buffer of a distribution of patch. WARNING: You need the Python library GDAL to use this function. Go to: https://johnbaumgartner.wordpress.com/2012/07/26/getting-rasters-into-shape-from-r/ for more information on how to proceed.
#' @param X wether (i) the path of the file to analyse, (ii) a matrix where each cell contains a number representing the cell type (as outputed by the function \code{FromPictoRdata}).
#' @param Y if \code{X} the path of the file to analyse, \code{Y} the path to the folder containing named images of unique colors corresponding to a specific type of organism potentially present on the analysed image. If \code{Y=NA}, then \code{X} is considered being a matrix where each cell contains a number representing the cell type (as outputed by the function \code{FromPictoRdata}).
#' @param d the distance around each patch that define the buffer.
#' @param scale the length of one pixel edge on the image (default to 1).
#' @param minimum_size the minimum size of the patches to consider in the analysis (default to 1).
#' @return A dataframe containing the size and relative buffer composition of each patch considered in the analysis.
#' @keywords Mosaic, image analysis, buffer, cover, species, patch size, size distributions.
#' @export
#' @examples #working on it
#setwd('/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/')
#X='/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/MAI_2016.png'
#Y='/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/test_for_package/Legend/'
Buffer_Analysis=function(X,Y=NA,d,scale=1,minimum_size=1,pathtopython=NULL){
gdal_polygonizeR <- function(x, outshape=NULL, gdalformat = 'ESRI Shapefile',
pypath=pathtopython, readpoly=TRUE, quiet=TRUE) {
cat('I am using gdal_polygonizer, a function written by John Baumgartner see: https://github.com/johnbaums',fill=T)
if (isTRUE(readpoly)) require(rgdal)
if (is.null(pypath)) {
pypath <- Sys.which('gdal_polygonize.py')
}
if (!file.exists(pypath)) stop("Can't find gdal_polygonize.py on your system. You need to install the GDAL library, see:
https://johnbaumgartner.wordpress.com/2012/07/26/getting-rasters-into-shape-from-r/
for more infos.")
owd <- getwd()
on.exit(setwd(owd))
setwd(dirname(pypath))
if (!is.null(outshape)) {
outshape <- sub('\\.shp$', '', outshape)
f.exists <- file.exists(paste(outshape, c('shp', 'shx', 'dbf'), sep='.'))
if (any(f.exists))
stop(sprintf('File already exists: %s',
toString(paste(outshape, c('shp', 'shx', 'dbf'),
sep='.')[f.exists])), call.=FALSE)
} else outshape <- tempfile()
if (is(x, 'Raster')) {
require(raster)
writeRaster(x, {f <- tempfile(fileext='.tif')})
rastpath <- normalizePath(f)
} else if (is.character(x)) {
rastpath <- normalizePath(x)
} else stop('x must be a file path (character string), or a Raster object.')
system2('python', args=(sprintf('"%1$s" "%2$s" -f "%3$s" "%4$s.shp"',
pypath, rastpath, gdalformat, outshape)))
if (isTRUE(readpoly)) {
shp <- readOGR(dirname(outshape), layer = basename(outshape), verbose=!quiet)
return(shp)
}
return(NULL)
}
if(is.na(Y)){
X=X
legend=data.frame("Organism type"=paste('Organism',seq(1:length(X)),sep='_'),"ID"=seq(1:length(X)))
colnames(legend)[1]="Organism type"
}else{X=FromPictoRdata(X,Y)
legend=X[[2]]
X=X[[1]]
}
require(rgeos)
require(raster)
require(foreach)
to_bind=unique(array(X))[which(unique(array(X))!=0)]
foreach(i=1:length(to_bind))%do%{
cat(paste('binding ',legend$`Organism type`[i],sep=''),fill=T)
data1=matrix(0,nrow(X),ncol(X))
data1[which(X==to_bind[i])]=1
data1=clump(raster(data1,xmn=1,xmx=ncol(X),ymn=1,ymx=nrow(X)))
gc()
datap1=gdal_polygonizeR(data1)
datap1@bbox=matrix(c(0,0,ncol(X),nrow(X)),2,2)
#datap1 <- subset(datap1, gArea(datap1,byid = T)>=0.000001)
datap1$DN=paste(legend$`Organism type`[to_bind[i]],datap1$DN,sep='_')
if(i==1){
datap=datap1
}else{
datap=rbind(datap,datap1,makeUniqueIDs = T)}
rm(data1)
rm(datap1)
gc()
}
save(datap,file = 'Polygons.Rdata')
#load('/Users/yoaneynaud/Desktop/Travail/Post_doc_scripps/Mosaic/New_buffer_analysis/try_one.Rdata')
datap <- subset(datap, gArea(datap,byid = T)>scale^2*minimum_size*1/length(X))
tailles=gArea(datap,byid = T)
names(tailles)=datap$DN
buffer_size=d/scale
tokeep=foreach(i=1:length(datap),.combine=c)%do%{
to_test=datap@polygons[[i]]@Polygons[[1]]@coords
a=length(which(to_test[,1]<buffer_size|to_test[,1]>ncol(X)-buffer_size))
b=length(which(to_test[,2]<buffer_size|to_test[,2]>nrow(X)-buffer_size))
if(a!=0|b!=0){
todo=FALSE
}else{todo=TRUE}
todo
}
shp.sub <- subset(datap, tokeep)
#plot(shp.sub)
#plot(shp.sub)
test=gBuffer(shp.sub,byid = T,width = buffer_size)
#par(mfrow=c(1,1))
#pdf(paste('Polygons_with_buffer_',buffer_size*scale,'.pdf',sep=''),height=50*nrow(X)/ncol(X),width=50*ncol(X)/nrow(X))
#plot(datap,col='dodgerblue')
#plot(test,border = 'red',add=T)
#dev.off()
#plot(datap, border="orange",col='dodgerblue')
#plot(test, border="blue", col='red',add=TRUE,lty=1,lwd=3)
gc()
save(test,file = paste('Polygon_buffer_',buffer_size*scale,'.Rdata',sep=''))
test=gBuffer(test,byid = T,width = 0) # anti bug device
datap=gBuffer(datap,byid = T,width = 0) #anti bug device
kk=gIntersection(test,datap,byid = T,drop_lower_td = T)
#plot(kk,col='red')
#gArea(kk,byid = T)
save(kk,file = paste('Polygon_interaction_buffer_',buffer_size,'.Rdata',sep=''))
splitted_names=t(matrix(unlist(strsplit(names(gArea(kk,byid = T)),' ')),nrow=2))
broken_res=split(gArea(kk,byid = T)[-which(splitted_names[,1]==splitted_names[,2])],splitted_names[-which(splitted_names[,1]==splitted_names[,2]),1])
size_buffer=gArea(test,byid = T)-gArea(shp.sub,byid=T)
species_names=suppressWarnings(sapply(strsplit(datap$DN,'_'),function(x) paste(x[-which(!is.na(as.numeric(x)==TRUE))],collapse = '_')))
colony_names=datap$DN
buffer_analysis=foreach(p=1:length(colony_names),.combine=rbind)%do%{
final_data=matrix(0,1,length(unique(species_names)))
j=which(names(broken_res)==rownames(data.frame(datap))[p])
if(length(j)){
colnames(final_data)=unique(species_names)
pure_data=broken_res[[j]]/size_buffer[which(names(size_buffer)==names(broken_res)[j])]
for(jj in 1:length(pure_data)){
names(pure_data)[jj]=species_names[which(rownames(data.frame(datap))==unlist(strsplit(names(pure_data)[jj],' '))[2])]
}
agg_f_data=aggregate(pure_data,list(names(pure_data)),sum)
pure_data=agg_f_data[,2]
names(pure_data)=agg_f_data[,1]
for(jj in 1:ncol(final_data)){
if(length(which(names(pure_data)==colnames(final_data)[jj]))!=0){
final_data[jj]=pure_data[which(names(pure_data)==colnames(final_data)[jj])]}
}
}
final_data
}
buffer_analysis[which(tokeep==FALSE),]=NA
#buffer_analysis
size_colony=gArea(datap,byid = T)
buffer_analysis=data.frame(species_names,colony_names,size_colony,buffer_analysis)
colnames(buffer_analysis)=paste('Ratio of buffer containing ',colnames(buffer_analysis),sep='')
colnames(buffer_analysis)[1:3]=c('Group name','Patch ID','Patch size')
write.csv(buffer_analysis, paste('Table_interaction_',buffer_size,'.csv',sep=''))
la_couleur=species_names
#la_couleur[which(tokeep==FALSE)]='out of buffer'
plot(datap,col=factor(la_couleur))
plot(test,border = 'dodgerblue',add=T)
return(buffer_analysis)
}
|
###########################################################################/**
# @RdocFunction anyMissing
# \alias{colAnyMissings}
# \alias{rowAnyMissings}
#
# @title "Checks if there are any missing values in an object or not"
#
# \description{
# @get "title".
# }
#
# \usage{
# anyMissing(x, ...)
# colAnyMissings(x, ...)
# rowAnyMissings(x, ...)
# }
#
# \arguments{
# \item{x}{A @vector, a @list, a @matrix, a @data.frame, or @NULL.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns @TRUE if a missing value was detected, otherwise @FALSE.
# }
#
# \details{
# The implementation of this method is optimized for both speed and memory.
# The method will return @TRUE as soon as a missing value is detected.
# }
#
# \examples{
# x <- rnorm(n=1000)
# x[seq(300,length(x),by=100)] <- NA
# stopifnot(anyMissing(x) == any(is.na(x)))
# }
#
# @author "HB"
#
# \seealso{
# Starting with R v3.1.0, there is \code{anyNA()} in the \pkg{base},
# which provides the same functionality as this function.
# }
#
# @keyword iteration
# @keyword logic
#*/###########################################################################
anyMissing <- function(x, ...) {
  ## Lists and data.frames: scan element by element, short-circuiting and
  ## returning TRUE on the first element that contains a missing value.
  if (is.list(x)) {
    for (kk in seq_along(x)) {
      if (.Call("anyMissing", x[[kk]], PACKAGE="matrixStats"))
        return(TRUE)
    }
    ## BUG FIX: a `for` loop evaluates to invisible NULL, so the original
    ## returned NULL (not FALSE) for lists containing no missing values,
    ## which breaks callers doing `if (anyMissing(x))`. Return FALSE
    ## explicitly once the scan completes.
    FALSE
  } else {
    ## All other (atomic) data types are handled directly by the C routine.
    .Call("anyMissing", x, PACKAGE="matrixStats")
  }
}
# Column-wise counterpart of anyMissing(): for each column of 'x', report
# whether it contains at least one missing value. Delegates to colAnys()
# (defined elsewhere in this package) with value=NA.
colAnyMissings <- function(x, ...) {
colAnys(x, value=NA, ...)
}
# Row-wise counterpart of anyMissing(): for each row of 'x', report whether
# it contains at least one missing value. Delegates to rowAnys() (defined
# elsewhere in this package) with value=NA.
rowAnyMissings <- function(x, ...) {
rowAnys(x, value=NA, ...)
}
############################################################################
# HISTORY:
# 2015-02-10
# o CLEANUP: anyMissing() is no longer an S4 generic, cf. base::anyNA().
# 2015-01-20
# o CLEANUP: In the effort of migrating anyMissing() into a plain R
# function, specific anyMissing() implementations for data.frame:s and
# and list:s were dropped and is now handled by anyMissing() for "ANY".
# 2014-12-08
# o Added (col|row)AnyMissings().
# 2013-09-26
# o Added help reference to base::anyNA().
# 2013-01-13
# o Added anyMissing() for raw, which always returns FALSE.
# 2008-03-25
# o Added anyMissing() for matrices, data.frames, lists and NULL.
# o Added anyMissing() for numeric, logical, complex and character.
# o Made anyMissing() into an S4 method.
# 2007-08-14
# o Created. See also R-devel thread "hasNA()/anyNA()?" on 2007-08-13.
############################################################################
| /R/anyMissing.R | no_license | david78k/mstats | R | false | false | 2,501 | r | ###########################################################################/**
# @RdocFunction anyMissing
# \alias{colAnyMissings}
# \alias{rowAnyMissings}
#
# @title "Checks if there are any missing values in an object or not"
#
# \description{
# @get "title".
# }
#
# \usage{
# anyMissing(x, ...)
# colAnyMissings(x, ...)
# rowAnyMissings(x, ...)
# }
#
# \arguments{
# \item{x}{A @vector, a @list, a @matrix, a @data.frame, or @NULL.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns @TRUE if a missing value was detected, otherwise @FALSE.
# }
#
# \details{
# The implementation of this method is optimized for both speed and memory.
# The method will return @TRUE as soon as a missing value is detected.
# }
#
# \examples{
# x <- rnorm(n=1000)
# x[seq(300,length(x),by=100)] <- NA
# stopifnot(anyMissing(x) == any(is.na(x)))
# }
#
# @author "HB"
#
# \seealso{
# Starting with R v3.1.0, there is \code{anyNA()} in the \pkg{base},
# which provides the same functionality as this function.
# }
#
# @keyword iteration
# @keyword logic
#*/###########################################################################
anyMissing <- function(x, ...) {
  ## Lists and data.frames: scan element by element, short-circuiting and
  ## returning TRUE on the first element that contains a missing value.
  if (is.list(x)) {
    for (kk in seq_along(x)) {
      if (.Call("anyMissing", x[[kk]], PACKAGE="matrixStats"))
        return(TRUE)
    }
    ## BUG FIX: a `for` loop evaluates to invisible NULL, so the original
    ## returned NULL (not FALSE) for lists containing no missing values,
    ## which breaks callers doing `if (anyMissing(x))`. Return FALSE
    ## explicitly once the scan completes.
    FALSE
  } else {
    ## All other (atomic) data types are handled directly by the C routine.
    .Call("anyMissing", x, PACKAGE="matrixStats")
  }
}
colAnyMissings <- function(x, ...) {
colAnys(x, value=NA, ...)
}
rowAnyMissings <- function(x, ...) {
rowAnys(x, value=NA, ...)
}
############################################################################
# HISTORY:
# 2015-02-10
# o CLEANUP: anyMissing() is no longer an S4 generic, cf. base::anyNA().
# 2015-01-20
# o CLEANUP: In the effort of migrating anyMissing() into a plain R
# function, specific anyMissing() implementations for data.frame:s and
# and list:s were dropped and is now handled by anyMissing() for "ANY".
# 2014-12-08
# o Added (col|row)AnyMissings().
# 2013-09-26
# o Added help reference to base::anyNA().
# 2013-01-13
# o Added anyMissing() for raw, which always returns FALSE.
# 2008-03-25
# o Added anyMissing() for matrices, data.frames, lists and NULL.
# o Added anyMissing() for numeric, logical, complex and character.
# o Made anyMissing() into an S4 method.
# 2007-08-14
# o Created. See also R-devel thread "hasNA()/anyNA()?" on 2007-08-13.
############################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_db_con}
\alias{is_db_con}
\title{Check if the provided connection is a DBI connection object}
\usage{
is_db_con(con)
}
\arguments{
\item{con}{a DBI connection}
}
\value{
A logical: the result of testing whether \code{con} is a DBI connection object
}
\description{
Check if the provided connection is a DBI connection object
}
\examples{
\dontrun{
conn = connect_to_local_db()
is_db_con(
con = conn
)
}
}
| /man/is_db_con.Rd | permissive | ctsit/redcapcustodian | R | false | true | 450 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_db_con}
\alias{is_db_con}
\title{Check if the provided connection is a DBI connection object}
\usage{
is_db_con(con)
}
\arguments{
\item{con}{a DBI connection}
}
\value{
The result of the test
}
\description{
Check if the provided connection is a DBI connection object
}
\examples{
\dontrun{
conn = connect_to_local_db()
is_db_con(
con = conn
)
}
}
|
# Match drug stock concentrations to drug-pair plate positions: join the
# concentration table to the pair table once through each pair's first drug
# and once through its second drug, stack the two joins, and write the
# combined table to disk.
library(tidyverse)
concTab = as_tibble(read.table("~/Dropbox (RajLab)/Projects/cellid/metadata/RNAtagTest6/20171106_RNAtagTest6_drugStockConcentrations - Sheet1.tsv", sep = "\t", header = T, stringsAsFactors = F))
pairTab = as_tibble(read.table("~/Dropbox (RajLab)/Projects/cellid/miscellaneous/pickingDrugs/20171106_drugPairsWithPairID.txt", sep = "\t", header = T, stringsAsFactors = F))
# Placeholder value; not modified again in this script (presumably filled
# in downstream — verify against the consuming analysis).
concTab$deliveredWell = "incomplete"
# Concentration rows matched via the pair's first drug.
concTab2 = pairTab %>%
mutate(Product.Name = drug1) %>%
dplyr::select(Product.Name, pairID, stockWellID) %>%
inner_join(concTab, by = "Product.Name")
# Concentration rows matched via the pair's second drug.
concTab3 = pairTab %>%
mutate(Product.Name = drug2) %>%
dplyr::select(Product.Name, pairID, stockWellID) %>%
inner_join(concTab, by = "Product.Name")
concTabF = bind_rows(concTab2, concTab3)
write.table(concTabF, file = "~/Dropbox (RajLab)/Projects/cellid/metadata/RNAtagTest6/RNAtagTest6_drugStockConcentrationWithPairPositions.txt", sep = "\t", quote = F, row.names = F)
| /metadata/RNAtagTest6/drugIDpairStockMatching.R | permissive | arjunrajlaboratory/P3_public_analysis | R | false | false | 967 | r | library(tidyverse)
concTab = as_tibble(read.table("~/Dropbox (RajLab)/Projects/cellid/metadata/RNAtagTest6/20171106_RNAtagTest6_drugStockConcentrations - Sheet1.tsv", sep = "\t", header = T, stringsAsFactors = F))
pairTab = as_tibble(read.table("~/Dropbox (RajLab)/Projects/cellid/miscellaneous/pickingDrugs/20171106_drugPairsWithPairID.txt", sep = "\t", header = T, stringsAsFactors = F))
concTab$deliveredWell = "incomplete"
concTab2 = pairTab %>%
mutate(Product.Name = drug1) %>%
dplyr::select(Product.Name, pairID, stockWellID) %>%
inner_join(concTab, by = "Product.Name")
concTab3 = pairTab %>%
mutate(Product.Name = drug2) %>%
dplyr::select(Product.Name, pairID, stockWellID) %>%
inner_join(concTab, by = "Product.Name")
concTabF = bind_rows(concTab2, concTab3)
write.table(concTabF, file = "~/Dropbox (RajLab)/Projects/cellid/metadata/RNAtagTest6/RNAtagTest6_drugStockConcentrationWithPairPositions.txt", sep = "\t", quote = F, row.names = F)
|
##' @title Parse metadata
##' @description Parse metadata either by providing the \emph{.json}
##' file that is downloaded from GDC cart or by parse metadata
##' automatically by providing the projct id and data type
##' @param metafile metadata file in \code{.json} format download
##' from GDC cart. If provided, the metadata will be parsed from
##' this file, otherwise, \code{project} and \code{data.type} arguments
##' should be provided to retrieve metadata automatically.
##' Default is \code{NULL}
##' @param project.id project id in GDC
##' @param data.type one of \code{'RNAseq'} and \code{'miRNAs'}
##' @param write.meta logical, whether to write the metadata to a
##' \code{.json} file
##' @importFrom rjson fromJSON
##' @importFrom jsonlite toJSON
##' @return A dataframe of metadata containing file_name,
##' sample_id, etc. as well as some basic clinical data
##' @export
##' @author Ruidong Li and Han Qu
##' @examples
##' ####### Merge RNA expression data #######
##' metaMatrix <- gdcParseMetadata(project.id='TARGET-RT', data.type='RNAseq')
gdcParseMetadata <- function(metafile=NULL, project.id,
data.type, write.meta=FALSE) {
# --- Obtain the metadata list: either parse a user-supplied GDC cart JSON
# file, or query the GDC API (URL built by gdcGetURL) and unwrap the hits.
if (! is.null(metafile)) {
metadata <- rjson::fromJSON(file=metafile)
} else {
url <- gdcGetURL(project.id = project.id, data.type = data.type)
metadata <- rjson::fromJSON(file=url)
metadata <- metadata$data$hits
#keep <- unlist(lapply(metadata$data$hits,
#function(sam) sam$analysis$workflow_type %in%
#c('HTSeq - Counts', 'BCGSC miRNA Profiling')))
#metadata <- metadata$data$hits[keep]
# Optionally snapshot the retrieved metadata to
# "<project>.<type>.metadata.<timestamp>.json" (':' and ' ' are not
# filename-safe, hence the substitutions).
if (write.meta==TRUE) {
metafile <- jsonlite::toJSON(metadata, pretty=TRUE)
systime <- gsub(' ', 'T', Sys.time())
systime <- gsub(':', '-', systime)
write(metafile, file=paste(project.id, data.type,
'metadata', systime, 'json', sep='.'))
}
}
# --- Extract one value per sample for each field of interest. chr2naFun/
# num2naFun map absent (NULL) fields to placeholders and null2naFun turns
# the "NA" placeholder strings into real NA values.
nSam <- length(metadata)
file_name <- vapply(seq_len(nSam),
function(i) metadata[[i]]$file_name,
character(1))
file_id <- vapply(seq_len(nSam),
function(i) metadata[[i]]$file_id, character(1))
submitter_id <- vapply(seq_len(nSam), function(i)
metadata[[i]]$cases[[1]]$samples[[1]]$submitter_id,
character(1))
# Sample barcode = submitter id with its last character dropped.
sample <- vapply(submitter_id,
function(v) substr(v, 1, nchar(v)-1),
character(1))
entity_submitter_id <- vapply(seq_len(nSam), function(i)
metadata[[i]]$associated_entities[[1]]$entity_submitter_id,
character(1))
sample_type <- vapply(seq_len(nSam), function(i)
metadata[[i]]$cases[[1]]$samples[[1]]$sample_type,
character(1))
patient <- vapply(seq_len(nSam), function(i)
metadata[[i]]$cases[[1]]$submitter_id,
character(1))
gender <- null2naFun(vapply(seq_len(nSam), function(i)
chr2naFun(metadata[[i]]$cases[[1]]$demographic$gender), character(1)))
#race <- sapply(1:length(metadata), function(i)
#metadata[[i]]$cases[[1]]$demographic$race)
#ethnicity <- sapply(1:length(metadata), function(i)
#metadata[[i]]$cases[[1]]$demographic$ethnicity)
project_id <- vapply(seq_len(nSam), function(i)
chr2naFun(metadata[[i]]$cases[[1]]$project$project_id),
character(1))
tumor_stage <- null2naFun(vapply(seq_len(nSam), function(i)
chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$tumor_stage),
character(1)))
tumor_grade <- null2naFun(vapply(seq_len(nSam), function(i)
chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$tumor_grade),
character(1)))
# suppressWarnings() hides the coercion warnings num2naFun's as.numeric()
# emits for non-numeric field values.
age_at_diagnosis <- suppressWarnings(vapply(seq_len(nSam), function(i)
num2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$age_at_diagnosis),
numeric(1)))
days_to_death <- suppressWarnings(vapply(seq_len(nSam), function(i)
num2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$days_to_death),
numeric(1)))
days_to_last_follow_up <- suppressWarnings(vapply(seq_len(nSam),
function(i) num2naFun(
metadata[[i]]$cases[[1]]$diagnoses[[1]]$days_to_last_follow_up),
numeric(1)))
vital_status <- null2naFun(vapply(seq_len(nSam), function(i)
chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$vital_status),
character(1)))
# --- Assemble and clean: one row per file/sample, ordered by submitter id;
# GDC's literal 'not reported' becomes NA, and embedded spaces are stripped
# from sample_type and tumor_stage values.
metaMatrix <- data.frame(file_name,file_id,patient,sample,submitter_id,
entity_submitter_id, sample_type, gender,age_at_diagnosis,tumor_stage,
tumor_grade,days_to_death, days_to_last_follow_up,vital_status,
project_id, stringsAsFactors = FALSE)
metaMatrix <- metaMatrix[order(metaMatrix$submitter_id),]
metaMatrix[metaMatrix=='not reported'] <- NA
metaMatrix$sample_type <- gsub(' ', '',
metaMatrix$sample_type, fixed=TRUE)
metaMatrix$tumor_stage <- gsub(' ', '',
metaMatrix$tumor_stage, fixed=TRUE)
return (metaMatrix)
}
# Convert the literal placeholder string "NA" (produced by chr2naFun for
# NULL metadata fields) into a genuine missing value.
null2naFun <- function(v) {
  is_placeholder <- v == "NA"
  v[is_placeholder] <- NA
  v
}
# Map a missing (NULL) metadata field to the placeholder string "NA";
# otherwise coerce the value to character. The placeholder is later turned
# into a real NA by null2naFun().
chr2naFun <- function(v) {
  if (is.null(v)) "NA" else as.character(v)
}
# Coerce a possibly-missing metadata field toward numeric: NULL becomes NA,
# numeric values are round-tripped through character back to double, and
# anything else is returned as character.
num2naFun <- function(v) {
  if (is.null(v)) {
    return(NA)
  }
  if (is.numeric(v)) {
    as.numeric(as.character(v))
  } else {
    as.character(v)
  }
}
##' @description Parse metadata either by providing the \emph{.json}
##' file that is downloaded from GDC cart or by parse metadata
##' automatically by providing the projct id and data type
##' @param metafile metadata file in \code{.json} format download
##' from GDC cart. If provided, the metadata will be parsed from
##' this file, otherwise, \code{project} and \code{data.type} arguments
##' should be provided to retrieve metadata automatically.
##' Default is \code{NULL}
##' @param project.id project id in GDC
##' @param data.type one of \code{'RNAseq'} and \code{'miRNAs'}
##' @param write.meta logical, whether to write the metadata to a
##' \code{.json} file
##' @importFrom rjson fromJSON
##' @importFrom jsonlite toJSON
##' @return A dataframe of metadata containing file_name,
##' sample_id, etc. as well as some basic clinical data
##' @export
##' @author Ruidong Li and Han Qu
##' @examples
##' ####### Merge RNA expression data #######
##' metaMatrix <- gdcParseMetadata(project.id='TARGET-RT', data.type='RNAseq')
gdcParseMetadata <- function(metafile=NULL, project.id,
    data.type, write.meta=FALSE) {
    # Either parse a user-supplied GDC cart metadata file, or query the GDC
    # API (URL built by gdcGetURL, defined elsewhere) and keep data$hits.
    if (! is.null(metafile)) {
        metadata <- rjson::fromJSON(file=metafile)
    } else {
        url <- gdcGetURL(project.id = project.id, data.type = data.type)
        metadata <- rjson::fromJSON(file=url)
        metadata <- metadata$data$hits
        #keep <- unlist(lapply(metadata$data$hits,
        #function(sam) sam$analysis$workflow_type %in%
        #c('HTSeq - Counts', 'BCGSC miRNA Profiling')))
        #metadata <- metadata$data$hits[keep]
        # Optionally dump the retrieved metadata to a timestamped .json file;
        # ' ' and ':' in the timestamp are replaced to keep the name portable.
        if (write.meta==TRUE) {
            metafile <- jsonlite::toJSON(metadata, pretty=TRUE)
            systime <- gsub(' ', 'T', Sys.time())
            systime <- gsub(':', '-', systime)
            write(metafile, file=paste(project.id, data.type,
                'metadata', systime, 'json', sep='.'))
        }
    }
    # One list element per file record; pull each field into its own vector.
    nSam <- length(metadata)
    file_name <- vapply(seq_len(nSam),
        function(i) metadata[[i]]$file_name,
        character(1))
    file_id <- vapply(seq_len(nSam),
        function(i) metadata[[i]]$file_id, character(1))
    submitter_id <- vapply(seq_len(nSam), function(i)
        metadata[[i]]$cases[[1]]$samples[[1]]$submitter_id,
        character(1))
    # Sample barcode = submitter id with its last character dropped.
    sample <- vapply(submitter_id,
        function(v) substr(v, 1, nchar(v)-1),
        character(1))
    entity_submitter_id <- vapply(seq_len(nSam), function(i)
        metadata[[i]]$associated_entities[[1]]$entity_submitter_id,
        character(1))
    sample_type <- vapply(seq_len(nSam), function(i)
        metadata[[i]]$cases[[1]]$samples[[1]]$sample_type,
        character(1))
    patient <- vapply(seq_len(nSam), function(i)
        metadata[[i]]$cases[[1]]$submitter_id,
        character(1))
    # chr2naFun guards against missing (NULL) fields; null2naFun then turns
    # the resulting 'NA' strings into real NA values.
    gender <- null2naFun(vapply(seq_len(nSam), function(i)
        chr2naFun(metadata[[i]]$cases[[1]]$demographic$gender), character(1)))
    #race <- sapply(1:length(metadata), function(i)
    #metadata[[i]]$cases[[1]]$demographic$race)
    #ethnicity <- sapply(1:length(metadata), function(i)
    #metadata[[i]]$cases[[1]]$demographic$ethnicity)
    project_id <- vapply(seq_len(nSam), function(i)
        chr2naFun(metadata[[i]]$cases[[1]]$project$project_id),
        character(1))
    tumor_stage <- null2naFun(vapply(seq_len(nSam), function(i)
        chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$tumor_stage),
        character(1)))
    tumor_grade <- null2naFun(vapply(seq_len(nSam), function(i)
        chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$tumor_grade),
        character(1)))
    # Numeric clinical fields: num2naFun maps NULL to NA; warnings from
    # coercion of non-numeric values are suppressed.
    age_at_diagnosis <- suppressWarnings(vapply(seq_len(nSam), function(i)
        num2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$age_at_diagnosis),
        numeric(1)))
    days_to_death <- suppressWarnings(vapply(seq_len(nSam), function(i)
        num2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$days_to_death),
        numeric(1)))
    days_to_last_follow_up <- suppressWarnings(vapply(seq_len(nSam),
        function(i) num2naFun(
            metadata[[i]]$cases[[1]]$diagnoses[[1]]$days_to_last_follow_up),
        numeric(1)))
    vital_status <- null2naFun(vapply(seq_len(nSam), function(i)
        chr2naFun(metadata[[i]]$cases[[1]]$diagnoses[[1]]$vital_status),
        character(1)))
    # Assemble one row per file, ordered by submitter id; normalise
    # 'not reported' to NA and strip blanks from sample_type/tumor_stage.
    metaMatrix <- data.frame(file_name,file_id,patient,sample,submitter_id,
        entity_submitter_id, sample_type, gender,age_at_diagnosis,tumor_stage,
        tumor_grade,days_to_death, days_to_last_follow_up,vital_status,
        project_id, stringsAsFactors = FALSE)
    metaMatrix <- metaMatrix[order(metaMatrix$submitter_id),]
    metaMatrix[metaMatrix=='not reported'] <- NA
    metaMatrix$sample_type <- gsub(' ', '',
        metaMatrix$sample_type, fixed=TRUE)
    metaMatrix$tumor_stage <- gsub(' ', '',
        metaMatrix$tumor_stage, fixed=TRUE)
    return (metaMatrix)
}
# Turn literal 'NA' strings into genuine missing values.
null2naFun <- function(v) {
    v[which(v == 'NA')] <- NA
    v
}
# NULL-safe character coercion: NULL becomes the string 'NA',
# anything else is converted with as.character().
chr2naFun <- function(v) {
    if (is.null(v)) {
        return ('NA')
    }
    as.character(v)
}
num2naFun <- function(v) {
if (is.null(v)) {
return (NA)
} else if (is.numeric(v)) {
return (as.numeric(as.character(v)))
} else {
return (as.character(v))
}
} |
## Plot 2 - Baltimore total emissions trend using base plotting package
# Read in the Particulate and Classification data.
# (SCC is loaded here but not used for this particular plot.)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Prepare the data by finding the emissions for the city of Baltimore. Then aggregate by year.
# fips "24510" is Baltimore City; which() also drops any NA fips rows.
Baltimore.NEI.subset <- NEI[which(NEI$fips=="24510"),]
result <- aggregate(Emissions ~ year, Baltimore.NEI.subset, sum)
# Plot the data.
# Totals are divided by 1000 for a readable axis scale.
# NOTE(review): "Polluntants" in the y-axis label is a typo in the rendered
# output; left unchanged because this update only adds comments.
png("plot2.png", width=480, height=480, bg="transparent")
barplot(result$Emissions/1000, main="Baltimore 1999-2008 Total Pollutants", names.arg=result$year,
        xlab="Year", ylab="Polluntants Tons/1000")
dev.off() | /plot2.R | no_license | pcappa/ExData-Project2 | R | false | false | 669 | r | ## Plot 2 - Baltimore total emissions trend using base plotting package
# Read in the Particulate and Classification data.
# (SCC is read for completeness; only NEI feeds this plot.)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep the Baltimore City rows (fips 24510) and total emissions per year.
baltimore.rows <- which(NEI$fips=="24510")
result <- aggregate(Emissions ~ year, NEI[baltimore.rows, ], sum)
# Draw the yearly totals (in thousands of tons) to a PNG device.
png("plot2.png", width=480, height=480, bg="transparent")
barplot(result$Emissions/1000,
        main="Baltimore 1999-2008 Total Pollutants",
        names.arg=result$year,
        xlab="Year", ylab="Polluntants Tons/1000")
dev.off() |
# tests of the convolution function in r
# NOTE(review): normalize() is used below but not defined in this file;
# presumably it rescales a vector to unit length -- confirm where it lives.
x<-1:5
y<-1:5
# The small integer vectors above are immediately overwritten: the checks
# below run on random data (no seed is set, so values differ per run).
x<-rnorm(100)
y<-rnorm(100)
# randy's functions
test1<-Convolve(x,y)
test2<-Deconvolve(x,test1)
Cosine(y,test2)
# need the rev(y) in the y parameter to get the same convolution from TODAM
test3<-normalize(convolve(x = x,y = rev(y),conj = F,type = "circular"))
# rev() around the whole deconvolution, memory as x and vector as y
test4<-rev(normalize(convolve(y = (x), x = (test3), conj = T,type = "circular")))
Cosine(y,test4)
# did the convolution happen the same?  (compared to 5 decimal places)
all(round(test1,5)==round(test3,5))
# did the deconvolution happen the same?
all(round(test2,5)==round(test4,5))
# hand calculations for 1 2 3 4 5 -- each line is one entry of the
# unnormalised circular convolution of 1:5 with itself
1*5+2*1+3*2+4*3+5*4
1*4+2*5+3*1+4*2+5*3
1*3+2*4+3*5+4*1+5*2
1*2+2*3+3*4+4*5+5*1
1*1+2*2+3*3+4*4+5*5
# Arithmetic modulo used for circular (wrap-around) indexing.
# Follows R's %% semantics, so the result takes the sign of b.
mod <- function (a, b) {
  a %% b
}
# Circular convolution of x with y (wrap-around indices), scaled to unit
# Euclidean length before returning.
Convolve <- function (x, y) {
  n <- length(x)
  out <- array(0, dim=n)
  y_rev <- rev(y)
  # out[lag+1] = sum over k of x[k+1] * rev(y)[((lag-k) mod n) + 1]
  for (lag in 0:(n-1)) {
    acc <- 0
    for (k in 0:(n-1)) {
      acc <- acc + x[k %% n + 1] * y_rev[(lag - k) %% n + 1]
    }
    out[lag + 1] <- acc
  }
  # End unit normalisation.
  out / sqrt(sum(out^2))
}
# Circular correlation ("deconvolution") of probe x with memory trace m,
# unit-normalised and reversed before returning.
Deconvolve <- function (x, m) {
  n <- length(x)
  out <- array(0, dim=n)
  # out[i+1] = sum over k of x[k+1] * m[((i+k) mod n) + 1]
  for (i in 0:(n-1)) {
    acc <- 0
    for (k in 0:(n-1)) {
      acc <- acc + x[k %% n + 1] * m[(i + k) %% n + 1]
    }
    out[i + 1] <- acc
  }
  # Normalise to unit length, then reverse to line up with the stored item.
  rev(out / sqrt(sum(out^2)))
}
# Standard cosine function
Cosine <- function (x, y) {
z <- sum(normalize(x) * normalize(y))
return(z)
} | /Franklin_Mewhort/testing_convolution_function.R | no_license | SmithBradleyC/PhD_Code | R | false | false | 1,574 | r |
# tests of the convolution function in r
x<-1:5
y<-1:5
x<-rnorm(100)
y<-rnorm(100)
# randy's functions
test1<-Convolve(x,y)
test2<-Deconvolve(x,test1)
Cosine(y,test2)
# need the rev(y) in the y parameter to get the same convolution from TODAM
test3<-normalize(convolve(x = x,y = rev(y),conj = F,type = "circular"))
# rev() around the whole deconvolution, memory as x and vector as y
test4<-rev(normalize(convolve(y = (x), x = (test3), conj = T,type = "circular")))
Cosine(y,test4)
# did the convolution happen the same?
all(round(test1,5)==round(test3,5))
# did the deconvolution happen the same?
all(round(test2,5)==round(test4,5))
# hand calculations for 1 2 3 4 5
1*5+2*1+3*2+4*3+5*4
1*4+2*5+3*1+4*2+5*3
1*3+2*4+3*5+4*1+5*2
1*2+2*3+3*4+4*5+5*1
1*1+2*2+3*3+4*4+5*5
# Wrap-around remainder for circular indexing (same sign rules as %%).
mod <- function (a, b) {
  a %% b
}
# One-way circular convolution with end unit normalisation:
# each output entry sums x against the reversed y at wrapped offsets.
Convolve <- function (x, y) {
  n <- length(x)
  result <- array(0, dim=n)
  flipped <- rev(y)
  for (pos in 0:(n-1)) {
    total <- 0
    for (idx in 0:(n-1)) {
      total <- total + x[idx %% n + 1] * flipped[(pos - idx) %% n + 1]
    }
    result[pos + 1] <- total
  }
  result / sqrt(sum(result^2))
}
# One-way circular deconvolution with end unit normalisation: correlates
# probe x against memory m at wrapped offsets, then reverses the result.
Deconvolve <- function (x, m) {
  n <- length(x)
  result <- array(0, dim=n)
  for (pos in 0:(n-1)) {
    total <- 0
    for (idx in 0:(n-1)) {
      total <- total + x[idx %% n + 1] * m[(pos + idx) %% n + 1]
    }
    result[pos + 1] <- total
  }
  rev(result / sqrt(sum(result^2)))
}
# Standard cosine function
Cosine <- function (x, y) {
z <- sum(normalize(x) * normalize(y))
return(z)
} |
### R Stats ANOVA
# Exercise ANOVA
set.seed(234)
myobject = data.frame(group=rep(c("a", "b", "c"),10),
                      numeric=c(rnorm(5,5),6:15, rep(c(1,20,98),5)))
# Create the object myobject as stated above. There are 3 groups in it and every group has
# 10 observations in the column numeric
# get 3 different visual impressions of this data (hint: jitter, boxplot, violin will work)
# perform an ANOVA and do at least 2 post hoc tests (test for multiple comparison)
# hint: think about ways of adjusting p values (multiplicity)
# what is the problem if you would not adjust for p values in this post hoc tests?
# which test would you choose if myobject would not be normally distributed
# perform a non parametric test instead of ANOVA
# myobject: 3 groups, balanced, rnorm
set.seed(234)
myobject = data.frame(group=rep(c("a", "b", "c"),10),
                      numeric=c(rnorm(5,5),6:15, rep(c(1,20,98),5)))
# NOTE(review): with stringsAsFactors=FALSE (the default since R 4.0),
# group is character and levels() returns NULL -- confirm intent.
levels(myobject$group)
# simple boxplot in base
boxplot(data=myobject, numeric~group)
# violin plot gets a better view on the distribution of the data
library(lattice)
bwplot(data=myobject, numeric~group, panel=panel.violin)
# jitter plot displays all the points individually
library(ggplot2)
# NOTE(review): 'formula=y~x' is not a documented qplot() argument (it is
# swallowed by '...'); the plot is driven by the x= and y= aesthetics.
qplot(data=myobject, formula=y~x, x=group, y=numeric, geom="jitter")
# one-way ANOVA of numeric on group
myanova=aov(data=myobject, numeric~group)
summary(myanova)
# Tukey honest significant differences: all pairwise group comparisons
TukeyHSD(myanova)
plot(TukeyHSD(myanova))
coefficients(myanova)
#pairwise t test with adjusted p value (Benjamini-Hochberg) as an alternative post hoc test
pairwise.t.test(x=myobject$numeric, g=myobject$group, p.adj="BH")
library(DTK)
#Tukey Kramer Test
TK.test(x=myobject$numeric, f=myobject$group)
# we need those post hoc tests to adjust the p values - otherwise the T1 error rate
# would be inflated ( higher than significance level 0.05)
# non-parametric alternative to the one-way ANOVA:
kruskal.test(data=myobject, numeric~group)
| /R_Stat_Johns_Hopkins_work/R_Statistics/R_Stats_ANOVA.R | no_license | mpierne01/Johns_Hopkins_DS_Work | R | false | false | 1,831 | r | ### R Stats ANOVA
# Exercise ANOVA
set.seed(234)
myobject = data.frame(group=rep(c("a", "b", "c"),10),
numeric=c(rnorm(5,5),6:15, rep(c(1,20,98),5)))
# Create the object myobject as stated above. There are 3 groups in it and every group has
# 10 observations in the column numeric
# get 3 different visual impressions of this data ( hint; jitter, boxpl, violin will work)
# perfrom an ANOVA and do at least 2 post hoc tests (test for multiple comparison)
# hint: think about ways of adjusting p values ( multiplicity)
# what is the problem if you would not adjust for p values in this post hoc tests?
# which test would you choose if myobject would not be normally distributed
# perform a non parametric test instead of ANOVA
# myobject: 3 groups, balanced, rnorm
set.seed(234)
myobject = data.frame(group=rep(c("a", "b", "c"),10),
numeric=c(rnorm(5,5),6:15, rep(c(1,20,98),5)))
levels(myobject$group)
# simple boxplot in base
boxplot(data=myobject, numeric~group)
# violin plot gets a better view on the distribution of the data
library(lattice)
bwplot(data=myobject, numeric~group, panel=panel.violin)
# jitter plot displays all the points individually
library(ggplot2)
qplot(data=myobject, formula=y~x, x=group, y=numeric, geom="jitter")
myanova=aov(data=myobject, numeric~group)
summary(myanova)
TukeyHSD(myanova)
plot(TukeyHSD(myanova))
coefficients(myanova)
#pairwise t test with adjusted p value as an alternative post hoc test
pairwise.t.test(x=myobject$numeric, g=myobject$group, p.adj="BH")
library(DTK)
#Tukey Kramer Test
TK.test(x=myobject$numeric, f=myobject$group)
# we need those post hoc tests to adjust the p values - otherwise the T1 error rate
# would be inflated ( higher than significance level 0.05)
kruskal.test(data=myobject, numeric~group)
|
#setwd("/var/lib/jenkins/workspace/PredictiveAnalytics_R_Churn/")
# Working directory is hard-coded to the local demo checkout; the commented
# line above is the Jenkins workspace equivalent.
setwd("C:\\NewtDemo_Churn_Analytics")
library(dplyr)
# Positional command-line arguments describing one customer record; the
# individual positions are consumed further down in the script.
args<-commandArgs(TRUE)
telecomdata<-read.csv("WA_Fn-UseC_-Telco-Customer-Churn.csv",header = T)
#Create new column tenure_interval from the tenure column
# Bucket a single tenure value (in months) into a labelled interval.
# Negative tenure falls through to invisible NULL, matching the original
# if/else chain's behaviour when no branch matches.
group_tenure <- function(tenure){
    if (tenure < 0) {
        return(invisible(NULL))
    }
    upper_bounds <- c(6, 12, 24, 36, 48, 62)
    interval_labels <- c('0-6 Month', '6-12 Month', '12-24 Month',
                         '24-36 Month', '36-48 Month', '48-62 Month',
                         '> 62 Month')
    # left.open picks the (low, high] bucket, so tenure <= 6 maps to the
    # first label and tenure > 62 to the last.
    interval_labels[findInterval(tenure, upper_bounds, left.open = TRUE) + 1]
}
# apply group_tenure function on each row of dataframe
# (sapply over the numeric tenure column; the result is used as a factor)
telecomdata$tenure_interval <- sapply(telecomdata$tenure,group_tenure)
telecomdata$tenure_interval <- as.factor(telecomdata$tenure_interval)
# Ignore the variables with more levels while predicting the model
# Columns "customerID" and "tenure" having more levels
telecomdata <- select(telecomdata,-customerID,-tenure)
# convert factor variables into character variables before changing the values
telecomdata$MultipleLines <- as.character(telecomdata$MultipleLines)
telecomdata$OnlineSecurity <- as.character(telecomdata$OnlineSecurity)
telecomdata$OnlineBackup <- as.character(telecomdata$OnlineBackup)
telecomdata$DeviceProtection <- as.character(telecomdata$DeviceProtection)
telecomdata$TechSupport <- as.character(telecomdata$TechSupport)
telecomdata$StreamingTV <- as.character(telecomdata$StreamingTV)
telecomdata$StreamingMovies <- as.character(telecomdata$StreamingMovies)
# Collapse "No phone service"/"No internet service" into plain "No" so each
# service column becomes a two-level Yes/No predictor.
telecomdata$MultipleLines[telecomdata$MultipleLines=="No phone service"] <- "No"
telecomdata$OnlineSecurity[telecomdata$OnlineSecurity=="No internet service"] <- "No"
telecomdata$OnlineBackup[telecomdata$OnlineBackup=="No internet service"] <- "No"
telecomdata$DeviceProtection[telecomdata$DeviceProtection=="No internet service"] <- "No"
telecomdata$TechSupport[telecomdata$TechSupport=="No internet service"] <- "No"
telecomdata$StreamingTV[telecomdata$StreamingTV=="No internet service"] <- "No"
telecomdata$StreamingMovies[telecomdata$StreamingMovies=="No internet service"] <- "No"
# converting character variables into factor variables
telecomdata$MultipleLines <- as.factor(telecomdata$MultipleLines)
telecomdata$OnlineSecurity <- as.factor(telecomdata$OnlineSecurity)
telecomdata$OnlineBackup <- as.factor(telecomdata$OnlineBackup)
telecomdata$DeviceProtection <- as.factor(telecomdata$DeviceProtection)
telecomdata$TechSupport <- as.factor(telecomdata$TechSupport)
telecomdata$StreamingTV <- as.factor(telecomdata$StreamingTV)
telecomdata$StreamingMovies <- as.factor(telecomdata$StreamingMovies)
# check the number of NA rows if it is relatively small in number then ignore those rows from the analysis
telecomdata <- na.omit(telecomdata)
# set the seed it will output same output when ever the model is executed
set.seed(123)
#Churn Model using Logistic Regression
telecomModelstep_1<-glm(Churn ~ PaymentMethod+OnlineSecurity+MonthlyCharges+StreamingMovies+PaperlessBilling+StreamingTV+InternetService+Contract+tenure_interval+MultipleLines+SeniorCitizen,data=telecomdata,family=binomial(link="logit"))
# testData Preparation with input arguments from Java
#testData<-data.frame(PaymentMethod=factor(),OnlineSecurity=factor(),MonthlyCharges=double(),StreamingMovies=factor(),PaperlessBilling=factor(),StreamingTV=factor(),InternetService=factor(),Contract=factor(),tenure_interval=factor(),MultipleLines=factor(),SeniorCitizen=integer());
# args[9] carries the tenure in months; bucket it like the training data.
tenure_intr <- sapply(args[9],group_tenure)
tenure_intr <- as.factor(tenure_intr)
#args[1]<-substr(args[1],2,nchar(args[1])-1)
#args[7]<-substr(args[7],2,nchar(args[7])-1)
#args[8]<-substr(args[8],2,nchar(args[8])-1)
#x<-data.frame(args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],tenure_intr,args[10],args[11])
#names(x)<-c("PaymentMethod","OnlineSecurity","MonthlyCharges","StreamingMovies","PaperlessBilling","StreamingTV","InternetService","Contract","tenure_interval","MultipleLines","SeniorCitizen")
#testData<-rbind(testData,x)
# Build the single-row scoring frame from positional args 1-11.
testData<-data.frame(PaymentMethod=as.factor(args[1]),OnlineSecurity=as.factor(args[2]),MonthlyCharges=as.double(args[3]),StreamingMovies=as.factor(args[4]),PaperlessBilling=as.factor(args[5]),StreamingTV=as.factor(args[6]),InternetService=as.factor(args[7]),Contract=as.factor(args[8]),tenure_interval=as.factor(tenure_intr),MultipleLines=as.factor(args[10]),SeniorCitizen=as.integer(args[11]));
# Predict with the model and input args. If the prediction probability is greater than 0.5 then those
# customers are classified as churned customer less than 0.5 are classified as not churning customer
test.predictionsstep_response <- predict(telecomModelstep_1,newdata=testData,type="response")
fitted_result <- ifelse(test.predictionsstep_response > 0.5,'Yes','No')
# args[12] is interpolated into the human-readable verdict message.
if(fitted_result == 'Yes'){
    print(paste0(args[12]," is predicted to churn out of Verizon"))
}else{
    print(paste0(args[12]," is predicted to stay with Verizon"))
}
#Random Forest with selection variables
#set.seed(415)
#mytree_sel<-randomForest(Churn ~ PaymentMethod+OnlineSecurity+MonthlyCharges+StreamingMovies+PaperlessBilling+StreamingTV+InternetService+Contract+tenure_interval+MultipleLines+SeniorCitizen,data=telecomdata,importance = T)
#pred_sel<-predict(mytree_sel, newdata =testData,type="response")
#fitted_result_2 <- ifelse(pred_sel > 0.5,'Yes','No')
#if(fitted_result_2 == 'Yes'){
#print("Customer is predicted to churn out of Verizon based on Model 2")
#}else{
#print("Customer is predicted to stay with Verizon based on Model 2")
#}
| /predict_churn2.R | no_license | DevOpsInaBox/PredictiveAnalaytics_R_Churn | R | false | false | 5,724 | r | #setwd("/var/lib/jenkins/workspace/PredictiveAnalytics_R_Churn/")
setwd("C:\\NewtDemo_Churn_Analytics")
library(dplyr)
args<-commandArgs(TRUE)
telecomdata<-read.csv("WA_Fn-UseC_-Telco-Customer-Churn.csv",header = T)
#Create new column tenure_interval from the tenure column
# Translate one tenure (months) into its interval label via a lookup table.
# Negative tenure yields invisible NULL, as the original fall-through did.
group_tenure <- function(tenure){
    if (tenure < 0) {
        return(invisible(NULL))
    }
    cutoffs <- c(6, 12, 24, 36, 48, 62)
    labels <- c('0-6 Month', '6-12 Month', '12-24 Month', '24-36 Month',
                '36-48 Month', '48-62 Month', '> 62 Month')
    # (low, high] buckets via left.open; index 0 (tenure <= 6) maps to the
    # first label, index 6 (tenure > 62) to the last.
    labels[findInterval(tenure, cutoffs, left.open = TRUE) + 1]
}
# apply group_tenure function on each row of dataframe
telecomdata$tenure_interval <- sapply(telecomdata$tenure,group_tenure)
telecomdata$tenure_interval <- as.factor(telecomdata$tenure_interval)
# Ignore the variables with more levels while predicting the model
# Columns "customerID" and "tenure" having more levels
telecomdata <- select(telecomdata,-customerID,-tenure)
# convert factor variables into character variables before changing the values
telecomdata$MultipleLines <- as.character(telecomdata$MultipleLines)
telecomdata$OnlineSecurity <- as.character(telecomdata$OnlineSecurity)
telecomdata$OnlineBackup <- as.character(telecomdata$OnlineBackup)
telecomdata$DeviceProtection <- as.character(telecomdata$DeviceProtection)
telecomdata$TechSupport <- as.character(telecomdata$TechSupport)
telecomdata$StreamingTV <- as.character(telecomdata$StreamingTV)
telecomdata$StreamingMovies <- as.character(telecomdata$StreamingMovies)
telecomdata$MultipleLines[telecomdata$MultipleLines=="No phone service"] <- "No"
telecomdata$OnlineSecurity[telecomdata$OnlineSecurity=="No internet service"] <- "No"
telecomdata$OnlineBackup[telecomdata$OnlineBackup=="No internet service"] <- "No"
telecomdata$DeviceProtection[telecomdata$DeviceProtection=="No internet service"] <- "No"
telecomdata$TechSupport[telecomdata$TechSupport=="No internet service"] <- "No"
telecomdata$StreamingTV[telecomdata$StreamingTV=="No internet service"] <- "No"
telecomdata$StreamingMovies[telecomdata$StreamingMovies=="No internet service"] <- "No"
# converting character variables into factor variables
telecomdata$MultipleLines <- as.factor(telecomdata$MultipleLines)
telecomdata$OnlineSecurity <- as.factor(telecomdata$OnlineSecurity)
telecomdata$OnlineBackup <- as.factor(telecomdata$OnlineBackup)
telecomdata$DeviceProtection <- as.factor(telecomdata$DeviceProtection)
telecomdata$TechSupport <- as.factor(telecomdata$TechSupport)
telecomdata$StreamingTV <- as.factor(telecomdata$StreamingTV)
telecomdata$StreamingMovies <- as.factor(telecomdata$StreamingMovies)
# check the number of NA rows if it is relatively small in number then ignore those rows from the analysis
telecomdata <- na.omit(telecomdata)
# set the seed it will output same output when ever the model is executed
set.seed(123)
#Churn Model using Logistic Regression
telecomModelstep_1<-glm(Churn ~ PaymentMethod+OnlineSecurity+MonthlyCharges+StreamingMovies+PaperlessBilling+StreamingTV+InternetService+Contract+tenure_interval+MultipleLines+SeniorCitizen,data=telecomdata,family=binomial(link="logit"))
# testData Preparation with input arguments from Java
#testData<-data.frame(PaymentMethod=factor(),OnlineSecurity=factor(),MonthlyCharges=double(),StreamingMovies=factor(),PaperlessBilling=factor(),StreamingTV=factor(),InternetService=factor(),Contract=factor(),tenure_interval=factor(),MultipleLines=factor(),SeniorCitizen=integer());
tenure_intr <- sapply(args[9],group_tenure)
tenure_intr <- as.factor(tenure_intr)
#args[1]<-substr(args[1],2,nchar(args[1])-1)
#args[7]<-substr(args[7],2,nchar(args[7])-1)
#args[8]<-substr(args[8],2,nchar(args[8])-1)
#x<-data.frame(args[1],args[2],args[3],args[4],args[5],args[6],args[7],args[8],tenure_intr,args[10],args[11])
#names(x)<-c("PaymentMethod","OnlineSecurity","MonthlyCharges","StreamingMovies","PaperlessBilling","StreamingTV","InternetService","Contract","tenure_interval","MultipleLines","SeniorCitizen")
#testData<-rbind(testData,x)
testData<-data.frame(PaymentMethod=as.factor(args[1]),OnlineSecurity=as.factor(args[2]),MonthlyCharges=as.double(args[3]),StreamingMovies=as.factor(args[4]),PaperlessBilling=as.factor(args[5]),StreamingTV=as.factor(args[6]),InternetService=as.factor(args[7]),Contract=as.factor(args[8]),tenure_interval=as.factor(tenure_intr),MultipleLines=as.factor(args[10]),SeniorCitizen=as.integer(args[11]));
# Predict with the model and input args. If the prediction probability is greater than 0.5 then those
# customers are classified as churned customer less than 0.5 are classified as not churning customer
test.predictionsstep_response <- predict(telecomModelstep_1,newdata=testData,type="response")
fitted_result <- ifelse(test.predictionsstep_response > 0.5,'Yes','No')
if(fitted_result == 'Yes'){
print(paste0(args[12]," is predicted to churn out of Verizon"))
}else{
print(paste0(args[12]," is predicted to stay with Verizon"))
}
#Random Forest with selection variables
#set.seed(415)
#mytree_sel<-randomForest(Churn ~ PaymentMethod+OnlineSecurity+MonthlyCharges+StreamingMovies+PaperlessBilling+StreamingTV+InternetService+Contract+tenure_interval+MultipleLines+SeniorCitizen,data=telecomdata,importance = T)
#pred_sel<-predict(mytree_sel, newdata =testData,type="response")
#fitted_result_2 <- ifelse(pred_sel > 0.5,'Yes','No')
#if(fitted_result_2 == 'Yes'){
#print("Customer is predicted to churn out of Verizon based on Model 2")
#}else{
#print("Customer is predicted to stay with Verizon based on Model 2")
#}
|
library(doParallel)
library(foreach)
# Calculate the number of cores
getDoParWorkers()
detectCores()
# Reserve one core for the session; register the rest as the parallel backend.
cl=makeCluster(detectCores()-1)
registerDoParallel(cl)
getDoParWorkers()
#for(m in 1:sim)
# One Monte-Carlo replicate of the IPW cumulative-risk estimator under an
# incremental-propensity-style intervention with multiplier delta.
# m indexes the replicate: it selects a pre-generated seed so replicates are
# reproducible yet distinct. Returns c(t1,...,t5): cumulative incidence at
# times 1-5 (each the running product of per-interval weighted hazards).
myfunc = function(m)
{
  options(warn=-1)
  #library(geepack);
  library(MASS);library(ResourceSelection);
  #library(ltmle); library(SuperLearner)
  library(dplyr); library(glm2);
  library(data.table)
  #library(reshape2) #do not use for data frame only
  # Each worker keeps data.table single-threaded to avoid oversubscription.
  setDTthreads(1)
  logit <- function(term) {
    return( ifelse(!is.na(term),log(term/(1-term)),NA) )
  }
  EXPIT <- function(term) {
    return( ifelse(!is.na(term),exp(term)/(1+exp(term)),NA) )
  }
  # datagen() (the simulation data generator) is defined in this file,
  # sourced fresh on each worker.
  source("datagen.R")
  # Fixed master seed -> deterministic seed table; replicate m uses entry m.
  set.seed(1129)
  seeds = floor(runif(1000)*10^8);
  set.seed(seeds[m])
  n <- 1000
  K <- 5
  delta=0.5
  # True coefficients passed to datagen (comments label the covariates).
  alpha0=-1; alpha1=-2; alpha2=-1; alpha3=1; alpha4=2;
  beta1_0=-1; beta1_1=0; beta1_2=-1; beta1_3=1; beta1_4=-1; beta1_5=1; beta1_6=0; #rectal STI indicator
  beta2_0=-1; beta2_1=0; beta2_2=0; beta2_3=1; beta2_4=-1; beta2_5=1; beta2_6=0; #cd4 count
  beta3_0=1; beta3_1=0; beta3_2=1; beta3_3=1; beta3_4=0; beta3_5=1; beta3_6=0; #unprotected sexual activity (H/L)
  theta0=1; theta1=0; theta2=3; theta3=-2; theta4=1; theta5=-1; theta6=0; #theta1 always 0
  cens0=-2; cens1=0; cens2=1; cens3=-1
  # Simulate n subjects, K intervals each; stack into one long data.table.
  df <- lapply(as.list(1:n), FUN=function(ind){
    datagen(ind, K=K, sigma=1,
            alpha0=alpha0, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4 = alpha4,
            beta1_0=beta1_0, beta1_1=beta1_1, beta1_2=beta1_2, beta1_3=beta1_3, beta1_4=beta1_4, beta1_5=beta1_5, beta1_6=beta1_6,
            beta2_0=beta2_0, beta2_1=beta2_1, beta2_2=beta2_2, beta2_3=beta2_3, beta2_4=beta2_4, beta2_5=beta2_5, beta2_6=beta2_6,
            beta3_0=beta3_0, beta3_1=beta3_1, beta3_2=beta3_2, beta3_3=beta3_3, beta3_4=beta3_4, beta3_5=beta3_5, beta3_6=beta3_6,
            theta0=theta0, theta1=theta1, theta2=theta2, theta3=theta3, theta4=theta4, theta5=theta5, theta6=theta6,
            cens0=cens0, cens1=cens1, cens2=cens2, cens3=cens3)
  })
  dffull <- rbindlist(df)
  # Lagged treatment within subject; 0 at baseline (t0 == 0).
  dffull[, paste("lag_A") := shift(A, 1, NA, type='lag'), by=id]
  dffull$lag_A = ifelse(dffull$t0==0, 0, dffull$lag_A)
  # "Correct" propensity model (includes lag_A): P(A = observed A | history).
  afitc = glm2(A ~ L1 + L2 + L3 + lag_A, family = binomial(), data = dffull) #This is from the data generation mechanism
  dffull$pred_obs_correct = predict(afitc, newdata = dffull, type="response")
  dffull$pred_obs_correct = ifelse(dffull$A==1, dffull$pred_obs_correct, 1-dffull$pred_obs_correct)
  dffull$lag_A=NULL
  # Misspecified propensity model (drops lag_A); the correct model is kept
  # for the first two intervals (t0 < 2) only.
  afit = glm2(A ~ L1 + L2 + L3, family = binomial(), data = dffull) #This is from the data generation mechanism
  dffull$pred_obs = predict(afit, newdata = dffull, type="response")
  dffull$pred_obs = ifelse(dffull$A==1, dffull$pred_obs, 1-dffull$pred_obs)
  dffull$pred_obs = ifelse(dffull$t0<2, dffull$pred_obs_correct, dffull$pred_obs)
  # NOTE(review): lag_A was already removed above; this second removal is a
  # harmless no-op.
  dffull$lag_A=NULL
  # Censoring weights disabled here (set to 1); the fitted version is
  # commented out.
  #cfit = glm2(Cen~L2+L3,family=binomial,data=dffull)
  dffull$pred_obsc = 1#predict(cfit, newdata = dffull, type="response")
  #dffull$pred_obsc = ifelse(dffull$Cen==1, dffull$pred_obsc, 1-dffull$pred_obsc)
  # Reshape long -> wide: one row per subject, columns suffixed by interval.
  dffullwide = dcast(dffull, id ~ t0, value.var = c("L1","L2","L3","A","Cen","Y","pred_obs","pred_obsc","U"))
  tmpdata = dffullwide
  #subset data
  # Carry events forward (once Y hits 0 it stays 0), then shift the outcome
  # columns so Y_k refers to status at the end of interval k.
  tmpdata$Y_1 = ifelse(tmpdata$Y_0==0,0,tmpdata$Y_1)
  tmpdata$Y_2 = ifelse(!is.na(tmpdata$Y_1) & tmpdata$Y_1==0,0,tmpdata$Y_2)
  tmpdata$Y_3 = ifelse(!is.na(tmpdata$Y_2) & tmpdata$Y_2==0,0,tmpdata$Y_3)
  tmpdata$Y_4 = ifelse(!is.na(tmpdata$Y_3) & tmpdata$Y_3==0,0,tmpdata$Y_4)
  tmpdata$Y_5 = tmpdata$Y_4
  tmpdata$Y_4 = tmpdata$Y_3
  tmpdata$Y_3 = tmpdata$Y_2
  tmpdata$Y_2 = tmpdata$Y_1
  tmpdata$Y_1 = tmpdata$Y_0
  tmpdata$Y_0 = NULL
  tmpdata$id = seq(1,n,by=1)
  # Cumulative products of per-interval propensities (pi_k) and censoring
  # probabilities (pi_kc); the latter are all 1 here.
  tmpdata$pi4 <- tmpdata$pi3 <- tmpdata$pi2 <- tmpdata$pi1 <- tmpdata$pi0 <- NA
  tmpdata$pi4c <- tmpdata$pi3c <- tmpdata$pi2c <- tmpdata$pi1c <- tmpdata$pi0c <- NA
  tmpdata$pi0 = tmpdata$pred_obs_0
  tmpdata$pi1 = tmpdata$pi0*tmpdata$pred_obs_1
  tmpdata$pi2 = tmpdata$pi1*tmpdata$pred_obs_2
  tmpdata$pi3 = tmpdata$pi2*tmpdata$pred_obs_3
  tmpdata$pi4 = tmpdata$pi3*tmpdata$pred_obs_4
  tmpdata$pi0c = tmpdata$pred_obsc_0
  tmpdata$pi1c = tmpdata$pi0c*tmpdata$pred_obsc_1
  tmpdata$pi2c = tmpdata$pi1c*tmpdata$pred_obsc_2
  tmpdata$pi3c = tmpdata$pi2c*tmpdata$pred_obsc_3
  tmpdata$pi4c = tmpdata$pi3c*tmpdata$pred_obsc_4
  ##calculate risk
  # NOTE(review): 'mean' shadows base::mean and 'ind' is never used below.
  mean = NULL
  ind = NA;
  # For each interval: restrict to subjects still at risk, build the
  # intervention weight fint_k (delta-shifted when L1_k == 1), and fit an
  # intercept-only weighted logistic model; plogis of the intercept is the
  # interval-specific weighted survival probability param_k.
  #time 1
  tmp = tmpdata[tmpdata$Y_1<2,]
  tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
  fit1 = glm2(Y_1 ~ 1, family = binomial(), data = tmp, weights = fint0/(pi0*pi0c))
  param1 = plogis(summary(fit1)$coef[1,1])
  #time 2
  tmp = tmpdata[tmpdata$Y_1==1 & tmpdata$Y_2<2,]
  tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
  tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
  if(nrow(tmp)>0) {fit2 = glm2(Y_2 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1)/(pi1*pi1c))
  param2 = plogis(summary(fit2)$coef[1,1])} else{param2 = NA}
  #time 3
  tmp = tmpdata[tmpdata$Y_2==1 & tmpdata$Y_3<2,]
  tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
  tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
  tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
  if(nrow(tmp)>0) {fit3 = glm2(Y_3 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2)/(pi2*pi2c))
  param3 = plogis(summary(fit3)$coef[1,1])} else{param3 = NA}
  #time 4
  tmp = tmpdata[tmpdata$Y_3==1 & tmpdata$Y_4<2,]
  tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
  tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
  tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
  tmp$fint3 = (tmp$L1_3*delta + 1 - tmp$L1_3)*tmp$pred_obs_3 + tmp$L1_3*tmp$A_3*(1-delta)
  if(nrow(tmp)>0) {fit4 = glm2(Y_4 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2*fint3)/(pi3*pi3c))
  param4 = plogis(summary(fit4)$coef[1,1])} else{param4 = NA}
  #time 5
  tmp = tmpdata[tmpdata$Y_4==1 & tmpdata$Y_5<2,]
  tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
  tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
  tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
  tmp$fint3 = (tmp$L1_3*delta + 1 - tmp$L1_3)*tmp$pred_obs_3 + tmp$L1_3*tmp$A_3*(1-delta)
  tmp$fint4 = (tmp$L1_4*delta + 1 - tmp$L1_4)*tmp$pred_obs_4 + tmp$L1_4*tmp$A_4*(1-delta)
  if(nrow(tmp)>0) {fit5 = glm2(Y_5 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2*fint3*fint4)/(pi4*pi4c))
  param5 = plogis(summary(fit5)$coef[1,1])} else{param5 = NA}
  #ind = ifelse(comp1==0 | comp2==0 | comp3==0 | comp4==0, 1, ind)
  # Cumulative product over intervals -> cumulative incidence curve.
  t1 = param1
  t2 = param2*(t1)
  t3 = param3*(t2)
  t4 = param4*(t3)
  t5 = param5*(t4)
  myparam = c(t1, t2, t3, t4, t5)
  return(myparam)
}
# Run 1000 replicates in parallel; each returns a length-5 cumulative-risk
# vector, stacked into a 1000 x 5 matrix and written to CSV.
test = foreach(m=1:1000) %dopar% myfunc(m)
test2 = do.call("rbind", test)
write.csv(test2,"ipw_haz_j.csv")
stopCluster(cl)
| /parametric_models/jplusone/n1000/delta_0.50/ipw_haz.R | no_license | lw499/Stochastic_treatment_paper | R | false | false | 7,410 | r | library(doParallel)
library(foreach)
# Calculate the number of cores
getDoParWorkers()
detectCores()
cl=makeCluster(detectCores()-1)
registerDoParallel(cl)
getDoParWorkers()
#for(m in 1:sim)
myfunc = function(m)
{
options(warn=-1)
#library(geepack);
library(MASS);library(ResourceSelection);
#library(ltmle); library(SuperLearner)
library(dplyr); library(glm2);
library(data.table)
#library(reshape2) #do not use for data frame only
setDTthreads(1)
logit <- function(term) {
return( ifelse(!is.na(term),log(term/(1-term)),NA) )
}
EXPIT <- function(term) {
return( ifelse(!is.na(term),exp(term)/(1+exp(term)),NA) )
}
source("datagen.R")
set.seed(1129)
seeds = floor(runif(1000)*10^8);
set.seed(seeds[m])
n <- 1000
K <- 5
delta=0.5
alpha0=-1; alpha1=-2; alpha2=-1; alpha3=1; alpha4=2;
beta1_0=-1; beta1_1=0; beta1_2=-1; beta1_3=1; beta1_4=-1; beta1_5=1; beta1_6=0; #rectal STI indicator
beta2_0=-1; beta2_1=0; beta2_2=0; beta2_3=1; beta2_4=-1; beta2_5=1; beta2_6=0; #cd4 count
beta3_0=1; beta3_1=0; beta3_2=1; beta3_3=1; beta3_4=0; beta3_5=1; beta3_6=0; #unprotected sexual activity (H/L)
theta0=1; theta1=0; theta2=3; theta3=-2; theta4=1; theta5=-1; theta6=0; #theta1 always 0
cens0=-2; cens1=0; cens2=1; cens3=-1
df <- lapply(as.list(1:n), FUN=function(ind){
datagen(ind, K=K, sigma=1,
alpha0=alpha0, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4 = alpha4,
beta1_0=beta1_0, beta1_1=beta1_1, beta1_2=beta1_2, beta1_3=beta1_3, beta1_4=beta1_4, beta1_5=beta1_5, beta1_6=beta1_6,
beta2_0=beta2_0, beta2_1=beta2_1, beta2_2=beta2_2, beta2_3=beta2_3, beta2_4=beta2_4, beta2_5=beta2_5, beta2_6=beta2_6,
beta3_0=beta3_0, beta3_1=beta3_1, beta3_2=beta3_2, beta3_3=beta3_3, beta3_4=beta3_4, beta3_5=beta3_5, beta3_6=beta3_6,
theta0=theta0, theta1=theta1, theta2=theta2, theta3=theta3, theta4=theta4, theta5=theta5, theta6=theta6,
cens0=cens0, cens1=cens1, cens2=cens2, cens3=cens3)
})
dffull <- rbindlist(df)
dffull[, paste("lag_A") := shift(A, 1, NA, type='lag'), by=id]
dffull$lag_A = ifelse(dffull$t0==0, 0, dffull$lag_A)
afitc = glm2(A ~ L1 + L2 + L3 + lag_A, family = binomial(), data = dffull) #This is from the data generation mechanism
dffull$pred_obs_correct = predict(afitc, newdata = dffull, type="response")
dffull$pred_obs_correct = ifelse(dffull$A==1, dffull$pred_obs_correct, 1-dffull$pred_obs_correct)
dffull$lag_A=NULL
afit = glm2(A ~ L1 + L2 + L3, family = binomial(), data = dffull) #This is from the data generation mechanism
dffull$pred_obs = predict(afit, newdata = dffull, type="response")
dffull$pred_obs = ifelse(dffull$A==1, dffull$pred_obs, 1-dffull$pred_obs)
dffull$pred_obs = ifelse(dffull$t0<2, dffull$pred_obs_correct, dffull$pred_obs)
dffull$lag_A=NULL
#cfit = glm2(Cen~L2+L3,family=binomial,data=dffull)
dffull$pred_obsc = 1#predict(cfit, newdata = dffull, type="response")
#dffull$pred_obsc = ifelse(dffull$Cen==1, dffull$pred_obsc, 1-dffull$pred_obsc)
dffullwide = dcast(dffull, id ~ t0, value.var = c("L1","L2","L3","A","Cen","Y","pred_obs","pred_obsc","U"))
tmpdata = dffullwide
#subset data
tmpdata$Y_1 = ifelse(tmpdata$Y_0==0,0,tmpdata$Y_1)
tmpdata$Y_2 = ifelse(!is.na(tmpdata$Y_1) & tmpdata$Y_1==0,0,tmpdata$Y_2)
tmpdata$Y_3 = ifelse(!is.na(tmpdata$Y_2) & tmpdata$Y_2==0,0,tmpdata$Y_3)
tmpdata$Y_4 = ifelse(!is.na(tmpdata$Y_3) & tmpdata$Y_3==0,0,tmpdata$Y_4)
tmpdata$Y_5 = tmpdata$Y_4
tmpdata$Y_4 = tmpdata$Y_3
tmpdata$Y_3 = tmpdata$Y_2
tmpdata$Y_2 = tmpdata$Y_1
tmpdata$Y_1 = tmpdata$Y_0
tmpdata$Y_0 = NULL
tmpdata$id = seq(1,n,by=1)
tmpdata$pi4 <- tmpdata$pi3 <- tmpdata$pi2 <- tmpdata$pi1 <- tmpdata$pi0 <- NA
tmpdata$pi4c <- tmpdata$pi3c <- tmpdata$pi2c <- tmpdata$pi1c <- tmpdata$pi0c <- NA
tmpdata$pi0 = tmpdata$pred_obs_0
tmpdata$pi1 = tmpdata$pi0*tmpdata$pred_obs_1
tmpdata$pi2 = tmpdata$pi1*tmpdata$pred_obs_2
tmpdata$pi3 = tmpdata$pi2*tmpdata$pred_obs_3
tmpdata$pi4 = tmpdata$pi3*tmpdata$pred_obs_4
tmpdata$pi0c = tmpdata$pred_obsc_0
tmpdata$pi1c = tmpdata$pi0c*tmpdata$pred_obsc_1
tmpdata$pi2c = tmpdata$pi1c*tmpdata$pred_obsc_2
tmpdata$pi3c = tmpdata$pi2c*tmpdata$pred_obsc_3
tmpdata$pi4c = tmpdata$pi3c*tmpdata$pred_obsc_4
##calculate risk
mean = NULL
ind = NA;
#time 1
tmp = tmpdata[tmpdata$Y_1<2,]
tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
fit1 = glm2(Y_1 ~ 1, family = binomial(), data = tmp, weights = fint0/(pi0*pi0c))
param1 = plogis(summary(fit1)$coef[1,1])
#time 2
tmp = tmpdata[tmpdata$Y_1==1 & tmpdata$Y_2<2,]
tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
if(nrow(tmp)>0) {fit2 = glm2(Y_2 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1)/(pi1*pi1c))
param2 = plogis(summary(fit2)$coef[1,1])} else{param2 = NA}
#time 3
tmp = tmpdata[tmpdata$Y_2==1 & tmpdata$Y_3<2,]
tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
if(nrow(tmp)>0) {fit3 = glm2(Y_3 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2)/(pi2*pi2c))
param3 = plogis(summary(fit3)$coef[1,1])} else{param3 = NA}
#time 4
tmp = tmpdata[tmpdata$Y_3==1 & tmpdata$Y_4<2,]
tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
tmp$fint3 = (tmp$L1_3*delta + 1 - tmp$L1_3)*tmp$pred_obs_3 + tmp$L1_3*tmp$A_3*(1-delta)
if(nrow(tmp)>0) {fit4 = glm2(Y_4 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2*fint3)/(pi3*pi3c))
param4 = plogis(summary(fit4)$coef[1,1])} else{param4 = NA}
#time 5
tmp = tmpdata[tmpdata$Y_4==1 & tmpdata$Y_5<2,]
tmp$fint0 = (tmp$L1_0*delta + 1 - tmp$L1_0)*tmp$pi0 + tmp$L1_0*tmp$A_0*(1-delta)
tmp$fint1 = (tmp$L1_1*delta + 1 - tmp$L1_1)*tmp$pred_obs_1 + tmp$L1_1*tmp$A_1*(1-delta)
tmp$fint2 = (tmp$L1_2*delta + 1 - tmp$L1_2)*tmp$pred_obs_2 + tmp$L1_2*tmp$A_2*(1-delta)
tmp$fint3 = (tmp$L1_3*delta + 1 - tmp$L1_3)*tmp$pred_obs_3 + tmp$L1_3*tmp$A_3*(1-delta)
tmp$fint4 = (tmp$L1_4*delta + 1 - tmp$L1_4)*tmp$pred_obs_4 + tmp$L1_4*tmp$A_4*(1-delta)
if(nrow(tmp)>0) {fit5 = glm2(Y_5 ~ 1, family = binomial(), data = tmp, weights = (fint0*fint1*fint2*fint3*fint4)/(pi4*pi4c))
param5 = plogis(summary(fit5)$coef[1,1])} else{param5 = NA}
#ind = ifelse(comp1==0 | comp2==0 | comp3==0 | comp4==0, 1, ind)
t1 = param1
t2 = param2*(t1)
t3 = param3*(t2)
t4 = param4*(t3)
t5 = param5*(t4)
myparam = c(t1, t2, t3, t4, t5)
return(myparam)
}
# Run 1000 Monte Carlo replicates in parallel; myfunc() (defined above)
# returns the 5-vector c(t1, ..., t5) of cumulative risk estimates for one
# replicate, and the cluster `cl` is registered earlier in the file.
test = foreach(m=1:1000) %dopar% myfunc(m)
# Stack the per-replicate 5-vectors into a 1000 x 5 matrix.
test2 = do.call("rbind", test)
# Persist the IPW hazard estimates for all replicates.
write.csv(test2,"ipw_haz_j.csv")
stopCluster(cl)
|
\name{plot.tess}
\alias{plot.tess}
\title{Plot a Tessellation}
\description{
Plots a tessellation, with optional labels for the tiles, and optional
filled colour in each tile.
}
\usage{
\method{plot}{tess}(x, \dots, main, add=FALSE,
show.all=!add,
border=NULL,
do.plot=TRUE,
do.labels=FALSE,
labels=tilenames(x), labelargs=list(),
do.col=FALSE,
values=marks(x),
multiplot=TRUE,
col=NULL, ribargs=list())
}
\arguments{
\item{x}{Tessellation (object of class \code{"tess"}) to be plotted.}
\item{\dots}{Arguments controlling the appearance of the plot.}
\item{main}{Heading for the plot. A character string.}
\item{add}{Logical. Determines whether the tessellation plot is added
to the existing plot.
}
\item{show.all}{
Logical value indicating whether to plot everything
including the main title and the observation window of \code{x}.
}
\item{border}{
Colour of the tile boundaries. A character string or other value
specifying a single colour.
Ignored for pixel tessellations.
}
\item{do.plot}{
Logical value indicating whether to actually perform the plot.
}
\item{do.labels}{
Logical value indicating whether to show a text label for each tile
of the tessellation.
}
\item{labels}{Character vector of labels for the tiles.}
\item{labelargs}{
List of arguments passed to
\code{\link{text.default}} to control display of the text labels.
}
\item{do.col}{
Logical value indicating whether tiles should be filled with
colour. Always \code{TRUE} for pixel tessellations.
}
\item{values}{
A vector of numerical values (or a factor, or vector of character
strings) that will be associated with each tile of the tessellation
and which determine the colour of the tile. The default is
the marks of \code{x}. If the tessellation is not marked,
or if the argument \code{values=NULL} is given, the default is
a factor giving the tile identifier.
}
\item{multiplot}{
Logical value giving permission to display multiple plot panels.
This applies when \code{do.col=TRUE} and \code{ncol(values) > 1}.
}
\item{col}{
A vector of colours for each of the \code{values},
or a \code{\link{colourmap}} that maps these values to colours.
}
\item{ribargs}{
List of additional arguments to control the plot of the colour map,
if \code{do.col=TRUE}. See explanation in \code{\link{plot.im}}.
}
}
\details{
This is a method for the generic \code{\link{plot}} function
for the class \code{"tess"} of tessellations (see \code{\link{tess}}).
The window of the tessellation is plotted, and then the tiles of the
tessellation are plotted in their correct positions in the window.
Rectangular or polygonal tiles are plotted individually
using \code{\link{plot.owin}}, while a
tessellation represented by a pixel image
is plotted using \code{\link{plot.im}}.
The arguments \code{\dots} control the appearance of the plot,
and are passed to \code{\link{segments}},
\code{\link{plot.owin}} or \code{\link{plot.im}} as appropriate.
If \code{do.col=TRUE}, then the tiles of the tessellation are filled
with colours determined by the argument \code{values}.
By default, these values are the marks associated with each of the tiles.
If there is more than one column of marks or values,
then the default behaviour (if \code{multiplot=TRUE})
is to display several plot panels, one for
each column of mark values. Then the arguments \code{\dots} are
passed to \code{\link{plot.solist}} to determine the arrangement of
the panels.
}
\value{
(Invisible) window of class \code{"owin"} specifying a bounding box
for the plot, or an object of class \code{"colourmap"} specifying the
colour map. (In the latter case, the bounding box information is
available as an attribute, and can be extracted
using \code{\link{as.owin}}.)
}
\seealso{
\code{\link{tess}}
}
\examples{
Rect <- tess(xgrid=0:4,ygrid=0:4)
Diri <- dirichlet(runifpoint(7))
plot(Diri)
plot(Rect, border="blue", lwd=2, lty=2)
plot(Rect, do.col=TRUE, border="white")
plot(Rect, do.col=TRUE, values=runif(16), border="white")
B <- Rect[c(1, 2, 5, 7, 9)]
plot(B, hatch=TRUE)
plot(Diri, do.col=TRUE)
plot(Diri, do.col=TRUE, do.labels=TRUE, labelargs=list(col="white"),
ribbon=FALSE)
v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin())
levels(v) <- letters[seq(length(levels(v)))]
Img <- tess(image=v)
plot(Img)
plot(Img, col=rainbow(11), ribargs=list(las=1))
a <- tile.areas(Diri)
marks(Diri) <- data.frame(area=a, random=runif(7, max=max(a)))
plot(Diri, do.col=TRUE, equal.ribbon=TRUE)
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{hplot}
| /man/plot.tess.Rd | no_license | rubak/spatstat | R | false | false | 4,946 | rd | \name{plot.tess}
\alias{plot.tess}
\title{Plot a Tessellation}
\description{
Plots a tessellation, with optional labels for the tiles, and optional
filled colour in each tile.
}
\usage{
\method{plot}{tess}(x, \dots, main, add=FALSE,
show.all=!add,
border=NULL,
do.plot=TRUE,
do.labels=FALSE,
labels=tilenames(x), labelargs=list(),
do.col=FALSE,
values=marks(x),
multiplot=TRUE,
col=NULL, ribargs=list())
}
\arguments{
\item{x}{Tessellation (object of class \code{"tess"}) to be plotted.}
\item{\dots}{Arguments controlling the appearance of the plot.}
\item{main}{Heading for the plot. A character string.}
\item{add}{Logical. Determines whether the tessellation plot is added
to the existing plot.
}
\item{show.all}{
Logical value indicating whether to plot everything
including the main title and the observation window of \code{x}.
}
\item{border}{
Colour of the tile boundaries. A character string or other value
specifying a single colour.
Ignored for pixel tessellations.
}
\item{do.plot}{
Logical value indicating whether to actually perform the plot.
}
\item{do.labels}{
Logical value indicating whether to show a text label for each tile
of the tessellation.
}
\item{labels}{Character vector of labels for the tiles.}
\item{labelargs}{
List of arguments passed to
\code{\link{text.default}} to control display of the text labels.
}
\item{do.col}{
Logical value indicating whether tiles should be filled with
colour. Always \code{TRUE} for pixel tessellations.
}
\item{values}{
A vector of numerical values (or a factor, or vector of character
strings) that will be associated with each tile of the tessellation
and which determine the colour of the tile. The default is
the marks of \code{x}. If the tessellation is not marked,
or if the argument \code{values=NULL} is given, the default is
a factor giving the tile identifier.
}
\item{multiplot}{
Logical value giving permission to display multiple plot panels.
This applies when \code{do.col=TRUE} and \code{ncol(values) > 1}.
}
\item{col}{
A vector of colours for each of the \code{values},
or a \code{\link{colourmap}} that maps these values to colours.
}
\item{ribargs}{
List of additional arguments to control the plot of the colour map,
if \code{do.col=TRUE}. See explanation in \code{\link{plot.im}}.
}
}
\details{
This is a method for the generic \code{\link{plot}} function
for the class \code{"tess"} of tessellations (see \code{\link{tess}}).
The window of the tessellation is plotted, and then the tiles of the
tessellation are plotted in their correct positions in the window.
Rectangular or polygonal tiles are plotted individually
using \code{\link{plot.owin}}, while a
tessellation represented by a pixel image
is plotted using \code{\link{plot.im}}.
The arguments \code{\dots} control the appearance of the plot,
and are passed to \code{\link{segments}},
\code{\link{plot.owin}} or \code{\link{plot.im}} as appropriate.
If \code{do.col=TRUE}, then the tiles of the tessellation are filled
with colours determined by the argument \code{values}.
By default, these values are the marks associated with each of the tiles.
If there is more than one column of marks or values,
then the default behaviour (if \code{multiplot=TRUE})
is to display several plot panels, one for
each column of mark values. Then the arguments \code{\dots} are
passed to \code{\link{plot.solist}} to determine the arrangement of
the panels.
}
\value{
(Invisible) window of class \code{"owin"} specifying a bounding box
for the plot, or an object of class \code{"colourmap"} specifying the
colour map. (In the latter case, the bounding box information is
available as an attribute, and can be extracted
using \code{\link{as.owin}}.)
}
\seealso{
\code{\link{tess}}
}
\examples{
Rect <- tess(xgrid=0:4,ygrid=0:4)
Diri <- dirichlet(runifpoint(7))
plot(Diri)
plot(Rect, border="blue", lwd=2, lty=2)
plot(Rect, do.col=TRUE, border="white")
plot(Rect, do.col=TRUE, values=runif(16), border="white")
B <- Rect[c(1, 2, 5, 7, 9)]
plot(B, hatch=TRUE)
plot(Diri, do.col=TRUE)
plot(Diri, do.col=TRUE, do.labels=TRUE, labelargs=list(col="white"),
ribbon=FALSE)
v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin())
levels(v) <- letters[seq(length(levels(v)))]
Img <- tess(image=v)
plot(Img)
plot(Img, col=rainbow(11), ribargs=list(las=1))
a <- tile.areas(Diri)
marks(Diri) <- data.frame(area=a, random=runif(7, max=max(a)))
plot(Diri, do.col=TRUE, equal.ribbon=TRUE)
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{hplot}
|
#install.packages("RPostgreSQL")
require("RPostgreSQL")
# Open a connection to the local `yteduphong` PostgreSQL database and return
# it (callers stash it in the global `con` via getData()).
#
# SECURITY NOTE: credentials are hard-coded; prefer Sys.getenv() or a config
# file kept out of version control.
getDB <- function() {
  drv <- dbDriver("PostgreSQL")
  # `password=` spelled out in full — the original `pass=` only worked via
  # partial argument matching.  The connection is returned explicitly rather
  # than as the invisible value of an assignment.
  dbConnect(drv, dbname = "yteduphong",
            host = "localhost", port = 5432,
            user = "postgres", password = "PG@2014")
}
# Pull the full observation table: the dead-mosquito count (SOMAC) joined
# with each entomological index (DI, HIM, BI, HIL, CI), weather (AD),
# rain (M), temperature (ND) and the site's name (HUYEN), keyed by
# month (thang) / year (nam) / site (diemdo), newest rows first.
getData <- function() {
  # NOTE: `<<-` publishes the connection globally; the other helpers in this
  # file read `con` from the global environment.
  con <<- getDB()
  df_postgres <- dbGetQuery(con, "select sm.giatri as SOMAC, mdmc.giatri as DI, ncmc.giatri as HIM, dclq.giatri as BI, nclq.giatri as HIL, tlnlq.giatri as CI, tt.giatri as AD, m.giatri as M, nd.giatri as ND, sm.thang, sm.nam, sm.diemdo, dp.name as HUYEN from ytdp_somacchet sm, ytdp_matdomuoicai mdmc, ytdp_nhacomuoicai ncmc, ytdp_dungculangquang dclq, ytdp_nhacolangquang nclq, ytdp_tilenuoclangquang tlnlq, ytdp_thoitiet tt, ytdp_mua m, ytdp_nhietdo nd, ytdp_diaphuong dp where sm.thang = mdmc.thang AND sm.nam = mdmc.nam AND sm.diemdo = mdmc.diemdo AND sm.thang = ncmc.thang AND sm.nam = ncmc.nam AND sm.diemdo = ncmc.diemdo AND sm.thang = dclq.thang AND sm.nam = dclq.nam AND sm.diemdo = dclq.diemdo AND sm.thang = nclq.thang AND sm.nam = nclq.nam AND sm.diemdo = nclq.diemdo AND sm.thang = tlnlq.thang AND sm.nam = tlnlq.nam AND sm.diemdo = tlnlq.diemdo AND sm.thang = tt.thang AND sm.nam = tt.nam AND sm.diemdo = tt.diemdo AND sm.thang = m.thang AND sm.nam = m.nam AND sm.diemdo = m.diemdo AND sm.thang = nd.thang AND sm.nam = nd.nam AND sm.diemdo = nd.diemdo AND sm.diemdo = dp.id AND sm.name = 'SM' AND mdmc.name = 'DI' AND ncmc.name = 'HIM' AND dclq.name = 'BI' AND nclq.name = 'HIL' AND tlnlq.name = 'CI' AND tt.name = 'AD' AND m.name = 'M' AND nd.name = 'ND' order by sm.diemdo, sm.nam desc, sm.thang desc")
  return(df_postgres)
}
# Load the data, initialise the global state the rest of the script relies
# on, then immediately compute the next-year forecasts.
init <- function() {
  df_postgres <<- getData()
  #convert NA to 0
  df_postgres[is.na(df_postgres)] <<- 0
  # Distinct months, years and site ids observed in the data (globals).
  thang <<- unique(df_postgres$thang)
  nam <<- unique(df_postgres$nam)
  diemdo <<- unique(df_postgres$diemdo)
  number_of_gap_years <<- 1  # NOTE(review): not referenced elsewhere in this file — confirm it is still needed
  predictDataForNextYear()
}
# Upsert helper: run `existsSQL` (a SELECT EXISTS(...) query) against the
# global connection `con`, then execute either `insertSQL` or `updateSQL`.
#
# Fix: dbGetQuery() returns a 1x1 data.frame, and the original compared the
# whole data.frame against the string "FALSE" — it only worked through R's
# coercion rules.  Extract the scalar and compare explicitly; coercing it to
# character reproduces the original test whether the driver returns a
# logical or a character flag.
saveOrUpdate <- function(existsSQL, insertSQL, updateSQL) {
  exists_flag <- dbGetQuery(con, existsSQL)[[1]][[1]]
  if (identical(as.character(exists_flag), "FALSE")) {
    print("going to INSERT")
    sql <- insertSQL
  } else {
    print("going to UPDATE")
    sql <- updateSQL
  }
  rs <- dbSendQuery(con, sql)
  dbClearResult(rs)
  invisible(NULL)
}
# Per-month absolute error |predicted - observed dead-mosquito count| for one
# model (`model_type`) at one site (`diemdo`), ordered chronologically.
# `con` is the global connection opened by getDB()/getData().
getMeanError <- function(model_type, diemdo) {
  query <- sprintf("select abs(p.sm_du_bao - s.giatri) as saiso, p.thang, p.nam from ytdp_prediction p, ytdp_somacchet s where p.phuong_phap = '%s' and p.diemdo = %d and s.diemdo = p.diemdo and s.name = 'SM' and p.nam=s.nam and p.thang = s.thang order by p.diemdo, p.nam, p.thang", model_type, diemdo)
  dbGetQuery(con, query)
}
# Total absolute prediction error per site (sites 11-20 only) for the given
# model, grouped by site.  `con` is the global DB connection.
getSumMeanError <- function(model_type) {
  query <- sprintf("select sum(abs(p.sm_du_bao - s.giatri)) as tongsaiso, p.diemdo from ytdp_prediction p, ytdp_somacchet s where p.phuong_phap = '%s' and p.diemdo BETWEEN 11 and 20 and s.diemdo = p.diemdo and s.name = 'SM' and p.nam=s.nam and p.thang = s.thang group by p.diemdo", model_type)
  dbGetQuery(con, query)
}
# Upsert one predicted dead-mosquito count (`predict`) for a given
# month/year/site/model into ytdp_prediction.
# NOTE: SQL is assembled with sprintf(); only internal values are expected —
# do not pass untrusted strings as `model_type`.
saveOrUpdatePrediction <- function(predict_month, predict_year, location, model_type, predict) {
  sql_exists <- sprintf("SELECT EXISTS(SELECT 1 FROM ytdp_prediction WHERE thang = %d AND nam = %d AND diemdo = %d AND phuong_phap = '%s')", predict_month, predict_year, location, model_type)
  sql_insert <- sprintf("INSERT INTO ytdp_prediction (thang, nam, diemdo, phuong_phap, sm_du_bao) VALUES (%d, %d, %d, '%s', %e);", predict_month, predict_year, location, model_type, predict)
  sql_update <- sprintf("UPDATE ytdp_prediction SET sm_du_bao = %e WHERE thang = %d AND nam = %d AND diemdo = %d AND phuong_phap = '%s';", predict, predict_month, predict_year, location, model_type)
  saveOrUpdate(sql_exists, sql_insert, sql_update)
}
# Upsert a fitted regression model (serialised in `model`) for a pair of
# factors at one site into ytdp_regression.
# NOTE: SQL is assembled with sprintf(); callers must pass trusted values.
saveOrUpdateRegressionModel <- function(factor1_name, factor2_name, location, model, model_type) {
  sql_exists <- sprintf("SELECT EXISTS(SELECT 1 FROM ytdp_regression WHERE nhan_to_1 = '%s' AND nhan_to_2 = '%s' AND diemdo = %d AND phuong_phap = '%s')", factor1_name, factor2_name, location, model_type)
  sql_insert <- sprintf("INSERT INTO ytdp_regression (nhan_to_1, nhan_to_2, diemdo, mo_hinh, phuong_phap) VALUES ('%s', '%s', %d, '%s', '%s');", factor1_name, factor2_name, location, model, model_type)
  sql_update <- sprintf("UPDATE ytdp_regression SET mo_hinh = '%s' WHERE nhan_to_1 = '%s' AND nhan_to_2 = '%s' AND diemdo = %d AND phuong_phap = '%s';", model, factor1_name, factor2_name, location, model_type)
  saveOrUpdate(sql_exists, sql_insert, sql_update)
}
# For every site in the global `diemdo`, fit a seasonal ARIMA to each
# observed indicator column and forecast the next 12 months.  The per-site
# forecasts are merged and published in the global `df_predict`.
#
# Bug fix: the original detected the first loop iteration with
# `exists("df_predict")`, which also finds a stale global `df_predict` left
# over from a previous run and silently merges into it.  A local NULL
# sentinel makes the accumulation self-contained.
predictDataForNextYear <- function() {
  df_predict <- NULL
  for (diem in diemdo) {
    # Subset to this site and flip into chronological order (query is DESC).
    sub_df_diem <- subset(df_postgres, diemdo == diem)
    sub_df_diem <- sub_df_diem[nrow(sub_df_diem):1,]
    first_row <- head(sub_df_diem, n=1)
    first_month <- first_row$thang
    first_year <- first_row$nam
    # Template for the forecast rows: the last 12 observed months, one year on.
    concat_df <- tail(sub_df_diem, n=12)
    concat_df$nam <- concat_df$nam + 1
    # Forecast every indicator column except the outcome and the key columns.
    sub_df_diem <- subset(sub_df_diem, select=-c(somac,diemdo,thang,nam,huyen))
    for(name in names(sub_df_diem)) {
      a <- ts(sub_df_diem[,c(name)],frequency=12,start=c(first_year,first_month))
      # ARIMA(1,0,0)(2,1,0)[12], conditional-sum-of-squares fit; the
      # `seasonal=` argument is now named (it was passed positionally).
      # Original note admitted the model order was not yet fully justified.
      fit <- arima(a, order=c(1,0,0), seasonal=list(order=c(2,1,0), period=12),
                   method="CSS")
      fore <- predict(fit, n.ahead=12)
      concat_df[[name]] <- as.numeric(fore$pred)
      # (A large commented-out block that wrote each forecast back to its
      # source table via saveOrUpdate() was removed here; recover it from
      # version control if per-value persistence is needed again.)
    }
    # Accumulate per-site forecasts; merge(all=TRUE) keeps the original
    # outer-join accumulation semantics.
    if (is.null(df_predict)) {
      df_predict <- concat_df
    } else {
      df_predict <- merge(df_predict, concat_df, all=TRUE)
    }
  }
  # Publish globally, sorted by site then month.
  df_predict <<- df_predict[order(df_predict$diemdo,df_predict$thang),]
}
init() | /db.R | no_license | thachphat/dengue_prediction | R | false | false | 7,245 | r | #install.packages("RPostgreSQL")
require("RPostgreSQL")
# Connect to the local `yteduphong` PostgreSQL database and return the
# connection object.
#
# SECURITY NOTE: hard-coded credentials — move to Sys.getenv() or an
# untracked config file.
getDB <- function() {
  drv <- dbDriver("PostgreSQL")
  # Full `password=` argument name (the original `pass=` relied on partial
  # argument matching); the connection is the function's explicit value.
  dbConnect(drv, dbname = "yteduphong",
            host = "localhost", port = 5432,
            user = "postgres", password = "PG@2014")
}
# Fetch the joined observation table (dead-mosquito count SOMAC plus the
# indices DI/HIM/BI/HIL/CI, weather AD, rain M, temperature ND, and the
# site name HUYEN) keyed by month/year/site, newest first.
getData <- function() {
  # `<<-` stores the connection globally; the sibling helpers use `con`.
  con <<- getDB()
  df_postgres <- dbGetQuery(con, "select sm.giatri as SOMAC, mdmc.giatri as DI, ncmc.giatri as HIM, dclq.giatri as BI, nclq.giatri as HIL, tlnlq.giatri as CI, tt.giatri as AD, m.giatri as M, nd.giatri as ND, sm.thang, sm.nam, sm.diemdo, dp.name as HUYEN from ytdp_somacchet sm, ytdp_matdomuoicai mdmc, ytdp_nhacomuoicai ncmc, ytdp_dungculangquang dclq, ytdp_nhacolangquang nclq, ytdp_tilenuoclangquang tlnlq, ytdp_thoitiet tt, ytdp_mua m, ytdp_nhietdo nd, ytdp_diaphuong dp where sm.thang = mdmc.thang AND sm.nam = mdmc.nam AND sm.diemdo = mdmc.diemdo AND sm.thang = ncmc.thang AND sm.nam = ncmc.nam AND sm.diemdo = ncmc.diemdo AND sm.thang = dclq.thang AND sm.nam = dclq.nam AND sm.diemdo = dclq.diemdo AND sm.thang = nclq.thang AND sm.nam = nclq.nam AND sm.diemdo = nclq.diemdo AND sm.thang = tlnlq.thang AND sm.nam = tlnlq.nam AND sm.diemdo = tlnlq.diemdo AND sm.thang = tt.thang AND sm.nam = tt.nam AND sm.diemdo = tt.diemdo AND sm.thang = m.thang AND sm.nam = m.nam AND sm.diemdo = m.diemdo AND sm.thang = nd.thang AND sm.nam = nd.nam AND sm.diemdo = nd.diemdo AND sm.diemdo = dp.id AND sm.name = 'SM' AND mdmc.name = 'DI' AND ncmc.name = 'HIM' AND dclq.name = 'BI' AND nclq.name = 'HIL' AND tlnlq.name = 'CI' AND tt.name = 'AD' AND m.name = 'M' AND nd.name = 'ND' order by sm.diemdo, sm.nam desc, sm.thang desc")
  return(df_postgres)
}
# Entry point: load the data, set up the globals used throughout the file,
# then run the forecasting step.
init <- function() {
  df_postgres <<- getData()
  #convert NA to 0
  df_postgres[is.na(df_postgres)] <<- 0
  # Globals: distinct months, years and site ids found in the data.
  thang <<- unique(df_postgres$thang)
  nam <<- unique(df_postgres$nam)
  diemdo <<- unique(df_postgres$diemdo)
  number_of_gap_years <<- 1  # NOTE(review): appears unused in this file — confirm
  predictDataForNextYear()
}
# Upsert helper: run the EXISTS probe, then execute the INSERT or UPDATE
# statement on the global connection `con`.
#
# Fix: dbGetQuery() yields a 1x1 data.frame; comparing that whole frame to
# the string "FALSE" only worked by coercion.  Pull the scalar out and
# compare it explicitly — coercing to character preserves the original
# behaviour for both logical and character driver results.
saveOrUpdate <- function(existsSQL, insertSQL, updateSQL) {
  exists_flag <- dbGetQuery(con, existsSQL)[[1]][[1]]
  if (identical(as.character(exists_flag), "FALSE")) {
    print("going to INSERT")
    sql <- insertSQL
  } else {
    print("going to UPDATE")
    sql <- updateSQL
  }
  rs <- dbSendQuery(con, sql)
  dbClearResult(rs)
  invisible(NULL)
}
# Monthly absolute error |predicted - observed dead-mosquito count| for one
# model (`model_type`) at one site (`diemdo`), in chronological order.
# NOTE: the SQL is built with sprintf(); fine for internal values, but do
# not pass untrusted strings as `model_type`.
getMeanError <- function(model_type, diemdo){
  calculateMeanErrorSQL <- sprintf("select abs(p.sm_du_bao - s.giatri) as saiso, p.thang, p.nam from ytdp_prediction p, ytdp_somacchet s where p.phuong_phap = '%s' and p.diemdo = %d and s.diemdo = p.diemdo and s.name = 'SM' and p.nam=s.nam and p.thang = s.thang order by p.diemdo, p.nam, p.thang", model_type, diemdo)
  # `con` is the global connection opened by getDB()/getData().
  meanError <- dbGetQuery(con, calculateMeanErrorSQL)
  return(meanError)
}
# Total absolute prediction error per site for the given model, restricted
# to sites 11-20 and grouped by site.  Uses the global connection `con`.
getSumMeanError <- function(model_type){
  calculateMeanErrorSQL <- sprintf("select sum(abs(p.sm_du_bao - s.giatri)) as tongsaiso, p.diemdo from ytdp_prediction p, ytdp_somacchet s where p.phuong_phap = '%s' and p.diemdo BETWEEN 11 and 20 and s.diemdo = p.diemdo and s.name = 'SM' and p.nam=s.nam and p.thang = s.thang group by p.diemdo", model_type)
  meanError <- dbGetQuery(con, calculateMeanErrorSQL)
  return(meanError)
}
# Upsert one predicted dead-mosquito count (`predict`) for a given
# month/year/site/model into ytdp_prediction, delegating to saveOrUpdate().
# NOTE: SQL is assembled via sprintf(); only pass trusted values.
saveOrUpdatePrediction <- function(predict_month, predict_year, location, model_type, predict) {
  existsSQL <- sprintf("SELECT EXISTS(SELECT 1 FROM ytdp_prediction WHERE thang = %d AND nam = %d AND diemdo = %d AND phuong_phap = '%s')", predict_month, predict_year, location, model_type)
  insertSQL <- sprintf("INSERT INTO ytdp_prediction (thang, nam, diemdo, phuong_phap, sm_du_bao) VALUES (%d, %d, %d, '%s', %e);", predict_month, predict_year, location, model_type, predict)
  updateSQL <- sprintf("UPDATE ytdp_prediction SET sm_du_bao = %e WHERE thang = %d AND nam = %d AND diemdo = %d AND phuong_phap = '%s';", predict, predict_month, predict_year, location, model_type)
  #save or update predicted values
  saveOrUpdate(existsSQL, insertSQL, updateSQL)
}
# Upsert a fitted regression model (serialised in `model`) for a factor pair
# at one site into ytdp_regression, delegating to saveOrUpdate().
# NOTE: SQL is assembled via sprintf(); only pass trusted values.
saveOrUpdateRegressionModel <- function(factor1_name, factor2_name, location, model, model_type) {
  existsSQL <- sprintf("SELECT EXISTS(SELECT 1 FROM ytdp_regression WHERE nhan_to_1 = '%s' AND nhan_to_2 = '%s' AND diemdo = %d AND phuong_phap = '%s')", factor1_name, factor2_name, location, model_type)
  insertSQL <- sprintf("INSERT INTO ytdp_regression (nhan_to_1, nhan_to_2, diemdo, mo_hinh, phuong_phap) VALUES ('%s', '%s', %d, '%s', '%s');", factor1_name, factor2_name, location, model, model_type)
  updateSQL <- sprintf("UPDATE ytdp_regression SET mo_hinh = '%s' WHERE nhan_to_1 = '%s' AND nhan_to_2 = '%s' AND diemdo = %d AND phuong_phap = '%s';", model, factor1_name, factor2_name, location, model_type)
  #save or update prediction model
  saveOrUpdate(existsSQL, insertSQL, updateSQL)
}
# Forecast the next 12 months of every indicator column for each site with a
# seasonal ARIMA, accumulating the results into the global `df_predict`.
#
# Bug fix: `exists("df_predict")` was used to detect the first iteration,
# but it also matches a stale global left by an earlier run, silently
# merging old forecasts into new ones.  A local NULL sentinel removes that
# dependence on ambient state.
predictDataForNextYear <- function() {
  df_predict <- NULL
  for (diem in diemdo) {
    # This site's rows, flipped into chronological order (query is DESC).
    sub_df_diem <- subset(df_postgres, diemdo == diem)
    sub_df_diem <- sub_df_diem[nrow(sub_df_diem):1,]
    first_row <- head(sub_df_diem, n=1)
    first_month <- first_row$thang
    first_year <- first_row$nam
    # Forecast rows reuse the last observed year's key columns, shifted +1.
    concat_df <- tail(sub_df_diem, n=12)
    concat_df$nam <- concat_df$nam + 1
    # Every column except the outcome (somac) and the keys gets forecast.
    sub_df_diem <- subset(sub_df_diem, select=-c(somac,diemdo,thang,nam,huyen))
    for(name in names(sub_df_diem)) {
      a <- ts(sub_df_diem[,c(name)],frequency=12,start=c(first_year,first_month))
      # ARIMA(1,0,0)(2,1,0)[12], CSS fit; `seasonal=` is now passed by name
      # (the original relied on positional matching).
      fit <- arima(a, order=c(1,0,0), seasonal=list(order=c(2,1,0), period=12),
                   method="CSS")
      fore <- predict(fit, n.ahead=12)
      concat_df[[name]] <- as.numeric(fore$pred)
      # (The disabled per-value write-back via saveOrUpdate() that lived here
      # was removed; restore it from version control if needed.)
    }
    # merge(all=TRUE) preserves the original outer-join accumulation.
    if (is.null(df_predict)) {
      df_predict <- concat_df
    } else {
      df_predict <- merge(df_predict, concat_df, all=TRUE)
    }
  }
  # Publish globally, sorted by site then month.
  df_predict <<- df_predict[order(df_predict$diemdo,df_predict$thang),]
}
init() |
## From Yangying
## 2017.7.6 li learn
## Taxa-area relationship (TAR): cumulative OTU richness over nested areas.
# NOTE: clearing the workspace and setwd() are side effects on the caller's
# session; kept for compatibility with the original interactive workflow.
rm(list=ls(all=TRUE))
setwd('D:/文件存放/galaxy pipeline/galaxy/自己整理代码/种面积关系')
##setwd('C:/Users/dell/Desktop/article/Beijing-Tibet-Changsha/ITS 236-270/TAR')
# `header=TRUE` spelled out — the original `head=TRUE` only worked through
# partial argument matching.
rt <- read.table("B T C TAR.txt",sep="\t",header=TRUE);
#rt
row.names(rt)<-rt$OTUID
### An earlier exploratory attempt to generalise the repeated code into a
### loop is preserved below but disabled, because as written it broke the
### whole script:
###   * `i` was never defined, so `rt[, 2:i]` would error at run time;
###   * assigning a data.frame into `j[1]` (one slot of a character vector)
###     cannot work;
###   * the stray closing `}` had no matching `{`, which made the file fail
###     to parse under source().
### The working generalisation is the statistics() function defined next.
##b0 = c
##for (i in 6:22){
##  j=paste("B0",1:5,sep="")
##  k=paste("sb0",1:5,sep="")
##  j[1]
##  j[1]<- rt[,2:i]
##  j[1]<- data.frame(rowSums(j[1]))
##  j[1][j[1]>0,]<- 1
##  k[1]<- colSums(j[1])
##  k[1]
##}
## i=i+4
## Function version follows:
# Count the OTUs (rows of `x`) detected at least once across the given
# sample columns, i.e. the cumulative richness of that window.
#
# x: data frame (or matrix) of non-negative abundances, one row per OTU.
# Returns: the number of rows whose summed abundance is positive.
#
# Replaces the original three-step recoding (wrap rowSums in a data.frame,
# overwrite positive rows with 1, colSums) — which would also have silently
# summed any non-recoded negative totals — with the direct count.
statistics <- function(x) {
  sum(rowSums(x) > 0)
}
# Demo: richness of the first B0 window via the helper; its value should
# match the hand-rolled sb01 computed in the next section.
b01<- rt[,2:6]
sb01 = statistics(b01);sb01
#B0 ### Taxa-area curve for site B0: OTU richness over growing nested areas.
### Each step widens the window of sample columns, sums every OTU's
### abundance over the window, recodes positive totals to presence (1), and
### counts the OTUs present (colSums of the 0/1 column).
b01<- rt[,2:6]# columns 2-6 of the OTU table: smallest B0 window
b01s<- data.frame(rowSums(b01))# per-OTU total abundance in the window
b01s[b01s$rowSums.b01.>0,]<- 1
sb01<- colSums(b01s)
b02<- rt[,2:10]
b02s<- data.frame(rowSums(b02))
b02s[b02s$rowSums.b02.>0,]<- 1
sb02<- colSums(b02s)
b03<- rt[,2:14]
b03s<- data.frame(rowSums(b03))
b03s[b03s$rowSums.b03.>0,]<- 1
sb03<- colSums(b03s)
b04<- rt[,2:18]
b04s<- data.frame(rowSums(b04))
b04s[b04s$rowSums.b04.>0,]<- 1
sb04<- colSums(b04s)
b05<- rt[,2:22]
b05s<- data.frame(rowSums(b05))
b05s[b05s$rowSums.b05.>0,]<- 1
sb05<- colSums(b05s)
B0<- c(sb01,sb02,sb03,sb04,sb05)# richness at each of the five nested areas
#B1 ### Same nested-window richness computation for site B1 (columns 23-43).
b11<- rt[,23:27]
b11s<- data.frame(rowSums(b11))
b11s[b11s$rowSums.b11.>0,]<- 1
sb11<- colSums(b11s)
b12<- rt[,23:31]
b12s<- data.frame(rowSums(b12))
b12s[b12s$rowSums.b12.>0,]<- 1
sb12<- colSums(b12s)
b13<- rt[,23:35]
b13s<- data.frame(rowSums(b13))
b13s[b13s$rowSums.b13.>0,]<- 1
sb13<- colSums(b13s)
b14<- rt[,23:39]
b14s<- data.frame(rowSums(b14))
b14s[b14s$rowSums.b14.>0,]<- 1
sb14<- colSums(b14s)
b15<- rt[,23:43]
b15s<- data.frame(rowSums(b15))
b15s[b15s$rowSums.b15.>0,]<- 1
sb15<- colSums(b15s)
B1<- c(sb11,sb12,sb13,sb14,sb15)
#B2 ### Nested-window richness for site B2 (columns 44-64).
b21<- rt[,44:48]
b21s<- data.frame(rowSums(b21))
b21s[b21s$rowSums.b21.>0,]<- 1
sb21<- colSums(b21s)
b22<- rt[,44:52]
b22s<- data.frame(rowSums(b22))
b22s[b22s$rowSums.b22.>0,]<- 1
sb22<- colSums(b22s)
b23<- rt[,44:56]
b23s<- data.frame(rowSums(b23))
b23s[b23s$rowSums.b23.>0,]<- 1
sb23<- colSums(b23s)
b24<- rt[,44:60]
b24s<- data.frame(rowSums(b24))
b24s[b24s$rowSums.b24.>0,]<- 1
sb24<- colSums(b24s)
b25<- rt[,44:64]
b25s<- data.frame(rowSums(b25))
b25s[b25s$rowSums.b25.>0,]<- 1
sb25<- colSums(b25s)
B2<- c(sb21,sb22,sb23,sb24,sb25)
#B3 ### Nested-window richness for site B3 (columns 65-85).
b31<- rt[,65:69]
b31s<- data.frame(rowSums(b31))
b31s[b31s$rowSums.b31.>0,]<- 1
sb31<- colSums(b31s)
b32<- rt[,65:73]
b32s<- data.frame(rowSums(b32))
b32s[b32s$rowSums.b32.>0,]<- 1
sb32<- colSums(b32s)
b33<- rt[,65:77]
b33s<- data.frame(rowSums(b33))
b33s[b33s$rowSums.b33.>0,]<- 1
sb33<- colSums(b33s)
b34<- rt[,65:81]
b34s<- data.frame(rowSums(b34))
b34s[b34s$rowSums.b34.>0,]<- 1
sb34<- colSums(b34s)
b35<- rt[,65:85]
b35s<- data.frame(rowSums(b35))
b35s[b35s$rowSums.b35.>0,]<- 1
sb35<- colSums(b35s)
B3<- c(sb31,sb32,sb33,sb34,sb35)
#T0 ### Nested-window richness for site T0 (columns 86-110; six windows).
t01<- rt[,86:90]
t01s<- data.frame(rowSums(t01))
t01s[t01s$rowSums.t01.>0,]<- 1
st01<- colSums(t01s)
t02<- rt[,86:94]
t02s<- data.frame(rowSums(t02))
t02s[t02s$rowSums.t02.>0,]<- 1
st02<- colSums(t02s)
t03<- rt[,86:98]
t03s<- data.frame(rowSums(t03))
t03s[t03s$rowSums.t03.>0,]<- 1
st03<- colSums(t03s)
t04<- rt[,86:102]
t04s<- data.frame(rowSums(t04))
t04s[t04s$rowSums.t04.>0,]<- 1
st04<- colSums(t04s)
t05<- rt[,86:106]
t05s<- data.frame(rowSums(t05))
t05s[t05s$rowSums.t05.>0,]<- 1
st05<- colSums(t05s)
t06<- rt[,86:110]
t06s<- data.frame(rowSums(t06))
t06s[t06s$rowSums.t06.>0,]<- 1
st06<- colSums(t06s)
T0<- c(st01,st02,st03,st04,st05,st06)
#T1 ### Nested-window richness for site T1 (columns 111-135; six windows).
t11<- rt[,111:115]
t11s<- data.frame(rowSums(t11))
t11s[t11s$rowSums.t11.>0,]<- 1
st11<- colSums(t11s)
t12<- rt[,111:119]
t12s<- data.frame(rowSums(t12))
t12s[t12s$rowSums.t12.>0,]<- 1
st12<- colSums(t12s)
t13<- rt[,111:123]
t13s<- data.frame(rowSums(t13))
t13s[t13s$rowSums.t13.>0,]<- 1
st13<- colSums(t13s)
t14<- rt[,111:127]
t14s<- data.frame(rowSums(t14))
t14s[t14s$rowSums.t14.>0,]<- 1
st14<- colSums(t14s)
t15<- rt[,111:131]
t15s<- data.frame(rowSums(t15))
t15s[t15s$rowSums.t15.>0,]<- 1
st15<- colSums(t15s)
t16<- rt[,111:135]
t16s<- data.frame(rowSums(t16))
t16s[t16s$rowSums.t16.>0,]<- 1
st16<- colSums(t16s)
T1<- c(st11,st12,st13,st14,st15,st16)
#CS
# CS transect: cumulative species richness over nested pools of samples.
# Each cs<k> pools the first 5, 9, 13, 17, 21 sample columns (136:140 up to
# 136:156); an OTU counts as present when its abundance summed over the pool
# is > 0, so colSums() of the 0/1 presence column yields the richness.
cs1<- rt[,136:140]
cs1s<- data.frame(rowSums(cs1))
cs1s[cs1s$rowSums.cs1.>0,]<- 1
scs1<- colSums(cs1s)
cs2<- rt[,136:144]
cs2s<- data.frame(rowSums(cs2))
cs2s[cs2s$rowSums.cs2.>0,]<- 1
scs2<- colSums(cs2s)
cs3<- rt[,136:148]
cs3s<- data.frame(rowSums(cs3))
cs3s[cs3s$rowSums.cs3.>0,]<- 1
scs3<- colSums(cs3s)
cs4<- rt[,136:152]
cs4s<- data.frame(rowSums(cs4))
cs4s[cs4s$rowSums.cs4.>0,]<- 1
scs4<- colSums(cs4s)
cs5<- rt[,136:156]
cs5s<- data.frame(rowSums(cs5))
cs5s[cs5s$rowSums.cs5.>0,]<- 1
scs5<- colSums(cs5s)
# Richness values for the five nested areas of the CS site.
CS<-c(scs1,scs2,scs3,scs4,scs5)
# Site-by-area richness matrix: one row per site/transect (B0..CS),
# columns are the nested areas.
X<-rbind(B0,B1,B2,B3,T0,T1,CS)
X
## How to plot the species-area curves (section added by li)
# Sampling areas corresponding to each nested pool, read from disk.
area1 <- read.table("phylum TAR/area1.txt",sep="\t",head=TRUE);
area1
# Re-assemble per-site richness vectors from the sb*/st*/scs* scalars
# computed above.
B0<- c(sb01,sb02,sb03,sb04,sb05)
B1<- c(sb11,sb12,sb13,sb14,sb15)
B2<- c(sb21,sb22,sb23,sb24,sb25)
B3<- c(sb31,sb32,sb33,sb34,sb35)
T0<- c(st01,st02,st03,st04,st05,st06)
T1<- c(st11,st12,st13,st14,st15,st16)
CS<-c(scs1,scs2,scs3,scs4,scs5)
# Work on log-log scale: the species-area relationship S = c * A^z becomes
# linear in log space, with slope z.
x=log(area1$area)
yB0=log(B0);yB1=log(B1);yB2=log(B2);yB3=log(B3)
##par(new=TRUE) ## overlays two plot() calls; both must use identical axis ranges
##par(mfrow = c(2,2), font.lab = 6, font.main = 6,font.axis = 6, font = 6) ## split the device into a 2x2 grid of panels
plot(x,yB0,type="p")
abline(lm(yB0~x),col=2) ## draw the fitted regression line over the points
yB0.reg <- lm(yB0 ~ x) ## linear model: response yB0 against predictor x
slopeb0 = yB0.reg$coefficients[2]
slopeb0
corb0 = cor.test(yB0, x)
corb0
p=corb0$p.value ;p ## p-value of the correlation test
r2=corb0$est ;r2 ## correlation estimate (r, not r-squared, despite the name)
legend.label = c(p,r2)
# NOTE(review): legend() expects coordinates or a keyword such as "topleft";
# passing a formula here is likely an error -- verify before relying on it.
legend(yB0~x,legend = legend.label)
## Curve fitting (local regression):
## lowess(x,y,f=2/3,iter = 3)
##   f is the smoother span -- larger gives a smoother curve
##   iter is the number of robustifying iterations -- larger is slower
## loess(y~x, data,span=0.75,degree=2)
##   data holds x and y; span is the smoother span; degree defaults to 2 (quadratic)
##sample
lines(lowess(x,yB0)) #### lowess regression curve
lines(x,predict(loess(yB0~x))) #### loess curve; predict() extracts the fitted values
# NOTE(review): 'y' below is never defined in this script, so plot(x,y)
# will fail; yB0 was probably intended.
lowess(x,yB0,f=50,iter = 10);plot(x,y)
loess(yB0~x,span=0.75,degree=2);plot(yB0~x)
## Add the remaining transects one curve at a time with lines().
plot(x,yB0,type="p")
lines(x,yB1,type="p")
lines(x,yB2,type="p")
# NOTE(review): trailing '...' appears to be a truncation artifact of the dump.
...
| /分析及画图/3. TAR-DDR/种面积关系/TAR.R | no_license | mrzhangqjankun/R-code-for-myself | R | false | false | 6,847 | r | ## From Yangying
## 2017.7.6 li learn
# Clear the workspace and move to the project directory.
rm(list=ls(all=TRUE))
setwd('D:/文件存放/galaxy pipeline/galaxy/自己整理代码/种面积关系')
##setwd('C:/Users/dell/Desktop/article/Beijing-Tibet-Changsha/ITS 236-270/TAR')
# OTU table: rows are OTUs, columns are samples grouped by site/transect.
rt <- read.table("B T C TAR.txt",sep="\t",head=TRUE);
#rt
row.names(rt)<-rt$OTUID
### Could be improved by rewriting the repeated blocks below as a loop or a function.
##b0 = c
##for (i in 6:22){
# NOTE(review): the lines down to the stray '}' are an abandoned sketch of
# that loop -- 'i' is undefined and the closing brace is unmatched, so this
# section does not parse; it survives only as scratch work.
j=paste("B0",1:5,sep="")
k=paste("sb0",1:5,sep="")
j[1]
j[1]<- rt[,2:i]
j[1]<- data.frame(rowSums(j[1]))
j[1][j[1]>0,]<- 1
k[1]<- colSums(j[1])
k[1]
}
## i=i+4
## Function version of the presence/absence richness computation:
statistics <- function(x) {
# Species richness of community table `x`: the number of rows (OTUs) whose
# abundance summed across the selected sample columns is positive.
# Returns a length-1 named numeric, as produced by colSums().
presence <- data.frame(rowSums(x))
presence[presence > 0, ] <- 1  # any positive row total becomes presence (1)
colSums(presence)              # count of present OTUs
}
b01<- rt[,2:6]
sb01 = statistics(b01);sb01
#B0 ###统计面积不断扩大的物种数量
b01<- rt[,2:6]#选取OTU table???2???6列赋予b01
b01s<- data.frame(rowSums(b01))# 对b01进行行求???
b01s[b01s$rowSums.b01.>0,]<- 1
sb01<- colSums(b01s)
b02<- rt[,2:10]
b02s<- data.frame(rowSums(b02))
b02s[b02s$rowSums.b02.>0,]<- 1
sb02<- colSums(b02s)
b03<- rt[,2:14]
b03s<- data.frame(rowSums(b03))
b03s[b03s$rowSums.b03.>0,]<- 1
sb03<- colSums(b03s)
b04<- rt[,2:18]
b04s<- data.frame(rowSums(b04))
b04s[b04s$rowSums.b04.>0,]<- 1
sb04<- colSums(b04s)
b05<- rt[,2:22]
b05s<- data.frame(rowSums(b05))
b05s[b05s$rowSums.b05.>0,]<- 1
sb05<- colSums(b05s)
B0<- c(sb01,sb02,sb03,sb04,sb05)
#B1
b11<- rt[,23:27]
b11s<- data.frame(rowSums(b11))
b11s[b11s$rowSums.b11.>0,]<- 1
sb11<- colSums(b11s)
b12<- rt[,23:31]
b12s<- data.frame(rowSums(b12))
b12s[b12s$rowSums.b12.>0,]<- 1
sb12<- colSums(b12s)
b13<- rt[,23:35]
b13s<- data.frame(rowSums(b13))
b13s[b13s$rowSums.b13.>0,]<- 1
sb13<- colSums(b13s)
b14<- rt[,23:39]
b14s<- data.frame(rowSums(b14))
b14s[b14s$rowSums.b14.>0,]<- 1
sb14<- colSums(b14s)
b15<- rt[,23:43]
b15s<- data.frame(rowSums(b15))
b15s[b15s$rowSums.b15.>0,]<- 1
sb15<- colSums(b15s)
B1<- c(sb11,sb12,sb13,sb14,sb15)
#B2
b21<- rt[,44:48]
b21s<- data.frame(rowSums(b21))
b21s[b21s$rowSums.b21.>0,]<- 1
sb21<- colSums(b21s)
b22<- rt[,44:52]
b22s<- data.frame(rowSums(b22))
b22s[b22s$rowSums.b22.>0,]<- 1
sb22<- colSums(b22s)
b23<- rt[,44:56]
b23s<- data.frame(rowSums(b23))
b23s[b23s$rowSums.b23.>0,]<- 1
sb23<- colSums(b23s)
b24<- rt[,44:60]
b24s<- data.frame(rowSums(b24))
b24s[b24s$rowSums.b24.>0,]<- 1
sb24<- colSums(b24s)
b25<- rt[,44:64]
b25s<- data.frame(rowSums(b25))
b25s[b25s$rowSums.b25.>0,]<- 1
sb25<- colSums(b25s)
B2<- c(sb21,sb22,sb23,sb24,sb25)
#B3
b31<- rt[,65:69]
b31s<- data.frame(rowSums(b31))
b31s[b31s$rowSums.b31.>0,]<- 1
sb31<- colSums(b31s)
b32<- rt[,65:73]
b32s<- data.frame(rowSums(b32))
b32s[b32s$rowSums.b32.>0,]<- 1
sb32<- colSums(b32s)
b33<- rt[,65:77]
b33s<- data.frame(rowSums(b33))
b33s[b33s$rowSums.b33.>0,]<- 1
sb33<- colSums(b33s)
b34<- rt[,65:81]
b34s<- data.frame(rowSums(b34))
b34s[b34s$rowSums.b34.>0,]<- 1
sb34<- colSums(b34s)
b35<- rt[,65:85]
b35s<- data.frame(rowSums(b35))
b35s[b35s$rowSums.b35.>0,]<- 1
sb35<- colSums(b35s)
B3<- c(sb31,sb32,sb33,sb34,sb35)
#T0
t01<- rt[,86:90]
t01s<- data.frame(rowSums(t01))
t01s[t01s$rowSums.t01.>0,]<- 1
st01<- colSums(t01s)
t02<- rt[,86:94]
t02s<- data.frame(rowSums(t02))
t02s[t02s$rowSums.t02.>0,]<- 1
st02<- colSums(t02s)
t03<- rt[,86:98]
t03s<- data.frame(rowSums(t03))
t03s[t03s$rowSums.t03.>0,]<- 1
st03<- colSums(t03s)
t04<- rt[,86:102]
t04s<- data.frame(rowSums(t04))
t04s[t04s$rowSums.t04.>0,]<- 1
st04<- colSums(t04s)
t05<- rt[,86:106]
t05s<- data.frame(rowSums(t05))
t05s[t05s$rowSums.t05.>0,]<- 1
st05<- colSums(t05s)
t06<- rt[,86:110]
t06s<- data.frame(rowSums(t06))
t06s[t06s$rowSums.t06.>0,]<- 1
st06<- colSums(t06s)
T0<- c(st01,st02,st03,st04,st05,st06)
#T1
t11<- rt[,111:115]
t11s<- data.frame(rowSums(t11))
t11s[t11s$rowSums.t11.>0,]<- 1
st11<- colSums(t11s)
t12<- rt[,111:119]
t12s<- data.frame(rowSums(t12))
t12s[t12s$rowSums.t12.>0,]<- 1
st12<- colSums(t12s)
t13<- rt[,111:123]
t13s<- data.frame(rowSums(t13))
t13s[t13s$rowSums.t13.>0,]<- 1
st13<- colSums(t13s)
t14<- rt[,111:127]
t14s<- data.frame(rowSums(t14))
t14s[t14s$rowSums.t14.>0,]<- 1
st14<- colSums(t14s)
t15<- rt[,111:131]
t15s<- data.frame(rowSums(t15))
t15s[t15s$rowSums.t15.>0,]<- 1
st15<- colSums(t15s)
t16<- rt[,111:135]
t16s<- data.frame(rowSums(t16))
t16s[t16s$rowSums.t16.>0,]<- 1
st16<- colSums(t16s)
T1<- c(st11,st12,st13,st14,st15,st16)
#CS
cs1<- rt[,136:140]
cs1s<- data.frame(rowSums(cs1))
cs1s[cs1s$rowSums.cs1.>0,]<- 1
scs1<- colSums(cs1s)
cs2<- rt[,136:144]
cs2s<- data.frame(rowSums(cs2))
cs2s[cs2s$rowSums.cs2.>0,]<- 1
scs2<- colSums(cs2s)
cs3<- rt[,136:148]
cs3s<- data.frame(rowSums(cs3))
cs3s[cs3s$rowSums.cs3.>0,]<- 1
scs3<- colSums(cs3s)
cs4<- rt[,136:152]
cs4s<- data.frame(rowSums(cs4))
cs4s[cs4s$rowSums.cs4.>0,]<- 1
scs4<- colSums(cs4s)
cs5<- rt[,136:156]
cs5s<- data.frame(rowSums(cs5))
cs5s[cs5s$rowSums.cs5.>0,]<- 1
scs5<- colSums(cs5s)
CS<-c(scs1,scs2,scs3,scs4,scs5)
X<-rbind(B0,B1,B2,B3,T0,T1,CS)
X
##如何画出来,li added
area1 <- read.table("phylum TAR/area1.txt",sep="\t",head=TRUE);
area1
B0<- c(sb01,sb02,sb03,sb04,sb05)
B1<- c(sb11,sb12,sb13,sb14,sb15)
B2<- c(sb21,sb22,sb23,sb24,sb25)
B3<- c(sb31,sb32,sb33,sb34,sb35)
T0<- c(st01,st02,st03,st04,st05,st06)
T1<- c(st11,st12,st13,st14,st15,st16)
CS<-c(scs1,scs2,scs3,scs4,scs5)
x=log(area1$area)
yB0=log(B0);yB1=log(B1);yB2=log(B2);yB3=log(B3)
##par(new=TURE) ##两次plot图形叠加在一起。两次的图形应该设置横纵坐标一致。
##par(mfrow = c(2,2), font.lab = 6, font.main = 6,font.axis = 6, font = 6) ## 分割成2行2列,共四个图
plot(x,yB0,type="p")
abline(lm(yB0~x),col=2) ##abline添加给定斜率的线
yB0.reg <- lm(yB0 ~ x) ##lm线性模型,yB0 ~ x 前为纵轴,后为横轴
slopeb0 = yB0.reg$coefficients[2]
slopeb0
corb0 = cor.test(yB0, x)
corb0
p=corb0$p.value ;p ##p
r2=corb0$est ;r2 ##cor
legend.label = c(p,r2)
legend(yB0~x,legend = legend.label)
##曲线拟合(局部回归)
##lowess(x,y,f=2/3,iter = 3)
##f为窗宽参数,越大越平滑
##iter为迭代次数,越大计算越慢
##loess(y~x, data,span=0.75,degree=2)
##data为包含x,y的数据集;span为窗宽参数,degree默认为二次回归。
##sample
lines(lowess(x,yB0)) ####lowess做回归曲线
lines(x,predict(loess(yB0~x))) ####lowess做回归曲线,predict是取回归预测值
lowess(x,yB0,f=50,iter = 10);plot(x,y)
loess(yB0~x,span=0.75,degree=2);plot(yB0~x)
##通过lines一条一条加。
plot(x,yB0,type="p")
lines(x,yB1,type="p")
lines(x,yB2,type="p")
...
|
# Author: Bharathkumar Ramachandra/tnybny
# this file performs the scanning window based approach to detecting
# clusters of anomalous behavior
# clear workspace
rm(list = ls(all = T))
# load required libraries
require(R.matlab) # for mat files
require(rworldmap) # for mapping
require(colorspace) # for legend
require(corpcor) # for pseudoinverse
# set the color palette
palette(diverge_hsv(21))
dir <- integer(1) # direction of extreme (warm (1) or cold (-1))
calcMDsqFromMVG <- function(rectGrid, origYData, day)
{
# Fits a baseline multivariate Gaussian (MVG) to the historical values
# observed under the rectangle being scanned, then measures how far today's
# observation lies from that baseline.
#
# Params
# rectGrid: data frame with one row per grid cell in the rectangle;
#           columns are (row index, column index, today's value)
# origYData: list of per-year arrays indexed [day, row, col], used to build
#            the baseline mean and covariance
# day: current day under consideration (a +/-2-day window around it is used)
#
# Returns
# MDsq: squared Mahalanobis distance of the rectangle's observation from the
#       baseline MVG, or 0 when the window mixes warm and cold anomalies
# Side effect: sets the global `dir` (via <<-) to +1 for warm, -1 for cold;
#              the caller reads it to sign the plotted probability.
currObs <- rectGrid[, 3]
# gather data under the spatio-temporal cuboid (rectangle x 5-day window)
# into d: one row per (year, window-day) sample, one column per grid cell
timeWindow <- (day - 2):(day + 2)
d <- t(do.call(rbind, lapply(1:nrow(rectGrid), FUN = function(g) {
unlist(lapply(origYData, "[", timeWindow, rectGrid[g, 1],
rectGrid[g, 2]))})))
# remove from d the observation that's currently under inspection for
# anomalous behavior so that the mean and covariance estimates aren't
# biased by it; only the first matching sample is dropped
if(nrow(rectGrid) == 1)
{
# single-cell rectangle: d collapses to a plain vector here
d <- d[-which(d == currObs)[1]]
mu <- mean(d)
} else {
matchidx <- apply(d, 1, FUN = function(obs) all(obs == currObs))
d <- d[-which(matchidx)[1], ]
mu <- colMeans(d)
}
# if the window spans warm and cold extremes (some cells above the baseline
# mean and some below -- Quadrants II or IV), skip by reporting distance 0
if(!(all((currObs - mu) >= 0) |
all((currObs - mu) <= 0)))
{
return(0)
}
# find direction of anomalous behavior (written to the global `dir`)
dir <<- ifelse(mean(currObs) > mean(mu), 1, -1)
# pseudoinverse tolerates singular covariance matrices, which arise when the
# sample count is small relative to the rectangle size
iCOV <- pseudoinverse(cov(as.matrix(d)))
# squared mahalanobis distance; inverted = TRUE because iCOV already holds
# the (pseudo)inverse of the covariance
MDsq <- mahalanobis(currObs, center = mu, cov = iCOV, inverted = TRUE)
return(MDsq)
}
# load the data if not already done so
if(!exists("origYData"))
source("loadData.R")
# period of record (restricted here to a single year and day for this run)
years <- 1979
days <- 3
# %02d is expanded by the jpeg() device to number successive plot files
plotpath <- paste("./allplots/plot%02d.jpg")
jpeg(plotpath, width = 1024, height = 680)
ptm <- proc.time()  # time the full scan
for(year in years){
for(day in days){
# field for this (year, day): a 73 x 144 grid
mat <- origYData[[year - 1978]][day, , ]
# create result matrix to color
resToday <- matrix (0, 73, 144)
# slide every rectangle of up to 3 x 3 grid cells across the field
for (i1 in 1:73){
for (j1 in 1:144){
for (i2 in i1:(i1 + 2)){
# clamp at the grid edge. NOTE(review): assigning to the loop variable
# does not stop the iteration in R, so edge windows are re-evaluated as
# duplicate clamped rectangles -- redundant work, though the recolored
# cells are the same.
if(i2 > 73)
i2 <- 73
for (j2 in j1:(j1 + 2)){
if(j2 > 144)
j2 <- 144
# rectangle as (row, col, value) triples for every covered cell
rectGrid <- data.frame(expand.grid(i1:i2, j1:j2),
c(mat[i1:i2, j1:j2]))
MDsq <- calcMDsqFromMVG(rectGrid, origYData, day)
# chi-square CDF of the squared distance with df = cell count;
# MDsq == 0 flags a skipped (mixed-sign) window
Pr <- ifelse(MDsq == 0, 0, pchisq(MDsq, nrow(rectGrid)))
# color the grid cells with value = +-Pr, keeping for each cell the
# strongest rectangle seen so far; `dir` is the warm/cold sign set
# globally by calcMDsqFromMVG
changeIdx <- !(abs(c(resToday[i1:i2, j1:j2])) > Pr)
resToday[i1:i2, j1:j2][changeIdx] <- dir * Pr
}
}
}
}
# do mapping transformations (transpose + column flip for map orientation)
resToday <- t(resToday)
resToday <- resToday[, ncol(resToday):1]
# cut at 5% at both tails: keep only the top 5% of |values| overall.
# NOTE(review): T is the reassignable alias for TRUE, and the fractional
# index 0.05 * length() is truncated to an integer by R's subscripting.
cutoff <- sort(abs(resToday), decreasing = T)[(0.05 * length(resToday))]
cutResToday <- resToday
cutResToday[cutResToday < cutoff & cutResToday > -cutoff] = 0
# linearly stretch the surviving values to [-1, 1] scale: negatives onto
# [-1, -0.001] and positives onto [0.001, 1], zeros untouched
negRange <- range(cutResToday[cutResToday < 0])
posRange <- range(cutResToday[cutResToday > 0])
cutResTodayPrime <- ifelse(cutResToday < 0,
(cutResToday - negRange[1]) /
(negRange[2] - negRange[1]) *
(-0.001 - (-1)) + (-1),
cutResToday)
cutResTodayPrime <- ifelse(cutResToday > 0,
(cutResToday - posRange[1]) /
(posRange[2] - posRange[1]) *
(1 - (0.001)) + (0.001),
cutResTodayPrime)
# map the result on a 21-category diverging palette (set at the file top)
mapGriddedData(cutResTodayPrime, numCats = 21, catMethod = "diverging",
colourPalette = "palette", borderCol = "black")
}
}
print(proc.time() - ptm)
dev.off() | /scan.R | no_license | tnybny/Anomalous-cluster-detection | R | false | false | 5,058 | r | # Author: Bharathkumar Ramachandra/tnybny
# this file performs the scanning window based approach to detecting
# clusters of anomalous behavior
# clear workspace
rm(list = ls(all = T))
# load required libraries
require(R.matlab) # for mat files
require(rworldmap) # for mapping
require(colorspace) # for legend
require(corpcor) # for pseudoinverse
# set the color palette
palette(diverge_hsv(21))
dir <- integer(1) # direction of extreme (warm (1) or cold (-1))
calcMDsqFromMVG <- function(rectGrid, origYData, day)
{
# calculates the parameters of the baseline multivariate normal that
# corresponds to the rectangle under investigation, then calculates the
# mahalanobis distance of current observation from that MVG
#
# Params
# rectGrid: data frame containing information about the rectangle under
# investigation
# origYdata: original data for all years in order to facilitate calculation
# of covariance matrix of baseline distribution
# day: current day under consideration
#
# Returns
# MDsq: Squared Mahalanobis distance of rectangle observation from MVG
# dir: not returned, but explicitly sets direction of extreme
# observed temperatures under current window
currObs <- rectGrid[, 3]
# gather data under spatio-temporal cuboid specified by window in d
timeWindow <- (day - 2):(day + 2)
d <- t(do.call(rbind, lapply(1:nrow(rectGrid), FUN = function(g) {
unlist(lapply(origYData, "[", timeWindow, rectGrid[g, 1],
rectGrid[g, 2]))})))
# remove from d the observation that's currently under inspection for
# anomalous behavior so that mean and covariance estimates aren't biased
if(nrow(rectGrid) == 1)
{
d <- d[-which(d == currObs)[1]]
mu <- mean(d)
} else {
matchidx <- apply(d, 1, FUN = function(obs) all(obs == currObs))
d <- d[-which(matchidx)[1], ]
mu <- colMeans(d)
}
# if window spans warm and cold extremes (Quadrants II or IV), skip
if(!(all((currObs - mu) >= 0) |
all((currObs - mu) <= 0)))
{
return(0)
}
# find direction of anomalous behavior
dir <<- ifelse(mean(currObs) > mean(mu), 1, -1)
iCOV <- pseudoinverse(cov(as.matrix(d)))
# squared mahalanobis distance
MDsq <- mahalanobis(currObs, center = mu, cov = iCOV, inverted = TRUE)
return(MDsq)
}
# load the data if not already done so
if(!exists("origYData"))
source("loadData.R")
# period of record
years <- 1979
days <- 3
plotpath <- paste("./allplots/plot%02d.jpg")
jpeg(plotpath, width = 1024, height = 680)
ptm <- proc.time()
for(year in years){
for(day in days){
mat <- origYData[[year - 1978]][day, , ]
# create result matrix to color
resToday <- matrix (0, 73, 144)
for (i1 in 1:73){
for (j1 in 1:144){
for (i2 in i1:(i1 + 2)){
if(i2 > 73)
i2 <- 73
for (j2 in j1:(j1 + 2)){
if(j2 > 144)
j2 <- 144
rectGrid <- data.frame(expand.grid(i1:i2, j1:j2),
c(mat[i1:i2, j1:j2]))
MDsq <- calcMDsqFromMVG(rectGrid, origYData, day)
Pr <- ifelse(MDsq == 0, 0, pchisq(MDsq, nrow(rectGrid)))
# color the grid cells with value = +-Pr
changeIdx <- !(abs(c(resToday[i1:i2, j1:j2])) > Pr)
resToday[i1:i2, j1:j2][changeIdx] <- dir * Pr
}
}
}
}
# do mapping transformations
resToday <- t(resToday)
resToday <- resToday[, ncol(resToday):1]
# cut at 5% at both tails
cutoff <- sort(abs(resToday), decreasing = T)[(0.05 * length(resToday))]
cutResToday <- resToday
cutResToday[cutResToday < cutoff & cutResToday > -cutoff] = 0
# linearly stretch the values to [-1, 1] scale
negRange <- range(cutResToday[cutResToday < 0])
posRange <- range(cutResToday[cutResToday > 0])
cutResTodayPrime <- ifelse(cutResToday < 0,
(cutResToday - negRange[1]) /
(negRange[2] - negRange[1]) *
(-0.001 - (-1)) + (-1),
cutResToday)
cutResTodayPrime <- ifelse(cutResToday > 0,
(cutResToday - posRange[1]) /
(posRange[2] - posRange[1]) *
(1 - (0.001)) + (0.001),
cutResTodayPrime)
# map the result
mapGriddedData(cutResTodayPrime, numCats = 21, catMethod = "diverging",
colourPalette = "palette", borderCol = "black")
}
}
print(proc.time() - ptm)
dev.off() |
# Input-validation tests: boxplot_10() should reject any first argument that
# is not a data frame, since it needs columns to group and plot.
test_that("Function requires correct argument input types", {
  # numeric scalar as only argument
  expect_error(boxplot_10(1))
  # logical scalar as only argument (TRUE spelled out; T is a reassignable alias)
  expect_error(boxplot_10(TRUE))
  # bare list (not a data frame) as only argument
  expect_error(boxplot_10(list(c(1, 2, 3), c("A", "B", "C"))))
})
# Output-type test: boxplot_10() should return a ggplot object whose first
# layer is a boxplot geom.
# NOTE: the fixture is downloaded over the network, so this test requires
# internet access to run.
test_that("Function outputs a boxplot", {
  msk <- readr::read_tsv("https://raw.githubusercontent.com/stat545ubc-2021/mini-data-analysis-EL/main/data/msk_impact_2017_clinical_data.tsv")
  p <- boxplot_10(msk, `Cancer Type`, `Overall Survival (Months)`, min_sample_size = 5, na.rm = TRUE, .desc = TRUE)
  # class of the output should be a ggplot object
  expect_s3_class(p, "ggplot")
  # the first geom layer should be a boxplot
  expect_identical(class(p$layers[[1]]$geom)[1], "GeomBoxplot")
})
| /boxquickplot/tests/testthat/test-boxplot_10.R | permissive | stat545ubc-2021/functions-YFL-UBC | R | false | false | 850 | r | # test arguments that should not be plotted
test_that("Function requires correct argument input types", {
# numeric scalar as only argument
expect_error(boxplot_10(1))
# logical scalar as only argument
expect_error(boxplot_10(T))
# list as only argument
expect_error(boxplot_10(list(c(1,2,3), c("A","B","C"))))
})
# check the output type of our function
test_that("Function outputs a boxplot", {
msk <- readr::read_tsv("https://raw.githubusercontent.com/stat545ubc-2021/mini-data-analysis-EL/main/data/msk_impact_2017_clinical_data.tsv")
p <- boxplot_10(msk, `Cancer Type`, `Overall Survival (Months)`, min_sample_size = 5, na.rm = T, .desc = T)
# class of the output should be a ggplot object
expect_s3_class(p ,"ggplot")
# check that geom layer is a boxplot
expect_identical(class(p$layers[[1]]$geom)[1], "GeomBoxplot")
})
|
# This is a script to explore the gapminder dataset using R
# Lauren Fuess
# fuess@uta.edu
# 4.18.2015
# install dplyr (one-time setup, kept commented out)
# install.packages("dplyr",dependencies = TRUE)
# load dplyr for the pipe and the data-manipulation verbs below
library("dplyr")
# Read in data as tab-delimited
gap.in <- read.table("./output//combined_gapMinder.tsv",
sep ="\t",
header = TRUE)
# Minimum population ever recorded for China.
gap.in %>%
filter(country == "China") %>%
select(country, year, pop) %>%
group_by(country) %>%
summarize(min = min(pop))
#Challenge
# Calculate the mean population per continent per year for years prior to 1990
gap.in %>%
filter(year < 1990) %>%
select(continent, year, pop) %>%
group_by(continent, year) %>%
summarize(mean = mean(pop)) | /Sunday_R_Script.R | no_license | lfuess/Sunday_scripts | R | false | false | 715 | r | # This is a script to explore the gapminder dataset using R
# Lauren Fuess
# fuess@uta.edu
# 4.18.2015
# install dplyr
# install.packages("dplyr",dependencies = TRUE)
#load dplyr
library("dplyr")
#Read in data as tab-deliminated
gap.in <- read.table("./output//combined_gapMinder.tsv",
sep ="\t",
header = TRUE)
gap.in %>%
filter(country == "China") %>%
select(country, year, pop) %>%
group_by(country) %>%
summarize(min = min(pop))
#Challenge
# Calculate the mean population per continent per year for years prior to 1990
gap.in %>%
filter(year < 1990) %>%
select(continent, year, pop) %>%
group_by(continent, year) %>%
summarize(mean = mean(pop)) |
# The goal of this project is to predict movie ratings given a dataset provided with "rating" as label column,
#and "userId", "movieId" as factors to be used as variables to compute predictor. The process will be carried
#on gradually to test the effect of each variable on RMSE.
#
#The Movielens dataset has been split in two separate dataframes: the "edx" train dataframe that contains 90% of data,
#on which the supervised algorithm is trained and cross-validated; the "validation" that contains 10% which
#is used as test set.
library(tidyverse)
library(ggplot2)
###@@@@@@@@@@@@@@@@@@ DATA CLEANING ########################
dim(movielens)
summary(movielens)
############################################################
###@@@@@@@@@@@@@@@@@@ DATA EXPLORATION ############################
#######MOVIE EFFECTS visualization########
#Generating a movie distribution plot: there are more than ten thousand
#movies, they have been grouped and numbered as consecutive to produce
#uniform graphic ( the original movieId would appear with a central no data area)
#movies distribution and effect
movie_domain <- movielens %>%
group_by(movieId) %>% summarize(votes=n(),rating = mean(rating) )
nMovies <- movie_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nMovies), y=movie_domain$votes) #temp dataframe
df2 <- data.frame(x=c(1:nMovies), y=movie_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" Movie distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nMovies))+xlab("Movies")+ ylab("Votes")
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" Movie vs rating")+ geom_point() +
scale_x_continuous(limits=c(0,nMovies))+xlab("MovieItem")+ ylab("rating") +
geom_smooth()
##########
#######USERS EFFECTS visualization########
#users distribution and effect. Two plots: the first shows users distribution
#vs grouped users. The second users VS rating
user_domain <- movielens %>%
group_by(userId) %>% summarize(votes=n(), rating = mean(rating))
nUsers <- user_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nUsers), y=user_domain$votes ) #temp dataframe
df2 <- data.frame(x=c(1:nUsers), y=user_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" User distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nUsers))+xlab("Users")+ ylab("Votes")
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" User VS rating")+ geom_point() +
scale_x_continuous(limits=c(0,nUsers))+xlab("Users")+ ylab("rating") +
geom_smooth(span = 0.1)
rm(df1, df2,user_domain,movie_domain) # removing temporary dataframes
######
#######TIME EFFECTS visualization########
#The next two code blocks generates plots to show the time effects on rating. The first one is week based
#the second day based. Both illustrates a weak influence on rating, so the time will not be considered
# as predictor
library(lubridate)
movielens %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_week=week(datetime))%>%
group_by(rating_week) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(rating_week, rating)) +
geom_point() +
geom_smooth()
movielens %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_day=day(datetime))%>%
group_by(rating_day) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(rating_day, rating)) +
geom_point() +
geom_smooth()
###############################
#######GENRE EFFECTS visualization########
#the next code is cumputed to build-up a numeric variabile called "genreId" as a numeric
#counterpart of the string "genre" variable
genre_domain <- movielens %>%
group_by(genres) %>% summarize(n=n())
nGenres <- genre_domain %>% nrow(.)
genre_domain <- mutate(genre_domain, genreId=c(1:nGenres)) #create a new numeric genreId column
#The next three lines of code: creating a temporary df as join from movielens and the
#df with numeric genreId, and then tO select only candidate variables to compute for correlation.
#Finally, the plot that shows graphics of correlations ( ignoring the character variable genres ) )
#As the very last operation, we get the data frame used for prediction
movielens_temp <- inner_join(movielens,genre_domain, by = "genres") %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_week=week(datetime))
movielens_temp <- subset( movielens_temp, select = c("userId","movieId","rating","genreId", "genres") )
movielens <- movielens_temp
rm(movielens_temp,genre_domain,nGenres) #remove the temporary dfs and variables
##genres distribution and effect
genre_domain <- movielens %>%
group_by(genres) %>% summarize(votes=n(),rating = mean(rating) )
nGenres <- genre_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nGenres), y=genre_domain$votes) #temp dataframe
df2 <- data.frame(x=c(1:nGenres), y=genre_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" Genre distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nGenres))+xlab("Genre")+ ylab("Votes")
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" Genre vs rating")+ geom_point() +
scale_x_continuous(limits=c(0,nGenres))+xlab("Genre")+ ylab("rating") +
geom_smooth()
remove(df1,df2,genre_domain,movie_domain,movie_titles) #removibìng temporary dfs
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@CORRELATION@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# As analysis final step, the correlation graphics will be created. Using the GGally
#library it' possible to get a one shot plot the shows the full correlation relationships across
#factors
library(GGally)
ggcorr(movielens,label = TRUE, label_alpha = TRUE)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
###################################################################################
#@@@@T@@@@@@@@@@@@@@@@@@@@@@@@@THE MODELING APPROACH###############################
# The first step is to compute RMSE, that can be considered as a standard deviation between the true value in the test-set
#and the predicted value
RMSE <- function(true_ratings, predicted_ratings) {
# Residuals between observed and predicted ratings.
errors <- true_ratings - predicted_ratings
# Root of the mean squared residual; lower is better.
sqrt(mean(errors^2))
}
#####EDX AND VALIDATION SET#########
# Validation set obtained as 10% of MovieLens data
library(caret)
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# To make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Adding rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm( ratings, movies, test_index, temp, movielens, removed)
#########################
# 1) Regularization and the user+movie effect approach
#The rating distribution through movies, tells that some movies are rated more than others.
#Next code produces graphics that show movies versus ratings
library(ggplot2)
nMovies <- edx %>% count(movieId) %>% nrow(.) #total movies
edx %>% count(movieId) %>% mutate( RateShare=ifelse(.$n>5,"moreThanFive","lessThanFive")) %>%
ggplot(aes(x=c(1:nMovies),y=.$n, color=RateShare)) +
ggtitle(" Ratings Distribution by movie")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nMovies))+xlab("Movies")+ ylab("Total ratings")
#The graphic is split in two parts: the upper in blu referring to the movies with more than five ratings
#the lower in red to that movies with five or lower number of ratings. The red section, just as an example,
#show that movies with few users ratings is a minor but a significant part of data
# This are noisy data, that should be processed by regularization
#The next step is to evaluate to implement the regularization principle centered on the lambda parameter.
#To minimize the few rating noise effects , the lambda parameter is adopted. In the general
#mathematical formula lambda acts as a penalty when the n amount of ratings for a given movie
#is low, viceversa it is ignored when the n is high: b_i=sum(rating - mu)/(n+lambda) and b_u=sum(rating - mu-b_i)/(n+lambda)
#The optimal lambda must be chosen after tuning processing that consists of a CROSS VALIDATION procedure
#For this, we have to further split the edx data frame in two data frames:
#The edx_train that is used to calculate b_u(lambda) and b_i(lambda) and the edx_test that is used
#to calculated RMSEs
#@@@@@@@@@@@@@@@ Training and test set for CV##########
set.seed(1)
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
edx_train <- edx[-test_index,]
temp <- edx[test_index,]
# Make sure userId and movieId in validation set are also in edx set
edx_test <- temp %>%
semi_join(edx_train, by = "movieId") %>%
semi_join(edx_train, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, edx_test)
edx_train <- rbind(edx_train, removed)
rm( test_index, temp, removed)
#######################################################
# In the general mathematical formula lambda acts as a penalty when the n amount of ratings for a given movie
#is low, viceversa it is ignored when the n is high
lambdas <- seq(0, 10, 0.25)
rmses <- sapply(lambdas, function(l){
mu <- mean(edx_train$rating)
b_i <- edx_train %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+l))
b_u <- edx_train %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <-
edx_test %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
return(RMSE(predicted_ratings, edx_test$rating))
})
ggplot2::qplot(lambdas, rmses)
best_lambda <- lambdas[which.min(rmses)] #the lambda that minimize RMSE on the edx_test set
#Next, let's apply the best_lambda on the target edx and validation sets to
#finally calculate predicted ratings
# Overall mean rating of the full training set. This must be (re)computed
# here: the earlier `mu` existed only inside the cross-validation closure
# passed to sapply(), so referencing it at top level would fail with
# "object 'mu' not found".
mu <- mean(edx$rating)
# Regularized movie effect b_i, shrunk toward 0 by best_lambda.
b_i <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+best_lambda))
# Regularized user effect b_u, fitted on residuals after mu and b_i.
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+best_lambda))
# Predicted rating = global mean + movie effect + user effect.
predicted_ratings <-
validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>% .$pred
#the user+movie effect rmse
rmse_ui_reg <- RMSE(predicted_ratings, validation$rating)
# 2) KNN approach
library(caret)
library(dplyr)
#######6% Sample of entire edx df###########
edx_sample <- sample_n(edx, 600000)
set.seed(1)
test_index <- createDataPartition(y = edx_sample$rating, times = 1, p = 0.1, list = FALSE)
edx_sample_train <- edx_sample[-test_index,]
temp <- edx_sample[test_index,]
# Make sure userId and movieId in validation set are also in edx_sample set
edx_sample_test <- temp %>%
semi_join(edx_sample, by = "movieId") %>%
semi_join(edx_sample, by = "userId")
# Add rows removed from validation set back into edx_sample set
removed <- anti_join(temp, edx_sample_test)
edx_sample_train <- rbind(edx_sample_train, removed)
rm( test_index, temp, removed)
####end sample#######################################################
# Fit the model on the edx_sample training set
Sys.time() #start time for evaluating computing time
set.seed(123)
y <- edx_sample_train$rating
data=subset(edx_sample_train,select=c("movieId","userId")) #selecting only the predictors
model <- train(
y=y, x=data,
trControl = trainControl("cv", number = 10),
preProcess = c("center","scale"),
data=data, method = "knn",
tuneGrid = data.frame(k = seq(60, 100, 5))
)
# Plot model error RMSE vs different values of k
plot(model)
# Best tuning parameter k that minimize the RMSE
model$bestTune
# Make predictions on the test data
predictions <- model %>% predict(edx_sample_test)
head(predictions)
# Compute the prediction error RMSE
rmse_knn <-RMSE(predictions, edx_sample_test$rating)
Sys.time() #stop time for evalueting computing time
#@@@@@@@@@@ Fit the KNN model on the whole edx training set
#not to be run on the 10M Dataframe on desktop system!!
Sys.time()
set.seed(123)
y <- edx$rating
data=subset(edx,select=c("movieId","userId")) #selecting only the predictors
model <- train(
y=y, x=data,
trControl = trainControl("cv", number = 5),
preProcess = c("center","scale"),
data=data, method = "knn",
tuneGrid = data.frame(k = seq(70, 95, 5))
)
# Plot model error RMSE vs different values of k
plot(model)
# Best tuning parameter k that minimize the RMSE
model$bestTune
# Make predictions on the test data
predictions <- model %>% predict(validation)
# Compute the prediction error RMSE
rmse_knn <-RMSE(predictions, validation$rating)
Sys.time()
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#save rmses in a dataframe and in a file
a <- c("knn","uireg")
b <- c(rmse_knn,rmse_ui_reg)
computed_rmses <- data.frame(a,b)
names(computed_rmses) <- c("method","rmse")
saveRDS(computed_rmses,"rmses.rds")
| /movielens.R | no_license | micpesce/capstone_dsh | R | false | false | 13,440 | r | # The goal of this project is to predict movie ratings given a dataset provided with "rating" as label column,
#and "userId", "movieId" as factors to be used as variables to compute predictor. The process will be carried
#on gradually to test the effect of each variable on RMSE.
#
#The Movielens dataset has been split in two separate dataframes: the "edx" train dataframe that contains 90% of data,
#on which the supervised algorithm is trained and cross-validated; the "validation" that contains 10% which
#is used as test set.
library(tidyverse)
library(ggplot2)
###@@@@@@@@@@@@@@@@@@ DATA CLEANING ########################
dim(movielens)
summary(movielens)
############################################################
###@@@@@@@@@@@@@@@@@@ DATA EXPLORATION ############################
#######MOVIE EFFECTS visualization########
#Generating a movie distribution plot: there are more than ten thousand
#movies, they have been grouped and numbered as consecutive to produce
#uniform graphic ( the original movieId would appear with a central no data area)
#movies distribution and effect
movie_domain <- movielens %>%
group_by(movieId) %>% summarize(votes=n(),rating = mean(rating) )
nMovies <- movie_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nMovies), y=movie_domain$votes) #temp dataframe
df2 <- data.frame(x=c(1:nMovies), y=movie_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" Movie distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nMovies))+xlab("Movies")+ ylab("Votes")
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" Movie vs rating")+ geom_point() +
scale_x_continuous(limits=c(0,nMovies))+xlab("MovieItem")+ ylab("rating") +
geom_smooth()
##########
#######USERS EFFECTS visualization########
#users distribution and effect. Two plots: the first shows users distribution
#vs grouped users. The second users VS rating
user_domain <- movielens %>%
group_by(userId) %>% summarize(votes=n(), rating = mean(rating))
nUsers <- user_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nUsers), y=user_domain$votes ) #temp dataframe
df2 <- data.frame(x=c(1:nUsers), y=user_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" User distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nUsers))+xlab("Users")+ ylab("Votes")
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" User VS rating")+ geom_point() +
scale_x_continuous(limits=c(0,nUsers))+xlab("Users")+ ylab("rating") +
geom_smooth(span = 0.1)
rm(df1, df2,user_domain,movie_domain) # removing temporary dataframes
######
#######TIME EFFECTS visualization########
#The next two code blocks generates plots to show the time effects on rating. The first one is week based
#the second day based. Both illustrates a weak influence on rating, so the time will not be considered
# as predictor
library(lubridate)
movielens %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_week=week(datetime))%>%
group_by(rating_week) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(rating_week, rating)) +
geom_point() +
geom_smooth()
movielens %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_day=day(datetime))%>%
group_by(rating_day) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(rating_day, rating)) +
geom_point() +
geom_smooth()
###############################
#######GENRE EFFECTS visualization########
#the next code builds up a numeric variable called "genreId" as the numeric
#counterpart of the string "genres" variable
genre_domain <- movielens %>%
group_by(genres) %>% summarize(n=n())
nGenres <- genre_domain %>% nrow(.)
genre_domain <- mutate(genre_domain, genreId=c(1:nGenres)) #create a new numeric genreId column
#The next three lines of code: creating a temporary df as join from movielens and the
#df with numeric genreId, and then to select only candidate variables to compute for correlation.
#Finally, the plot that shows graphics of correlations ( ignoring the character variable genres ) )
#As the very last operation, we get the data frame used for prediction
movielens_temp <- inner_join(movielens,genre_domain, by = "genres") %>% mutate(datetime = round_date(as_datetime(timestamp))) %>% mutate(rating_week=week(datetime))
movielens_temp <- subset( movielens_temp, select = c("userId","movieId","rating","genreId", "genres") )
movielens <- movielens_temp
rm(movielens_temp,genre_domain,nGenres) #remove the temporary dfs and variables
##genres distribution and effect
genre_domain <- movielens %>%
group_by(genres) %>% summarize(votes=n(),rating = mean(rating) )
nGenres <- genre_domain %>% nrow(.)
df1 <- data.frame(x=c(1:nGenres), y=genre_domain$votes) #temp dataframe
df2 <- data.frame(x=c(1:nGenres), y=genre_domain$rating) #temp dataframe
ggplot(df1,aes(x=df1$x,y=df1$y)) +
ggtitle(" Genre distribution")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nGenres))+xlab("Genre")+ ylab("Votes")
# Mean rating per (grouped) genre, with a smoothed trend line
ggplot(df2,aes(x=df2$x,y=df2$y)) +
ggtitle(" Genre vs rating")+ geom_point() +
scale_x_continuous(limits=c(0,nGenres))+xlab("Genre")+ ylab("rating") +
geom_smooth()
# Drop only the temporary objects that still exist at this point:
# movie_domain was already removed earlier (after the user-effect plots) and
# movie_titles is never created, so listing them here would raise
# "object not found" warnings.
rm(df1, df2, genre_domain) # removing temporary dfs
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@CORRELATION@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# As analysis final step, the correlation graphics will be created. Using the GGally
#library it's possible to get a one-shot plot that shows the full correlation relationships across
#factors
library(GGally)
ggcorr(movielens,label = TRUE, label_alpha = TRUE)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
###################################################################################
#@@@@T@@@@@@@@@@@@@@@@@@@@@@@@@THE MODELING APPROACH###############################
# The first step is to compute RMSE, that can be considered as a standard deviation between the true value in the test-set
#and the predicted value
# Root-mean-squared error between observed and predicted ratings.
# Interpreted as the typical deviation of a prediction from the true rating.
RMSE <- function(true_ratings, predicted_ratings) {
  prediction_errors <- true_ratings - predicted_ratings
  sqrt(mean(prediction_errors^2))
}
#####EDX AND VALIDATION SET#########
# Validation set obtained as 10% of MovieLens data
library(caret)
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# To make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Adding rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm( ratings, movies, test_index, temp, movielens, removed)
#########################
# 1) Regularization and the user+movie effect approach
#The rating distribution through movies, tells that some movies are rated more than others.
#Next code produces graphics that show movies versus ratings
library(ggplot2)
nMovies <- edx %>% count(movieId) %>% nrow(.) #total movies
edx %>% count(movieId) %>% mutate( RateShare=ifelse(.$n>5,"moreThanFive","lessThanFive")) %>%
ggplot(aes(x=c(1:nMovies),y=.$n, color=RateShare)) +
ggtitle(" Ratings Distribution by movie")+ geom_point() + scale_y_log10() +
scale_x_continuous(limits=c(0,nMovies))+xlab("Movies")+ ylab("Total ratings")
#The graphic is split in two parts: the upper in blu referring to the movies with more than five ratings
#the lower in red to that movies with five or lower number of ratings. The red section, just as an example,
#show that movies with few users ratings is a minor but a significant part of data
# This are noisy data, that should be processed by regularization
#The next step is to evaluate to implement the regularization principle centered on the lambda parameter.
#To minimize the few rating noise effects , the lambda parameter is adopted. In the general
#mathematical formula lambda acts as a penalty when the n amount of ratings for a given movie
#is low, viceversa it is ignored when the n is high: b_i=sum(rating - mu)/(n+lambda) and b_u=sum(rating - mu-b_i)/(n+lambda)
#The optimal lambda must be chosen after tuning processing that consists of a CROSS VALIDATION procedure
#For this, we have to further split the edx data frame in two data frames:
#The edx_train that is used to calculate b_u(lambda) and b_i(lambda) and the edx_test that is used
#to calculated RMSEs
#@@@@@@@@@@@@@@@ Training and test set for CV##########
set.seed(1)
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
edx_train <- edx[-test_index,]
temp <- edx[test_index,]
# Make sure userId and movieId in validation set are also in edx set
edx_test <- temp %>%
semi_join(edx_train, by = "movieId") %>%
semi_join(edx_train, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, edx_test)
edx_train <- rbind(edx_train, removed)
rm( test_index, temp, removed)
#######################################################
# In the general mathematical formula lambda acts as a penalty when the n amount of ratings for a given movie
#is low, viceversa it is ignored when the n is high
lambdas <- seq(0, 10, 0.25)
rmses <- sapply(lambdas, function(l){
mu <- mean(edx_train$rating)
b_i <- edx_train %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+l))
b_u <- edx_train %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <-
edx_test %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
return(RMSE(predicted_ratings, edx_test$rating))
})
ggplot2::qplot(lambdas, rmses)
best_lambda <- lambdas[which.min(rmses)] #the lambda that minimize RMSE on the edx_test set
#Next, let's apply the best_lambda on the target edx and validation sets to
#finally calculate predicted ratings
# Fix: mu was previously defined only inside the CV function passed to sapply(),
# so it is not visible here; recompute the global mean on the full edx set.
mu <- mean(edx$rating)
# Regularized movie effect, estimated on the full edx training set
b_i <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n() + best_lambda))
# Regularized user effect, conditional on the movie effect
b_u <- edx %>%
  left_join(b_i, by = "movieId") %>%
  group_by(userId) %>%
  summarize(b_u = sum(rating - b_i - mu)/(n() + best_lambda))
# Predict on the hold-out validation set: overall mean + movie effect + user effect
predicted_ratings <-
  validation %>%
  left_join(b_i, by = "movieId") %>%
  left_join(b_u, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>% .$pred
# the user+movie effect RMSE
rmse_ui_reg <- RMSE(predicted_ratings, validation$rating)
# 2) KNN approach
library(caret)
library(dplyr)
#######6% Sample of entire edx df###########
edx_sample <- sample_n(edx, 600000)
set.seed(1)
test_index <- createDataPartition(y = edx_sample$rating, times = 1, p = 0.1, list = FALSE)
edx_sample_train <- edx_sample[-test_index,]
temp <- edx_sample[test_index,]
# Fix: keep only test rows whose movieId and userId also appear in the TRAINING set.
# The original semi_joined against edx_sample, which contains temp itself, making
# both joins a no-op and leaving unseen users/movies in the test set. This mirrors
# the edx/validation split done earlier in the script.
edx_sample_test <- temp %>%
  semi_join(edx_sample_train, by = "movieId") %>%
  semi_join(edx_sample_train, by = "userId")
# Add the rows removed from the test set back into the training set
removed <- anti_join(temp, edx_sample_test)
edx_sample_train <- rbind(edx_sample_train, removed)
rm(test_index, temp, removed)
####end sample#######################################################
# Fit the model on the edx_sample training set
# --- Fit the k-NN model on the 6% edx sample defined above (timed run) ---
Sys.time() #start time for evaluating computing time
set.seed(123)
y <- edx_sample_train$rating
data=subset(edx_sample_train,select=c("movieId","userId")) #selecting only the predictors
# 10-fold cross-validation over k in {60, 65, ..., 100}; predictors are centered and scaled
model <- train(
y=y, x=data,
trControl = trainControl("cv", number = 10),
preProcess = c("center","scale"),
data=data, method = "knn",
tuneGrid = data.frame(k = seq(60, 100, 5))
)
# Plot model error RMSE vs different values of k
plot(model)
# Best tuning parameter k that minimizes the RMSE
model$bestTune
# Make predictions on the sample test set
predictions <- model %>% predict(edx_sample_test)
head(predictions)
# Compute the prediction error RMSE
rmse_knn <-RMSE(predictions, edx_sample_test$rating)
Sys.time() #stop time for evaluating computing time
#@@@@@@@@@@ Fit the KNN model on the whole edx training set
# WARNING: memory/CPU intensive -- not to be run on the full 10M data frame on a desktop system!
Sys.time() # start time, used to measure total computing time
set.seed(123)
y <- edx$rating
data=subset(edx,select=c("movieId","userId")) #selecting only the predictors
# 5-fold cross-validation over k in {70, 75, ..., 95}; predictors are centered and scaled
model <- train(
y=y, x=data,
trControl = trainControl("cv", number = 5),
preProcess = c("center","scale"),
data=data, method = "knn",
tuneGrid = data.frame(k = seq(70, 95, 5))
)
# Plot model error RMSE vs different values of k
plot(model)
# Best tuning parameter k that minimizes the RMSE
model$bestTune
# Make predictions on the final hold-out (validation) set
predictions <- model %>% predict(validation)
# Compute the prediction error RMSE
rmse_knn <-RMSE(predictions, validation$rating)
Sys.time() # stop time
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Save the computed RMSEs in a data frame and persist them to disk.
# Built directly with named columns (no throw-away vectors a/b to rename later);
# stringsAsFactors = FALSE keeps the method names as character, not factor,
# regardless of the R version's default.
computed_rmses <- data.frame(
  method = c("knn", "uireg"),
  rmse = c(rmse_knn, rmse_ui_reg),
  stringsAsFactors = FALSE
)
saveRDS(computed_rmses, "rmses.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gpav.R
\name{gpav}
\alias{gpav}
\title{Generalized Pool-Adjacent Violators (GPAV)}
\usage{
gpav(Y, W = NULL, adj)
}
\arguments{
\item{Y}{vector of scores relative to a single example. \code{Y} must be a numeric named vector, where names
correspond to classes' names, i.e. nodes of the graph \code{g} (root node included).}
\item{W}{vector of weight relative to a single example. If \code{W=NULL} (def.) it is assumed that
\code{W} is a unitary vector of the same length of the columns' number of the matrix \code{S} (root node included).}
\item{adj}{adjacency matrix of the graph which must be sparse, logical and upper triangular. Number of columns of \code{adj} must be
equal to the length of \code{Y} and \code{W}.}
}
\value{
A list of 3 elements:
\itemize{
\item \code{YFit}: a named vector with the scores of the classes corrected according to the \code{GPAV} algorithm.
\item \code{blocks}: list of vectors, containing the partitioning of nodes (represented with an integer number) into blocks;
\item \code{W}: vector of weights.
}
}
\description{
Implementation of \code{GPAV} (Generalized Pool-Adjacent Violators) algorithm.
(\cite{Burdakov et al., In: Di Pillo G, Roma M, editors. An O(n2) Algorithm for Isotonic Regression. Boston, MA: Springer US; 2006.
p. 25–33. Available from: \doi{10.1007/0-387-30065-1_3}})
}
\details{
Given the constraints adjacency matrix of the graph, a vector of scores \eqn{\hat{y} \in R^n} and a vector of strictly positive
weights \eqn{w \in R^n}, the \code{GPAV} algorithm returns a vector \eqn{\bar{y}} which is as close as possible, in the least-squares sense,
to the response vector \eqn{\hat{y}} and whose components are partially ordered in accordance with the constraints matrix \code{adj}.
In other words, \code{GPAV} solves the following problem:
\deqn{
\bar{y} = \left\{
\begin{array}{l}
\min \sum_{i \in V} (\hat{y}_i - \bar{y}_i )^2\\\\
\forall i, \quad j \in par(i) \Rightarrow \bar{y}_j \geq \bar{y}_i
\end{array}
\right.
}
where \eqn{V} are the number of vertexes of the graph.
}
\examples{
data(graph);
data(scores);
Y <- S[3,];
adj <- adj.upper.tri(g);
Y.gpav <- gpav(Y,W=NULL,adj);
}
| /man/gpav.Rd | no_license | cran/HEMDAG | R | false | true | 2,240 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gpav.R
\name{gpav}
\alias{gpav}
\title{Generalized Pool-Adjacent Violators (GPAV)}
\usage{
gpav(Y, W = NULL, adj)
}
\arguments{
\item{Y}{vector of scores relative to a single example. \code{Y} must be a numeric named vector, where names
correspond to classes' names, i.e. nodes of the graph \code{g} (root node included).}
\item{W}{vector of weight relative to a single example. If \code{W=NULL} (def.) it is assumed that
\code{W} is a unitary vector of the same length of the columns' number of the matrix \code{S} (root node included).}
\item{adj}{adjacency matrix of the graph which must be sparse, logical and upper triangular. Number of columns of \code{adj} must be
equal to the length of \code{Y} and \code{W}.}
}
\value{
A list of 3 elements:
\itemize{
\item \code{YFit}: a named vector with the scores of the classes corrected according to the \code{GPAV} algorithm.
\item \code{blocks}: list of vectors, containing the partitioning of nodes (represented with an integer number) into blocks;
\item \code{W}: vector of weights.
}
}
\description{
Implementation of \code{GPAV} (Generalized Pool-Adjacent Violators) algorithm.
(\cite{Burdakov et al., In: Di Pillo G, Roma M, editors. An O(n2) Algorithm for Isotonic Regression. Boston, MA: Springer US; 2006.
p. 25–33. Available from: \doi{10.1007/0-387-30065-1_3}})
}
\details{
Given the constraints adjacency matrix of the graph, a vector of scores \eqn{\hat{y} \in R^n} and a vector of strictly positive
weights \eqn{w \in R^n}, the \code{GPAV} algorithm returns a vector \eqn{\bar{y}} which is as close as possible, in the least-squares sense,
to the response vector \eqn{\hat{y}} and whose components are partially ordered in accordance with the constraints matrix \code{adj}.
In other words, \code{GPAV} solves the following problem:
\deqn{
\bar{y} = \left\{
\begin{array}{l}
\min \sum_{i \in V} (\hat{y}_i - \bar{y}_i )^2\\\\
\forall i, \quad j \in par(i) \Rightarrow \bar{y}_j \geq \bar{y}_i
\end{array}
\right.
}
where \eqn{V} are the number of vertexes of the graph.
}
\examples{
data(graph);
data(scores);
Y <- S[3,];
adj <- adj.upper.tri(g);
Y.gpav <- gpav(Y,W=NULL,adj);
}
|
## ----setup, eval=TRUE,echo=FALSE-----------------------------------------
knitr::opts_chunk$set(error = TRUE)
knitr::opts_chunk$set(echo = TRUE)
suppressMessages(library("tidyverse"))
library(keras)
library(tensorflow)
suppressMessages(library(GGally))
## ----data mnist, eval=TRUE, fig.show='asis', fig.keep='all'--------------
#loading the keras inbuilt mnist dataset
data <- dataset_mnist()
#separating train and test file
train_x <- data$train$x
train_y <- data$train$y
test_x <- data$test$x
test_y <- data$test$y
# converting a 2D array into a 1D array for feeding into the MLP and normalising the matrix
train_x <- array(train_x, dim = c(dim(train_x)[1], prod(dim(train_x)[-1]))) / 255
test_x <- array(test_x, dim = c(dim(test_x)[1], prod(dim(test_x)[-1]))) / 255
image(matrix(train_x[2,],28,28,byrow=T), axes = FALSE,col=gray.colors(255))
#converting the target variable to once hot encoded vectors using keras inbuilt function
train_y_cat <- to_categorical(train_y,10)
test_y_cat <- to_categorical(test_y,10)
train_y <- train_y_cat
test_y <- test_y_cat
## ----mnist network initialisation, eval=TRUE, echo=TRUE-----------------
model <- keras_model_sequential()
## ----mnist networkdefinition, eval =TRUE---------------------------------
model %>%
layer_dense(units = 784, input_shape = 784) %>%
layer_dropout(rate = 0.4) %>%
layer_activation(activation = 'relu') %>%
layer_dense(units = 10) %>%
layer_activation(activation = 'softmax')
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = c('accuracy')
)
### ----learning echo, eval=TRUE, echo=FALSE--------------------------------
#if (!exists("learning_history") & exists('model')) {learning_history <- model %>% fit(train_x, train_y, epochs = 30, batch_size = 1000)}
## ----learning, eval=FALSE------------------------------------------------
learning_history <- model %>% fit(train_x, train_y, epochs = 10, batch_size = 1000)
## ----learning again, eval=FALSE------------------------------------------
learning_history <- model %>% fit(train_x, train_y, epochs = 10, batch_size = 1000)
## ----testing, eval = TRUE------------------------------------------------
predictions <- model %>% predict_classes(test_x)
predictions_proba <- model %>% predict_proba(test_x)
loss_and_metrics <- model %>% evaluate(test_x, test_y, batch_size = 128)
## ----output--------------------------------------------------------------
summary(model)
plot(learning_history)
## ----saving and loading keras object-------------------------------------
save_model_hdf5(model, "NN_mnist.h5")
## ----transfo data image--------------------------------------------------
d <- dim(data$train$x)
train_x_picture <- array(0,c(d,1))
train_x_picture[,,,1] <- data$train$x/255
## ----def CNN-------------------------------------------------------------
# Sequential CNN for MNIST:
# conv(64, 3x3) -> maxpool(2x2) -> dropout(0.2) -> conv(32, 3x3) -> flatten -> dense softmax(10)
model_convol <- keras_model_sequential()
# NOTE(review): the first layer declares no input_shape; keras defers building the
# model until fit() sees the data -- confirm this works with the installed keras version.
model_convol %>%
layer_conv_2d(filters = 64, kernel_size = c(3,3), use_bias = TRUE, activation = 'relu',data_format = 'channels_last') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(0.2) %>%
layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu') %>%
layer_flatten() %>%
layer_dense(units = 10) %>%
layer_activation(activation = 'softmax')
# Multiclass cross-entropy with Adam; accuracy tracked during training
model_convol %>% compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = c('accuracy')
)
## ----fit CNN-------------------------------------------------------------
# 10% of the training images held out for validation; trained model saved to HDF5
learning_history_convol <- model_convol %>% fit(train_x_picture, train_y, validation_split = 0.1, epochs = 10, batch_size = 1000)
save_model_hdf5(model_convol, "CNN_mnist.h5")
## ----output CNN----------------------------------------------------------
summary(model_convol)
plot(learning_history_convol)
## ----iris----------------------------------------------------------------
data(iris)
x_iris <-
iris %>%
select(-Species) %>%
as.matrix %>%
scale
y_iris <- to_categorical(as.integer(iris$Species)-1)
## ----stratified_train_test_splitting-------------------------------------
set.seed(0)
ntest <- 15 # number of test samples in each class
test.index <-
tibble(row_number =1:nrow(iris),Species = iris$Species) %>% group_by(Species) %>% sample_n(ntest) %>% pull(row_number)
train.index <- (1:nrow(iris))[-test.index]
x_iris_train <- x_iris[train.index,]
y_iris_train <- y_iris[train.index,]
x_iris_test <- x_iris[test.index,]
y_iris_test <- y_iris[test.index,]
## ---- eval=FALSE---------------------------------------------------------
## model <- keras_model_sequential()
## model %>%
## layer_dense(units = 4, input_shape = 4) %>%
## layer_dropout(rate=0.1)%>%
## layer_activation(activation = 'relu') %>%
## layer_dense(units = 3) %>%
## layer_activation(activation = 'softmax')
##
## model %>% compile(
## loss = 'categorical_crossentropy',
## optimizer = 'adam',
## metrics = c('accuracy')
## )
## learning_history <- model %>% fit(x_iris_train, y_iris_train, epochs = 200, validation_split=0.0)
## loss_and_metrics <- model %>% evaluate(x_iris_test, y_iris_test)
##
## estimation <- apply(predict(model,x_iris_test),1,which.max)
## truth <- apply(y_iris_test,1,which.max)
## table(estimation, truth)
## ----model_for_iris_2_layers, eval = FALSE-------------------------------
## model_autoencoder <- keras_model_sequential()
##
## model_autoencoder %>%
## layer_dense(units = 2, activation = 'linear',input_shape = ncol(x_iris),name = "inter_layer") %>%
## layer_dense(units = 4, activation = 'linear')
##
## model_autoencoder %>% compile(
## loss = 'mse',
## optimizer = 'adam',
## metrics = 'mse'
## )
##
## model_autoencoder %>% fit(
## x_iris_train,
## x_iris_train,
## epochs = 1000,
## batch_size = 16,
## shuffle = TRUE,
## validation_split = 0.1,
## )
##
## model_projection = keras_model(inputs = model_autoencoder$input, outputs = get_layer(model_autoencoder,"inter_layer")$output)
##
## intermediate_output = predict(model_projection,x_iris_train)
##
##
## ---- eval = FALSE-------------------------------------------------------
## library(FactoMineR)
## res.pca <- PCA(x_iris_train, graph = FALSE)
##
## par(mfrow=c(1,2))
## plot(intermediate_output[,1],intermediate_output[,2],col = y_iris_train %*% (1:3))
## plot(res.pca$ind$coord[,1],res.pca$ind$coord[,2], col = y_iris_train %*% (1:3))
##
## ----saving_loading------------------------------------------------------
save_model_hdf5(model, "my_model.h5")
model <- load_model_hdf5("my_model.h5")
| /En-pratique/Deep-with-R.R | no_license | Sophiedonnet/NeuralNetworksForDummies | R | false | false | 6,511 | r | ## ----setup, eval=TRUE,echo=FALSE-----------------------------------------
knitr::opts_chunk$set(error = TRUE)
knitr::opts_chunk$set(echo = TRUE)
suppressMessages(library("tidyverse"))
library(keras)
library(tensorflow)
suppressMessages(library(GGally))
## ----data mnist, eval=TRUE, fig.show='asis', fig.keep='all'--------------
#loading the keras inbuilt mnist dataset
data <- dataset_mnist()
#separating train and test file
train_x <- data$train$x
train_y <- data$train$y
test_x <- data$test$x
test_y <- data$test$y
# converting a 2D array into a 1D array for feeding into the MLP and normalising the matrix
train_x <- array(train_x, dim = c(dim(train_x)[1], prod(dim(train_x)[-1]))) / 255
test_x <- array(test_x, dim = c(dim(test_x)[1], prod(dim(test_x)[-1]))) / 255
image(matrix(train_x[2,],28,28,byrow=T), axes = FALSE,col=gray.colors(255))
#converting the target variable to once hot encoded vectors using keras inbuilt function
train_y_cat <- to_categorical(train_y,10)
test_y_cat <- to_categorical(test_y,10)
train_y <- train_y_cat
test_y <- test_y_cat
## ----mnist network initialisation, eval=TRUE, echo=TRUE-----------------
model <- keras_model_sequential()
## ----mnist networkdefinition, eval =TRUE---------------------------------
model %>%
layer_dense(units = 784, input_shape = 784) %>%
layer_dropout(rate = 0.4) %>%
layer_activation(activation = 'relu') %>%
layer_dense(units = 10) %>%
layer_activation(activation = 'softmax')
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = c('accuracy')
)
### ----learning echo, eval=TRUE, echo=FALSE--------------------------------
#if (!exists("learning_history") & exists('model')) {learning_history <- model %>% fit(train_x, train_y, epochs = 30, batch_size = 1000)}
## ----learning, eval=FALSE------------------------------------------------
learning_history <- model %>% fit(train_x, train_y, epochs = 10, batch_size = 1000)
## ----learning again, eval=FALSE------------------------------------------
learning_history <- model %>% fit(train_x, train_y, epochs = 10, batch_size = 1000)
## ----testing, eval = TRUE------------------------------------------------
predictions <- model %>% predict_classes(test_x)
predictions_proba <- model %>% predict_proba(test_x)
loss_and_metrics <- model %>% evaluate(test_x, test_y, batch_size = 128)
## ----output--------------------------------------------------------------
summary(model)
plot(learning_history)
## ----saving and loading keras object-------------------------------------
save_model_hdf5(model, "NN_mnist.h5")
## ----transfo data image--------------------------------------------------
d <- dim(data$train$x)
train_x_picture <- array(0,c(d,1))
train_x_picture[,,,1] <- data$train$x/255
## ----def CNN-------------------------------------------------------------
model_convol <- keras_model_sequential()
model_convol %>%
layer_conv_2d(filters = 64, kernel_size = c(3,3), use_bias = TRUE, activation = 'relu',data_format = 'channels_last') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(0.2) %>%
layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu') %>%
layer_flatten() %>%
layer_dense(units = 10) %>%
layer_activation(activation = 'softmax')
model_convol %>% compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = c('accuracy')
)
## ----fit CNN-------------------------------------------------------------
learning_history_convol <- model_convol %>% fit(train_x_picture, train_y, validation_split = 0.1, epochs = 10, batch_size = 1000)
save_model_hdf5(model_convol, "CNN_mnist.h5")
## ----output CNN----------------------------------------------------------
summary(model_convol)
plot(learning_history_convol)
## ----iris----------------------------------------------------------------
data(iris)
x_iris <-
iris %>%
select(-Species) %>%
as.matrix %>%
scale
y_iris <- to_categorical(as.integer(iris$Species)-1)
## ----stratified_train_test_splitting-------------------------------------
set.seed(0)
ntest <- 15 # number of test samples in each class
test.index <-
tibble(row_number =1:nrow(iris),Species = iris$Species) %>% group_by(Species) %>% sample_n(ntest) %>% pull(row_number)
train.index <- (1:nrow(iris))[-test.index]
x_iris_train <- x_iris[train.index,]
y_iris_train <- y_iris[train.index,]
x_iris_test <- x_iris[test.index,]
y_iris_test <- y_iris[test.index,]
## ---- eval=FALSE---------------------------------------------------------
## model <- keras_model_sequential()
## model %>%
## layer_dense(units = 4, input_shape = 4) %>%
## layer_dropout(rate=0.1)%>%
## layer_activation(activation = 'relu') %>%
## layer_dense(units = 3) %>%
## layer_activation(activation = 'softmax')
##
## model %>% compile(
## loss = 'categorical_crossentropy',
## optimizer = 'adam',
## metrics = c('accuracy')
## )
## learning_history <- model %>% fit(x_iris_train, y_iris_train, epochs = 200, validation_split=0.0)
## loss_and_metrics <- model %>% evaluate(x_iris_test, y_iris_test)
##
## estimation <- apply(predict(model,x_iris_test),1,which.max)
## truth <- apply(y_iris_test,1,which.max)
## table(estimation, truth)
## ----model_for_iris_2_layers, eval = FALSE-------------------------------
## model_autoencoder <- keras_model_sequential()
##
## model_autoencoder %>%
## layer_dense(units = 2, activation = 'linear',input_shape = ncol(x_iris),name = "inter_layer") %>%
## layer_dense(units = 4, activation = 'linear')
##
## model_autoencoder %>% compile(
## loss = 'mse',
## optimizer = 'adam',
## metrics = 'mse'
## )
##
## model_autoencoder %>% fit(
## x_iris_train,
## x_iris_train,
## epochs = 1000,
## batch_size = 16,
## shuffle = TRUE,
## validation_split = 0.1,
## )
##
## model_projection = keras_model(inputs = model_autoencoder$input, outputs = get_layer(model_autoencoder,"inter_layer")$output)
##
## intermediate_output = predict(model_projection,x_iris_train)
##
##
## ---- eval = FALSE-------------------------------------------------------
## library(FactoMineR)
## res.pca <- PCA(x_iris_train, graph = FALSE)
##
## par(mfrow=c(1,2))
## plot(intermediate_output[,1],intermediate_output[,2],col = y_iris_train %*% (1:3))
## plot(res.pca$ind$coord[,1],res.pca$ind$coord[,2], col = y_iris_train %*% (1:3))
##
## ----saving_loading------------------------------------------------------
save_model_hdf5(model, "my_model.h5")
model <- load_model_hdf5("my_model.h5")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.