content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
###Assignment 2###
# Author: Paul DellaGrotte
library(ggplot2) # Charts
library(pastecs) # descriptive statistics
library(rpart) # decision tree
library(rpart.plot)
library(randomForest)
library(MASS)
library(reshape2)
library(lattice)
library(leaps)
library(glmnet)
library(plyr)
######################################################
############### Load Data ############################
######################################################
# Placeholder path: replace with the actual location of the diamonds CSV
data <- "filepath of csv"
# read in data; the file has a header row (header = TRUE)
df <- read.csv(data, header=TRUE)
df2 <- df # working copy that will receive the dummy variables
######################################################
######################################################
############### Data Quality Check ###################
######################################################
head(df)      # preview the first rows
names(df)     # list the column names
stat.desc(df) # print table of descriptive stats (from pastecs)
######################################################
######################################################
############## Data Transformations ##################
######################################################
# Expand every categorical variable into 0/1 indicator (dummy) columns,
# one per observed level, named like "color_5" or "store_Zales".
categorical_vars <- c("color", "clarity", "store", "channel")
for (cat_var in categorical_vars) {
  for (lvl in unique(df2[[cat_var]])) {
    df2[paste(cat_var, lvl, sep = "_")] <- ifelse(df2[[cat_var]] == lvl, 1, 0)
  }
}
# Drop the now-redundant original categorical columns, plus one reference
# level per variable (e.g. color_1) to avoid the dummy-variable trap.
for (drop_col in c(categorical_vars,
                   "color_1", "clarity_2", "store_Ashford", "channel_Independent")) {
  df2[[drop_col]] <- NULL
}
# Remove spaces from store dummy names so they read as single identifiers
store_renames <- c("store_R. Holland" = "store_RHolland",
                   "store_Fred Meyer" = "store_FredMeyer",
                   "store_Blue Nile"  = "store_BlueNile")
for (old_name in names(store_renames)) {
  names(df2)[names(df2) == old_name] <- store_renames[[old_name]]
}
# Replace price and carat with their log transforms
df2$logprice <- log(df2$price)
df2$logcarat <- log(df2$carat)
df2$price <- NULL
df2$carat <- NULL
# Print results of transformations
View(df2) # check that df2 has all expected dummy variables
str(df2)  # show the structure of the transformed frame
######################################################
######################################################
####################### EDA ##########################
######################################################
# Univariate distributions of the two key numeric variables
hist(df$price)
hist(df$carat)
# Reusable axis scales shared by several of the plots below
scale_x <- scale_x_continuous(limits = c(0, 3),
breaks = round(seq(0, max(df$carat), by = .25),2))
scale_y <- scale_y_continuous(limits = c(0, 30000),
labels = scales::dollar,
breaks = round(seq(0, max(df$price), by = 5000),2))
gcorr<- round(cor(df$carat, df$price),4) # Correlation for display
# Scatter of price vs carat, colored by color and shaped by cut
ggplot(df, aes(x=carat, y=price, color=color, shape=cut)) +
geom_point() +
scale_y + scale_x +
labs(title=paste("Correlation=",gcorr), x = "carat", y= "price") +
theme(plot.title = element_text(face="bold", size=rel(1.25)))
# Same relationship with a smoother, on the raw and the log-log scale
ggplot(df, aes(carat, price)) + geom_point() + geom_smooth() +
labs(x="carat", y="price") + scale_x + scale_y
ggplot(df, aes(log(carat), log(price))) + geom_point() + geom_smooth()+
labs(x="log(carat)", y="log(price)")
# Boxplots of price by each categorical variable
gplot1 <- ggplot(data = df, aes(color, price)) + theme(legend.position="none")
gplot1 + geom_boxplot(aes(fill = color)) + scale_y
gplot2 <- ggplot(data = df, aes(channel, price))
gplot2 + geom_boxplot(aes(fill = channel)) + scale_y + theme(legend.position="none")
gplot2 <- ggplot(data = df, aes(cut, price))
gplot2 + geom_boxplot(aes(fill = cut)) + scale_y + theme(legend.position="none")
gplot2 <- ggplot(data = df, aes(clarity, price))
gplot2 + geom_boxplot(aes(fill = clarity)) + scale_y + theme(legend.position="none")
gplot3 <- ggplot(data = df, aes(store, price))
# NOTE(review): fill = cut here and in the next plot while x is store/clarity
# looks like a copy-paste slip; fill = store / fill = clarity may be intended.
gplot3 + geom_boxplot(aes(fill = cut)) + scale_y
gplot3 <- ggplot(data = df, aes(clarity, price))
gplot3 + geom_boxplot(aes(fill = cut))
gplot4 <- ggplot(data = df, aes(carat, price))
gplot4 + geom_point(color="red")
gplot4 + geom_point(aes(color=cut))
# Price vs carat colored by clarity, faceted on cut
ggplot(df, aes(x=carat, y=price, color=clarity)) +
geom_point() + facet_grid(~ cut)
# NOTE(review): gplot5 is assigned but never printed, so this bar chart
# is never actually displayed.
gplot5 <- ggplot(df, aes(color, fill=cut)) + geom_bar()
ggplot(df, aes(price, color=cut)) + geom_freqpoly(binwidth=1000)
# looks like ideal cut is bimodal for price & carat
ggplot(df, aes(price, fill=cut)) + geom_histogram(alpha = 0.5, binwidth =600)
ggplot(df, aes(carat, fill=cut)) + geom_histogram(binwidth =0.4)
# Histograms with overlaid normal density curves for reference
hist(df$price, freq = F, main=" ", xlab= "Price")
curve(dnorm(x, mean=mean(df$price),sd=sd(df$price)), add = T, col="red", lwd=2)
hist(df$carat, freq = F, main=" ", xlab= "Carat")
curve(dnorm(x, mean=mean(df$carat),sd=sd(df$carat)), add=T, col="red", lwd=2)
### Decision Tree for EDA #####
# Quick anova regression tree on raw price to see which variables dominate
M0 <- rpart(price ~ ., data=df, method="anova")
summary(M0)
rpart.plot(M0) # plot model
###############################
######################################################
######################################################
############## Split Training-Testing ################
######################################################
# 70 / 30 Split per assignment instructions
set.seed(1200) # set the seed so the random shuffle is reproducible
g <- runif(nrow(df2)) # one uniform draw per row, used as a shuffle key
df_random <- df2[order(g),] # shuffle the rows by sorting on the random key
train_size <- floor(.70 * nrow(df2)) # Select % of data set to use for training
test_size <- nrow(df2) - train_size # use remainder of data set for testing
df_train <- df_random[1:train_size,]
df_test <- df_random[(train_size+1):nrow(df2),]
######################################################
######################################################
####################### Models ########################
######################################################
# Helper metrics used to score every model below.
# R-squared: proportion of the variance in observations y explained by
# predictions f (1 = perfect fit, 0 = no better than the mean).
rsq <- function(y, f) {
  1 - sum((y - f)^2) / sum((y - mean(y))^2)
}
# Root mean squared error between observations y and predictions f.
rmse <- function(y, f) {
  sqrt(mean((y - f)^2))
}
### Decision Tree #####
# Regression tree on the transformed training data (log-price response)
M0 <- rpart(logprice ~ ., data=df_train, method="anova")
p0 <- predict(M0, newdata=df_test) # holdout predictions
plot(df_test$logprice, p0)
actual <- df_test$logprice
predicted <- p0
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p0 <- predict(M0, newdata=df_train) # in-sample predictions
actual <- df_train$logprice
predicted <- p0
rsq(actual,predicted)
rmse(actual,predicted)
########################
#### Single Variable ###
# Simple benchmark model: log(price) ~ log(carat)
M1<- lm(logprice ~ logcarat, data=df_train)
p1 <- predict(M1, newdata=df_test)
actual <- df_test$logprice
predicted <- p1
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p1 <- predict(M1, newdata=df_train)
actual <- df_train$logprice
predicted <- p1
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
summary(M1)$r.squared
########################
########################
## Variable Selection ##
# Stepwise model selection starting from the full linear model.
M2 <- lm(logprice~ ., data = df_train)
step_b <- step(M2, direction = "backward")
# NOTE: forward selection started FROM the full model cannot add any terms,
# so step_f is simply the full model; true forward selection would start
# from an intercept-only model with scope = formula(M2).
step_f <- step(M2, direction = "forward")
step_s <- step(M2, direction = "both")
listRsqu <- list()
# BUG FIX: the original called c() without assigning the result, so
# listRsqu stayed an empty list when printed below.
listRsqu <- c(listRsqu, a=summary(step_b)$r.squared, b=summary(step_f)$r.squared, c=summary(step_s)$r.squared)
listRsqu # best is forward selection (i.e. the full model -- see NOTE above)
# Holdout performance of the selected model
p4 <- predict(step_f, newdata=df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p4 <- predict(step_f, newdata=df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
## Model w/ Interaction ##
# First pass: small model with a cut x internet-channel interaction
M3 <- lm(logprice~ logcarat+cut*channel_Internet, data = df_train)
summary(M3)$r.squared
# NOTE(review): M3 is immediately overwritten below, so the model above is
# only used for the single r.squared printout.
# Full dummy-variable model plus the channel_Internet*cut interaction
M3 <-lm(formula = logprice ~ cut + color_4 + color_5 + color_7 + color_8 +
color_3 + color_2 + color_6 + color_9 + clarity_7 + clarity_6 +
clarity_4 + clarity_8 + clarity_9 + clarity_5 + clarity_10 +
clarity_3 + store_Goodmans + store_Chalmers + store_FredMeyer +
store_RHolland + store_Ausmans + store_University + store_Kay +
store_Zales + store_Danford + store_BlueNile + store_Riddles +
channel_Mall + channel_Internet + logcarat + channel_Internet*cut, data = df_train)
# Holdout performance
p4 <- predict(M3, newdata=df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p4 <- predict(M3, newdata=df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
####### LASSO ##########
# glmnet requires a numeric design matrix rather than a formula interface.
# NOTE: the original code first built a model.matrix from the raw df and then
# immediately overwrote it with the df2 version; that dead assignment has
# been removed here.
xfactors <-model.matrix(data = df2,logprice ~ cut + color_4 + color_5 + color_7 + color_8 +
color_3 + color_2 + color_6 + color_9 + clarity_7 + clarity_6 +
clarity_4 + clarity_8 + clarity_9 + clarity_5 + clarity_10 +
clarity_3 + store_Goodmans + store_Chalmers + store_FredMeyer +
store_RHolland + store_Ausmans + store_University + store_Kay +
store_Zales + store_Danford + store_BlueNile + store_Riddles +
channel_Mall + channel_Internet + logcarat)
# BUG FIX: the original passed y = df$price (raw price from the untransformed
# data frame) while the predictors and the formula response came from df2 on
# the log scale; the response must be df2$logprice.  The intercept column
# produced by model.matrix is dropped because glmnet fits its own intercept.
fit = glmnet(xfactors[, -1], y = df2$logprice, alpha = 1)
plot(fit)
coef(fit)
summary(fit)
########################
#### Random Forest #####
# Random forest on log price; 100 trees, bootstrap sampling with replacement
M4 <-randomForest(logprice ~ ., data=df_train, replace=T,ntree=100)
#vars<-dimnames(imp)[[1]]
#imp<- data.frame(vars=vars, imp=as.numeric(imp[,1]))
#imp<-imp[order(imp$imp,decreasing=T),]
par(mfrow=c(1,2)) # show importance and error plots side by side
varImpPlot(M4, main="Variable Importance Plot: Base Model")
plot(M4, main="Error vs. No. of Trees Plot: Base Model")
# Holdout performance
p4<- predict(object=M4, newdata = df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
#On Training
p4<- predict(object=M4, newdata = df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
######################################################
######################################################
############### Model Comparison #####################
######################################################
#coefficients(m1) # model coefficients
#confint(m1, level=0.95) # CIs for model parameters
#m1ted(m1) # predicted values
#residuals(m1) # residuals
#anova(m1) # anova table
#vcov(m1) # covariance matrix for model parameters
#influence(m1) # regression diagnostics
###################################################### | /diamonds_analysis.R | no_license | pdellagrotte/diamonds | R | false | false | 10,967 | r | ###Assignment 2###
# Author: Paul DellaGrotte
library(ggplot2) # Charts
library(pastecs) # descriptive statistics
library(rpart) # decision tree
library(rpart.plot)
library(randomForest)
library(MASS)
library(reshape2)
library(lattice)
library(leaps)
library(glmnet)
library(plyr)
######################################################
############### Load Data ############################
######################################################
data <- "filepath of csv"
# read in data from web, no header
df <- read.csv(data, header=TRUE)
df2 <- df # create data frame for dummy variables
######################################################
######################################################
############### Data Quality Check ###################
######################################################
head(df)
names(df)
stat.desc(df) # print table of descriptive stats
######################################################
######################################################
############## Data Transformations ##################
######################################################
# Create dummy variables for each categorical variable
for(level in unique(df2$color)){
df2[paste("color", level, sep = "_")] <- ifelse(df2$color == level, 1, 0)
}
for(level in unique(df2$clarity)){
df2[paste("clarity", level, sep = "_")] <- ifelse(df2$clarity == level, 1, 0)
}
for(level in unique(df2$store)){
df2[paste("store", level, sep = "_")] <- ifelse(df2$store == level, 1, 0)
}
for(level in unique(df2$channel)){
df2[paste("channel", level, sep = "_")] <- ifelse(df2$channel == level, 1, 0)
}
#Remove redundant x variables
df2$color <- NULL
df2$clarity <- NULL
df2$channel <- NULL
df2$store <- NULL
#Remove reference category (e.g. color_1, clarity_2, ect)
df2$color_1 <- NULL
df2$clarity_2 <- NULL
df2$store_Ashford <- NULL
df2$channel_Independent <-NULL
#Remove spaces from names to be read as continuous string
names(df2)[names(df2)=="store_R. Holland"] <- "store_RHolland"
names(df2)[names(df2)=="store_Fred Meyer"] <- "store_FredMeyer"
names(df2)[names(df2)=="store_Blue Nile"] <- "store_BlueNile"
#Add log transformations of price and carat
df2$logprice <- log(df2$price)
df2$logcarat<- log(df2$carat)
df2$price <- NULL
df2$carat <-NULL
# Print results of transformations
View(df2) # check to make sure df2 has all proper dummy variables
str(df2) # Show structure
######################################################
######################################################
####################### EDA ##########################
######################################################
hist(df$price)
hist(df$carat)
scale_x <- scale_x_continuous(limits = c(0, 3),
breaks = round(seq(0, max(df$carat), by = .25),2))
scale_y <- scale_y_continuous(limits = c(0, 30000),
labels = scales::dollar,
breaks = round(seq(0, max(df$price), by = 5000),2))
gcorr<- round(cor(df$carat, df$price),4) # Correlation for display
ggplot(df, aes(x=carat, y=price, color=color, shape=cut)) +
geom_point() +
scale_y + scale_x +
labs(title=paste("Correlation=",gcorr), x = "carat", y= "price") +
theme(plot.title = element_text(face="bold", size=rel(1.25)))
ggplot(df, aes(carat, price)) + geom_point() + geom_smooth() +
labs(x="carat", y="price") + scale_x + scale_y
ggplot(df, aes(log(carat), log(price))) + geom_point() + geom_smooth()+
labs(x="log(carat)", y="log(price)")
gplot1 <- ggplot(data = df, aes(color, price)) + theme(legend.position="none")
gplot1 + geom_boxplot(aes(fill = color)) + scale_y
gplot2 <- ggplot(data = df, aes(channel, price))
gplot2 + geom_boxplot(aes(fill = channel)) + scale_y + theme(legend.position="none")
gplot2 <- ggplot(data = df, aes(cut, price))
gplot2 + geom_boxplot(aes(fill = cut)) + scale_y + theme(legend.position="none")
gplot2 <- ggplot(data = df, aes(clarity, price))
gplot2 + geom_boxplot(aes(fill = clarity)) + scale_y + theme(legend.position="none")
gplot3 <- ggplot(data = df, aes(store, price))
gplot3 + geom_boxplot(aes(fill = cut)) + scale_y
gplot3 <- ggplot(data = df, aes(clarity, price))
gplot3 + geom_boxplot(aes(fill = cut))
gplot4 <- ggplot(data = df, aes(carat, price))
gplot4 + geom_point(color="red")
gplot4 + geom_point(aes(color=cut))
ggplot(df, aes(x=carat, y=price, color=clarity)) +
geom_point() + facet_grid(~ cut)
gplot5 <- ggplot(df, aes(color, fill=cut)) + geom_bar()
ggplot(df, aes(price, color=cut)) + geom_freqpoly(binwidth=1000)
# looks like ideal cut is bimodal for price & carat
ggplot(df, aes(price, fill=cut)) + geom_histogram(alpha = 0.5, binwidth =600)
ggplot(df, aes(carat, fill=cut)) + geom_histogram(binwidth =0.4)
hist(df$price, freq = F, main=" ", xlab= "Price")
curve(dnorm(x, mean=mean(df$price),sd=sd(df$price)), add = T, col="red", lwd=2)
hist(df$carat, freq = F, main=" ", xlab= "Carat")
curve(dnorm(x, mean=mean(df$carat),sd=sd(df$carat)), add=T, col="red", lwd=2)
### Decision Tree for EDA #####
M0 <- rpart(price ~ ., data=df, method="anova")
summary(M0)
rpart.plot(M0) # plot model
###############################
######################################################
######################################################
############## Split Training-Testing ################
######################################################
# 70 / 30 Split per assignment instructions
set.seed(1200) # set the seed so randomness is reproducable
g <- runif(nrow(df2)) # set a bunch of random numbers as rows
df_random <- df2[order(g),] # reorder the data set
train_size <- floor(.70 * nrow(df2)) # Select % of data set to use for training
test_size <- nrow(df2) - train_size # use remainder of data set for testing
df_train <- df_random[1:train_size,]
df_test <- df_random[(train_size+1):nrow(df2),]
######################################################
######################################################
####################### Models ########################
######################################################
# Functions to compute R-Squared and RMSE
rsq <- function(y,f) {1 - sum((y-f)^2)/sum((y-mean(y))^2) }
rmse <- function(y, f) {sqrt(mean((y-f)^2)) }
### Decision Tree #####
M0 <- rpart(logprice ~ ., data=df_train, method="anova")
p0 <- predict(M0, newdata=df_test) #set type = to class to get correct output
plot(df_test$logprice, p0)
actual <- df_test$logprice
predicted <- p0
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p0 <- predict(M0, newdata=df_train) #set type = to class to get correct output
actual <- df_train$logprice
predicted <- p0
rsq(actual,predicted)
rmse(actual,predicted)
########################
#### Single Variable ###
M1<- lm(logprice ~ logcarat, data=df_train)
p1 <- predict(M1, newdata=df_test)
actual <- df_test$logprice
predicted <- p1
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p1 <- predict(M1, newdata=df_train)
actual <- df_train$logprice
predicted <- p1
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
summary(M1)$r.squared
########################
########################
## Variable Selection ##
M2 <- lm(logprice~ ., data = df_train)
step_b <- step(M2, direction = "backward")
step_f <- step(M2, direction = "forward")
step_s <- step(M2, direction = "both")
listRsqu <- list()
c(listRsqu, a=summary(step_b)$r.squared, b=summary(step_f)$r.squared, c=summary(step_s)$r.squared)
listRsqu # best is forward selection
p4 <- predict(step_f, newdata=df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p4 <- predict(step_f, newdata=df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
## Model w/ Interaction ##
M3 <- lm(logprice~ logcarat+cut*channel_Internet, data = df_train)
summary(M3)$r.squared
M3 <-lm(formula = logprice ~ cut + color_4 + color_5 + color_7 + color_8 +
color_3 + color_2 + color_6 + color_9 + clarity_7 + clarity_6 +
clarity_4 + clarity_8 + clarity_9 + clarity_5 + clarity_10 +
clarity_3 + store_Goodmans + store_Chalmers + store_FredMeyer +
store_RHolland + store_Ausmans + store_University + store_Kay +
store_Zales + store_Danford + store_BlueNile + store_Riddles +
channel_Mall + channel_Internet + logcarat + channel_Internet*cut, data = df_train)
p4 <- predict(M3, newdata=df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
# On Training
p4 <- predict(M3, newdata=df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
####### LASSO ##########
xfactors <- model.matrix(df$price ~ df$carat +
df$color + df$clarity +
df$cut + df$channel + df$store)
xfactors <-model.matrix(data = df2,logprice ~ cut + color_4 + color_5 + color_7 + color_8 +
color_3 + color_2 + color_6 + color_9 + clarity_7 + clarity_6 +
clarity_4 + clarity_8 + clarity_9 + clarity_5 + clarity_10 +
clarity_3 + store_Goodmans + store_Chalmers + store_FredMeyer +
store_RHolland + store_Ausmans + store_University + store_Kay +
store_Zales + store_Danford + store_BlueNile + store_Riddles +
channel_Mall + channel_Internet + logcarat)
fit = glmnet(xfactors, y = df$price, alpha = 1)
plot(fit)
coef(fit)
summary(fit)
########################
#### Random Forest #####
M4 <-randomForest(logprice ~ ., data=df_train, replace=T,ntree=100)
#vars<-dimnames(imp)[[1]]
#imp<- data.frame(vars=vars, imp=as.numeric(imp[,1]))
#imp<-imp[order(imp$imp,decreasing=T),]
par(mfrow=c(1,2))
varImpPlot(M4, main="Variable Importance Plot: Base Model")
plot(M4, main="Error vs. No. of Trees Plot: Base Model")
p4<- predict(object=M4, newdata = df_test)
actual <- df_test$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
#On Training
p4<- predict(object=M4, newdata = df_train)
actual <- df_train$logprice
predicted <- p4
error <- actual - predicted
rsq(actual,predicted)
rmse(actual,predicted)
########################
######################################################
######################################################
############### Model Comparison #####################
######################################################
#coefficients(m1) # model coefficients
#confint(m1, level=0.95) # CIs for model parameters
#m1ted(m1) # predicted values
#residuals(m1) # residuals
#anova(m1) # anova table
#vcov(m1) # covariance matrix for model parameters
#influence(m1) # regression diagnostics
###################################################### |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_Romics_Base_Functions.R
\name{romicsUpdateSteps}
\alias{romicsUpdateSteps}
\title{stepUpdater()}
\usage{
romicsUpdateSteps(romics_object, arguments)
}
\arguments{
\item{romics_object}{A romics_object created using romicsCreateObject()}
\item{arguments}{the arguments of the function, required to read the user input of that function; this user input will be used to generate the steps. The arguments are obtained by running the following code <arguments<-as.list(match.call())> in the first line of a function}
}
\value{
This function adds a description of the processing to the steps layer of a Romics object
}
\description{
Updates the steps of the romics_object; requires the arguments to have been recorded earlier in the function
}
\details{
The goal of Romics processor is to provide a trackable and reproducible pipeline for processing omics data. Consequently, whenever a function is created, it must implement a way to record the user input in the steps layer of the Romics_object.
This function simplifies the work of developers who want to contribute to Romics by streamlining this process: only two lines of code are then necessary to update the steps.
The first line of code has to be placed immediately after the function declaration: <arguments<-as.list(match.call())>
The second line of code, <romics_object<-stepUpdater(romics_object,arguments)>, has to be placed at the end of the function code (ideally right before returning the processed romics_object or the graphic generated by the function).
}
\author{
Geremy Clair
}
| /man/romicsUpdateSteps.Rd | permissive | asalt/RomicsProcessor | R | false | true | 1,668 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_Romics_Base_Functions.R
\name{romicsUpdateSteps}
\alias{romicsUpdateSteps}
\title{stepUpdater()}
\usage{
romicsUpdateSteps(romics_object, arguments)
}
\arguments{
\item{romics_object}{A romics_object created using romicsCreateObject()}
\item{arguments}{the arguments of a function are required to read the user input of a function, this user input will be used to generate the steps, the arguments are obtained by running the following code <arguments<-as.list(match.call())> in the first line of a function}
}
\value{
This function add the description of the processing to the step layer of an Romics object
}
\description{
Updates the steps of the romics_object, require to have recorded the argument in earlier steps of the function
}
\details{
The goal of Romics processor is to provide a trackable and reproducible pipeline for processing omics data. Subsequently it is necessary when a function is created to implement a way to record the user input that will be recorded in the steps layer of the Romics_object.
This function will enable to simplify the work of developers who want to contribute to Romics by simplifying this process. Only two lines of codes are then necessary to update the steps.
The first line of code has to be placed in the first line after the function declaration : <arguments<-as.list(match.call())>
The second line of code has to be <romics_object<-stepUpdater(romics_object,arguments)> placed at the end of the function code (ideally right before returning the processed romics_object or graphic generated by the function)
}
\author{
Geremy Clair
}
|
## Path to the raw household power-consumption data set
input_file <- "./household_power_consumption.txt"
## Read the file, declaring the class of every column up front;
## "?" marks missing values in this data set
power_data <- read.table(input_file, header = TRUE, sep = ";",
                         colClasses = c("character", "character", rep("numeric", 7)),
                         na.strings = "?")
## Keep only the two days we want to plot (1-2 Feb 2007)
keep_rows <- power_data$Date == "1/2/2007" | power_data$Date == "2/2/2007"
power_data <- power_data[keep_rows, ]
## Combine the Date and Time columns into a single date-time column
power_data$DateTime <- strptime(paste(power_data$Date, power_data$Time), "%d/%m/%Y %H:%M:%S")
## Draw the line plot of global active power over time
plot(power_data$DateTime, power_data$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
## Copy the screen device to a 480x480 PNG file
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off() | /plot2.R | no_license | Bissingb/ExData_Plotting1 | R | false | false | 725 | r | filename <- "./household_power_consumption.txt"
##Opens the file and sets the class of each column
data <- read.table(filename,header = TRUE,sep = ";",colClasses = c("character", "character", rep("numeric",7)),na = "?")
##Subsets the data according to the two days we are looking to plot
data <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
##Creates a new column that combines the Date and Time columns into one
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
##Creates the plot
plot(data$DateTime, data$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
##Saves the plot in a png file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
#' Get all templates imported on the server
#'
#' \code{get_qx} returns a data frame with all the questionnaires that are currently
#' imported on the server
#'
#' @param server Prefix for the survey server. It is whatever comes before
#' mysurvey.solutions: [prefix].mysurvey.solutions.
#' @param user Username for the API user on the server.
#' @param password Password for the API user on the server.
#'
#' @importFrom rlang .data
#' @export
#'
#' @return A data frame with information about the
#' imported questionnaires on the server.
#' @examples
#' \dontrun{
#' get_qx(server = "lfs2018", user = "APIuser2018", password = "SafePassword123")
#' }
get_qx <- function(server=NULL, user=NULL, password=NULL) {
  #== CHECK PARAMETERS
  # NOTE: Look at utils.R file for code for checks
  # check that server, user, password are non-missing and strings
  check_server_params(server)
  check_server_params(user)
  check_server_params(password)
  # check internet connection
  check_internet()
  # trim and lower server prefix
  server <- tolower(trimws(server))
  # build the server URL and confirm the server exists
  server_url <- paste0("https://", server, ".mysurvey.solutions")
  check_server(server_url)
  # build base URL for API and the questionnaires endpoint
  api_url <- paste0(server_url, "/api/v1")
  endpoint <- paste0(api_url, "/questionnaires")
  # Send GET request to API for the first page (server pages by 40 items)
  data <- httr::GET(endpoint, httr::authenticate(user, password),
                    query = list(limit = 40, offset = 1))
  # If response code is 200, request was successfully processed
  if (httr::status_code(data) == 200) {
    # parse the first page of imported templates
    # (encoding stated explicitly so content() does not have to guess)
    qnrList <- jsonlite::fromJSON(
      httr::content(data, as = "text", encoding = "UTF-8"), flatten = TRUE)
    qnrList_temp <- as.data.frame(qnrList$Questionnaires)
    if (qnrList$TotalCount <= 40) {
      # 40 questionnaires or less: single page, nothing more to fetch
      qnrList_all <- dplyr::arrange(qnrList_temp, .data$Title, .data$Version)
    } else {
      quest_more <- list(qnrList_temp)
      # number of pages needed to cover all questionnaires
      nquery <- ceiling(qnrList$TotalCount / 40)
      # fetch the remaining pages
      for (i in 2:nquery) {
        data2 <- httr::GET(endpoint, httr::authenticate(user, password),
                           query = list(limit = 40, offset = i))
        # BUG FIX: the original never checked the status of follow-up pages,
        # so an auth/network failure mid-pagination surfaced as a confusing
        # JSON parse error instead of a clear message.
        if (httr::status_code(data2) != 200) {
          stop("Encountered issue with status code ",
               httr::status_code(data2), " while fetching page ", i)
        }
        qnrList_more <- jsonlite::fromJSON(
          httr::content(data2, as = "text", encoding = "UTF-8"),
          flatten = TRUE)
        # append this page's questionnaires to the running list
        quest_more[[i]] <- as.data.frame(qnrList_more$Questionnaires)
      }
      qnrList_temp <- dplyr::bind_rows(quest_more)
      qnrList_all <- dplyr::arrange(qnrList_temp, .data$Title, .data$Version)
    }
    # return data frame of questionnaires
    return(qnrList_all)
  } else if (httr::status_code(data) == 401) { # login error
    stop("Incorrect username or password.")
  } else { # Issue error message
    stop("Encountered issue with status code ", httr::status_code(data))
  }
}
| /R/get_qx.R | permissive | l2nguyen/susoapir | R | false | false | 3,142 | r | #' Get all templates imported on the server
#'
#' \code{get_qx} returns a data frame with all the questionnaires that are currently
#' imported on the server
#'
#' @param server Prefix for the survey server. It is whatever comes before
#' mysurvey.solutions: [prefix].mysurvey.solutions.
#' @param user Username for the API user on the server.
#' @param password Password for the API user on the server.
#'
#' @importFrom rlang .data
#' @export
#'
#' @return A data frame with information about the
#' imported questionnaires on the server.
#' @examples
#' \dontrun{
#' get_qx(server = "lfs2018", user = "APIuser2018", password = "SafePassword123")
#' }
get_qx <- function(server=NULL, user=NULL, password=NULL) {
  #== CHECK PARAMETERS
  # NOTE: Look at utils.R file for code for checks
  # check that server, user, password are non-missing and strings
  check_server_params(server)
  check_server_params(user)
  check_server_params(password)
  # check internet connection
  check_internet()
  # trim and lower server prefix
  server <- tolower(trimws(server))
  # build server URL and confirm that the server exists
  server_url <- paste0("https://", server, ".mysurvey.solutions")
  check_server(server_url)
  # build base URL for API and the questionnaires endpoint
  api_url <- paste0(server_url, "/api/v1")
  endpoint <- paste0(api_url, "/questionnaires")
  # number of questionnaires returned per request; the code treats
  # `offset` as a 1-based page index (first request uses offset = 1)
  page_size <- 40
  # Send GET request to API for the first page
  data <- httr::GET(endpoint, httr::authenticate(user, password),
                    query = list(limit = page_size, offset = 1))
  # If response code is 200, request was successfully processed
  if (httr::status_code(data) == 200) {
    # save the list of imported templates from the API as a data frame
    qnrList <- jsonlite::fromJSON(httr::content(data, as = "text"), flatten = TRUE)
    qnrList_temp <- as.data.frame(qnrList$Questionnaires)
    if (qnrList$TotalCount <= page_size) {
      # everything fit on the first page, so no further calls are needed;
      # sort questionnaires by title then version
      qnrList_all <- dplyr::arrange(qnrList_temp, .data$Title, .data$Version)
    } else {
      # accumulate pages in a list to avoid growing a data frame in a loop
      quest_more <- list(qnrList_temp)
      # total number of pages needed to retrieve all questionnaires
      nquery <- ceiling(qnrList$TotalCount / page_size)
      # request the remaining pages (2..nquery)
      for (i in 2:nquery) {
        data2 <- httr::GET(endpoint, httr::authenticate(user, password),
                           query = list(limit = page_size, offset = i))
        qnrList_more <- jsonlite::fromJSON(httr::content(data2, as = "text"),
                                           flatten = TRUE)
        # append this page's data frame to the list
        quest_more[[i]] <- as.data.frame(qnrList_more$Questionnaires)
      }
      qnrList_temp <- dplyr::bind_rows(quest_more)
      qnrList_all <- dplyr::arrange(qnrList_temp, .data$Title, .data$Version)
    }
    # return data frame of questionnaires, sorted by Title and Version
    return(qnrList_all)
  } else if (httr::status_code(data) == 401) { # login error
    stop("Incorrect username or password.")
  } else { # any other status code: surface it to the caller
    stop("Encountered issue with status code ", httr::status_code(data))
  }
}
|
context("Check xrefs")
test_that("Check case-sensitivity", {
  # All fixtures live in the check-xrefs directory next to the tests.
  fixture <- function(name) file.path(".", "check-xrefs", name)
  # These fixture/policy combinations are expected to raise an error.
  expect_error(check_xrefs(fixture("case-sensitive.tex"), permitted.case = "upper"))
  expect_error(check_xrefs(fixture("case-sensitive-C.tex"), permitted.case = "upper"))
  expect_error(check_xrefs(fixture("case-sensitive-lower.tex"), permitted.case = "lower"))
  expect_error(check_xrefs(fixture("case-sensitive-lower-C.tex"), permitted.case = "lower"))
  # These combinations are expected to pass (check_xrefs returns NULL).
  expect_null(check_xrefs(fixture("case-sensitive-C.tex"), permitted.case = "lower"))
  expect_null(check_xrefs(fixture("case-sensitive-C.tex")))
  # Mixed-case fixture fails under the default policy but passes when the
  # case check is disabled via permitted.case = NULL.
  expect_error(check_xrefs(fixture("case-sensitive-both.tex")))
  expect_null(check_xrefs(fixture("case-sensitive-both.tex"), permitted.case = NULL))
})
test_that("Literal xrefs are detected", {
  clean_file <- "./check-xrefs/no-literals-xrefs.tex"
  dirty_file <- "./check-xrefs/literals-xrefs.tex"
  # A file without hard-coded cross-references passes silently.
  expect_null(check_literal_xrefs(clean_file))
  # A file containing literal cross-references triggers the diagnostic.
  expect_error(check_literal_xrefs(dirty_file),
               regexp = "Hard-coded xref")
})
| /tests/testthat/test_check_xrefs.R | no_license | HughParsonage/TeXCheckR | R | false | false | 996 | r | context("Check xrefs")
# Duplicate copy of the check_xrefs test file (dataset artifact).
test_that("Check case-sensitivity", {
# These fixture/policy combinations are expected to raise an error.
expect_error(check_xrefs("./check-xrefs/case-sensitive.tex", permitted.case = "upper"))
expect_error(check_xrefs("./check-xrefs/case-sensitive-C.tex", permitted.case = "upper"))
expect_error(check_xrefs("./check-xrefs/case-sensitive-lower.tex", permitted.case = "lower"))
expect_error(check_xrefs("./check-xrefs/case-sensitive-lower-C.tex", permitted.case = "lower"))
# These combinations are expected to pass (check_xrefs returns NULL).
expect_null(check_xrefs("./check-xrefs/case-sensitive-C.tex", permitted.case = "lower"))
expect_null(check_xrefs("./check-xrefs/case-sensitive-C.tex"))
# Mixed-case fixture fails by default but passes when the case check
# is disabled via permitted.case = NULL.
expect_error(check_xrefs("./check-xrefs/case-sensitive-both.tex"))
expect_null(check_xrefs("./check-xrefs/case-sensitive-both.tex", permitted.case = NULL))
})
test_that("Literal xrefs are detected", {
# Clean file passes; a file with hard-coded cross-references errors
# with the "Hard-coded xref" diagnostic.
expect_null(check_literal_xrefs("./check-xrefs/no-literals-xrefs.tex"))
expect_error(check_literal_xrefs("./check-xrefs/literals-xrefs.tex"),
regexp = "Hard-coded xref")
})
|
library(tidyverse)
library(economiccomplexity)
# Read every per-country RDS file and stack the results into one data frame.
country_files <- dir("data/country/", full.names = TRUE)
data_yrpc <- map_df(country_files, readRDS)
# data_yrpc <- data_yrpc %>%
#   mutate_if(is.numeric, replace_na, 0)
# Print rows whose community name is missing (console inspection only) ...
filter(data_yrpc, is.na(community_name))
# ... then drop those rows from the working data.
data_yrpc <- filter(data_yrpc, !is.na(community_name))
# Further console sanity checks: rows aggregated over all partners,
# and rows with zero combined trade flow.
filter(data_yrpc, partner_iso == "all")
filter(data_yrpc, export_value_usd + import_value_usd == 0)
# Build a year-level panel: for each year, total trade per reporter, a
# standardized economic complexity index, and concentration-based
# diversity measures for exports and imports.
data_yr <- data_yrpc %>%
distinct(year) %>%
pull() %>%
map_df(function(y = 2000){
message(y)
# AUXILIARY DATA
# Aggregate flows to year x reporter x community for the current year.
dux <- data_yrpc %>%
filter(year == y) %>%
group_by(year, reporter_iso, community_name) %>%
summarise(
export_value_usd = sum(export_value_usd, na.rm = TRUE),
import_value_usd = sum(import_value_usd, na.rm = TRUE)
) %>%
filter(TRUE)
# COMPLEXITY
# Revealed comparative advantage (Balassa index) on exports, then the
# country complexity index derived from it.
rca <- balassa_index(dux, country = "reporter_iso", product = "community_name", value = "export_value_usd", discrete = TRUE)
com <- complexity_measures(rca)
dcx <- tibble(
reporter_iso = names(com$complexity_index_country),
complexity_index_country = com$complexity_index_country
)
# Standardize the complexity index to zero mean and unit variance
# within the year.
dcx <- dcx %>%
mutate(
complexity_index_country = (complexity_index_country - mean(complexity_index_country))/sd(complexity_index_country)
)
# DIVERSITY
# One minus the sum of squared shares (Gini-Simpson style measure):
# x <- c(100, 100, 100, 100)
# x <- c(100, 100, 100, 90000)
# 1 - sum((x/sum(x))^2)
div <- dux %>%
group_by(reporter_iso) %>%
summarise_at(
vars(export_value_usd, import_value_usd),
.funs = list(diversity = function(x) 1 - sum((x/sum(x))^2))
)
# JOIN
# Totals per year x reporter, then attach complexity and diversity.
dout <- dux %>%
group_by(year, reporter_iso) %>%
summarise_at(vars(export_value_usd, import_value_usd), sum) %>%
ungroup()
# NOTE(review): replace_na(0) after the join fills reporters missing a
# complexity score with 0, which is also the standardized mean --
# confirm this zero-fill is intended.
dout <- dout %>%
left_join(dcx, by = "reporter_iso") %>%
mutate_if(is.numeric, replace_na, 0)
dout <- dout %>%
left_join(div, by = "reporter_iso")
dout
})
# data_yrpc %>%
# group_split(reporter_iso)
#
# data_yr %>%
# group_split(reporter_iso)
# Persist both panels; xz compression trades write speed for small files.
saveRDS(data_yrpc, "data/yrpc.rds", compress = "xz")
saveRDS(data_yr, "data/yr.rds", compress = "xz")
| /R/01-process-data.R | no_license | yuster0/trd-sttstcs | R | false | false | 2,214 | r | library(tidyverse)
# Duplicate copy of R/01-process-data.R (dataset artifact). Builds a
# year-level panel with trade totals, standardized complexity index,
# and diversity measures per reporter, then saves both panels.
library(economiccomplexity)
# Stack all per-country RDS files into a single data frame.
data_yrpc <- dir("data/country/", full.names = TRUE) %>%
map_df(readRDS)
# data_yrpc <- data_yrpc %>%
# mutate_if(is.numeric, replace_na, 0)
# Console inspection of missing community names, then drop them.
data_yrpc %>%
filter(is.na(community_name))
data_yrpc <- data_yrpc %>%
filter(!is.na(community_name))
# Console sanity checks: all-partner rows and zero-trade rows.
data_yrpc %>%
filter(partner_iso == "all")
data_yrpc %>%
filter(export_value_usd + import_value_usd == 0)
# One row per year x reporter with totals, complexity, and diversity.
data_yr <- data_yrpc %>%
distinct(year) %>%
pull() %>%
map_df(function(y = 2000){
message(y)
# AUXILIARY DATA: aggregate to year x reporter x community.
dux <- data_yrpc %>%
filter(year == y) %>%
group_by(year, reporter_iso, community_name) %>%
summarise(
export_value_usd = sum(export_value_usd, na.rm = TRUE),
import_value_usd = sum(import_value_usd, na.rm = TRUE)
) %>%
filter(TRUE)
# COMPLEXITY: Balassa index on exports, then country complexity index,
# standardized to zero mean / unit variance within the year.
rca <- balassa_index(dux, country = "reporter_iso", product = "community_name", value = "export_value_usd", discrete = TRUE)
com <- complexity_measures(rca)
dcx <- tibble(
reporter_iso = names(com$complexity_index_country),
complexity_index_country = com$complexity_index_country
)
dcx <- dcx %>%
mutate(
complexity_index_country = (complexity_index_country - mean(complexity_index_country))/sd(complexity_index_country)
)
# DIVERSITY: one minus the sum of squared shares.
# x <- c(100, 100, 100, 100)
# x <- c(100, 100, 100, 90000)
# 1 - sum((x/sum(x))^2)
div <- dux %>%
group_by(reporter_iso) %>%
summarise_at(
vars(export_value_usd, import_value_usd),
.funs = list(diversity = function(x) 1 - sum((x/sum(x))^2))
)
# JOIN: totals per year x reporter, then attach complexity and diversity.
dout <- dux %>%
group_by(year, reporter_iso) %>%
summarise_at(vars(export_value_usd, import_value_usd), sum) %>%
ungroup()
# NOTE(review): replace_na(0) fills reporters without a complexity score
# with 0 (the standardized mean) -- confirm intended.
dout <- dout %>%
left_join(dcx, by = "reporter_iso") %>%
mutate_if(is.numeric, replace_na, 0)
dout <- dout %>%
left_join(div, by = "reporter_iso")
dout
})
# data_yrpc %>%
# group_split(reporter_iso)
#
# data_yr %>%
# group_split(reporter_iso)
# Persist both panels with xz compression.
saveRDS(data_yrpc, "data/yrpc.rds", compress = "xz")
saveRDS(data_yr, "data/yr.rds", compress = "xz")
|
library(tidyverse)
library(synapser)
library(random)
# Log in to Synapse (uses cached credentials / .synapseConfig).
synLogin()
# Download the zipped dose-response data, list the per-compound CSVs
# inside, extract them, and read each into a data frame.
dr_zip <- synGet("syn21036458")$path
dr_files <- unzip(dr_zip, list = T)$Name
dr_files <- dr_files[grep("dose-responses/[A-Za-z0-9-]+.csv",dr_files)]
dr_paths <- unzip(dr_zip, files = dr_files)
dr <- purrr::map(dr_paths, readr::read_csv)
# Mapping from compound names to anonymized ids; DMSO and UNTREATED
# controls get fixed ids of their own.
cmpd_ids <- synGet("syn21197825")$path %>% read_csv
id_map <- cmpd_ids %>%
add_row(cmpd_id = "cmpd_dmso", cmpd = "DMSO") %>%
add_row(cmpd_id = "cmpd_untreated", cmpd = "UNTREATED")
# Named vector for str_replace_all: names = compound names, values = ids.
id_map_vec <- id_map$cmpd_id
names(id_map_vec) <- id_map$cmpd
# Rename the unnamed dose column and replace compound names appearing in
# the remaining column headers with their anonymized ids.
# NOTE(review): str_replace_all replaces substrings, so a compound name
# that is a prefix/substring of another could be replaced inside the
# longer header -- confirm compound names are substring-free.
dr_anon <- lapply(dr, function(x){
x <- x %>%
rename(dose_log10_uM = X1)
colnames(x) <- stringr::str_replace_all(colnames(x), id_map_vec)
x
})
# Write each anonymized table to the working directory (file name taken
# from the extracted path) and collect the written paths.
paths <- sapply(dr_paths, function(x){
path<-str_extract(x, "[A-Za-z0-9-]+.csv") %>% print()
write_csv(dr_anon[[which(dr_paths == x)]], path)
path
})
# Zip the concealed CSVs and upload the archive to Synapse.
zipped_path<-zip('dose_response_concealed.zip',paths)
synStore(File("dose_response_concealed.zip", parentId = "syn21036376"))
| /infra/R/data_gen/rename_dose_response_data.R | permissive | Sage-Bionetworks-Challenges/CTD2-Panacea-Challenge | R | false | false | 1,022 | r | library(tidyverse)
# Duplicate copy of rename_dose_response_data.R (dataset artifact):
# downloads dose-response CSVs from Synapse, anonymizes compound names
# in the column headers, and re-uploads the concealed archive.
library(synapser)
library(random)
synLogin()
# Fetch the zip, list and extract the per-compound CSVs, and read them.
dr_zip <- synGet("syn21036458")$path
dr_files <- unzip(dr_zip, list = T)$Name
dr_files <- dr_files[grep("dose-responses/[A-Za-z0-9-]+.csv",dr_files)]
dr_paths <- unzip(dr_zip, files = dr_files)
dr <- purrr::map(dr_paths, readr::read_csv)
# Compound-name -> anonymized-id mapping, with fixed ids for controls.
cmpd_ids <- synGet("syn21197825")$path %>% read_csv
id_map <- cmpd_ids %>%
add_row(cmpd_id = "cmpd_dmso", cmpd = "DMSO") %>%
add_row(cmpd_id = "cmpd_untreated", cmpd = "UNTREATED")
id_map_vec <- id_map$cmpd_id
names(id_map_vec) <- id_map$cmpd
# Rename the dose column and substitute compound names in headers.
dr_anon <- lapply(dr, function(x){
x <- x %>%
rename(dose_log10_uM = X1)
colnames(x) <- stringr::str_replace_all(colnames(x), id_map_vec)
x
})
# Write each anonymized CSV locally and collect the written paths.
paths <- sapply(dr_paths, function(x){
path<-str_extract(x, "[A-Za-z0-9-]+.csv") %>% print()
write_csv(dr_anon[[which(dr_paths == x)]], path)
path
})
# Zip and upload the concealed data to Synapse.
zipped_path<-zip('dose_response_concealed.zip',paths)
synStore(File("dose_response_concealed.zip", parentId = "syn21036376"))
|
\name{NEWS}
\title{News for Package 'Rcpp'}
\newcommand{\cpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}}
\section{Changes in [unreleased] Rcpp version 0.10.7 (2013-11-30)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item New class \code{StretchyList} for pair lists with fast addition of
elements at the front and back. This abstracts the 3 functions
\code{NewList}, \code{GrowList} and \code{Insert} used in various
packages and in parsers in R.
\item The function \code{dnt}, \code{pnt}, \code{qnt} sugar
functions were incorrectly expanding to the no-degree-of-freedoms
variant.
\item Unit tests for \code{pnt} were added.
\item The sugar table function did not handle NAs and NaNs properly
for numeric vectors. Fixed and tests added.
\item The internal coercion mechanism mapping numerics to strings has
been updated to better match \R (specifically with \code{Inf}, \code{-Inf},
and \code{NaN}.)
\item Applied two bug fixes to Vector \code{sort()} and \code{RObject}
definition spotted and correct by Kevin Ushey
}
\item Changes in Rcpp documentation:
\itemize{
\item The Rcpp-FAQ vignette have been updated and expanded.
}
}
}
\section{Changes in Rcpp version 0.10.6 (2013-10-27)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item The function \code{exposeClass} takes a description of the
constructors, fields and methods to be exposed from a C++
class, and writes C++ and R files in the package. Inherited
classes can be dealt with, but require data type information.
This approach avoids hand-coding module files.
\item Two missing \code{is<>()} templates for
\code{CharacterVector} and \code{CharacterMatrix} have been added,
and some tests for \code{is_na()} and \code{is_finite()} have been
corrected thanks to Thomas Tse.
}
\item Changes in R code:
\itemize{
\item Export linking helper function \code{LdFlags} as well as
\code{RcppLdFlags}.
\item Function \code{Rcpp.package.skeleton()} no longer passes a
\code{namespace} argument on to \code{package.skeleton()}
}
\item Changes in R setup:
\itemize{
\item Raise requirement for R itself to be version 3.0.0 or later
as needed by the vignette processing
}
\item Changes in Rcpp attributes:
\itemize{
\item \code{sourceCpp} now correctly binds to Rtools 3.0 and 3.1
}
}
}
\section{Changes in Rcpp version 0.10.5 (2013-09-28)}{
\itemize{
\item Changes in R code:
\itemize{
\item New R function \code{demangle} that calls the \code{DEMANGLE} macro.
\item New R function \code{sizeof} to query the byte size of a type. This
returns an object of S3 class \code{bytes} that has a \code{print} method
showing bytes and bits.
}
\item Changes in Rcpp API:
\itemize{
\item Add \code{defined(__sun)} to lists of operating systems to
test for when checking for lack of \code{backtrace()} needed for
stack traces.
\item \code{as<T*>}, \code{as<const T*>}, \code{as<T&>} and
\code{as<const T&>} are now supported, when
T is a class exposed by modules, i.e. with \code{RCPP_EXPOSED_CLASS}
\item \code{DoubleVector} as been added as an alias to
\code{NumericVector}
\item New template function \code{is<T>} to identify if an R object
can be seen as a \code{T}. For example \code{is<DataFrame>(x)}.
This is a building block for more expressive dispatch in various places
(modules and attributes functions).
\item \code{wrap} can now handle more types, i.e. types that iterate over
\code{std::pair<const KEY, VALUE>} where KEY can be converted to a
\code{String} and \code{VALUE} is either a primitive type (int, double)
or a type that wraps. Examples :
\itemize{
\item \code{std::map<int, double>} : we can make a String from an int,
and double is primitive
\item \code{boost::unordered_map<double, std::vector<double> >}: we can make
a String from a double and \code{std::vector<double>} can wrap itself
}
Other examples of this are included at the end of the \code{wrap} unit test
file (\code{runit.wrap.R} and \code{wrap.cpp}).
\item \code{wrap} now handles containers of classes handled by modules. e.g.
if you expose a class \code{Foo} via modules, then you can wrap
\code{vector<Foo>}, ... An example is included in the \code{wrap} unit test
file
\item \code{RcppLdFlags()}, often used in \code{Makevars} files of
packages using \pkg{Rcpp}, is now exported from the package namespace.
}
\item Changes in Attributes:
\itemize{
\item Objects exported by a module (i.e. by a \code{RCPP_MODULE} call
in a file that is processed by \code{sourceCpp}) are now directly
available in the environment. We used to make the module object
available, which was less useful.
\item A plugin for \code{openmp} has been added to support use of OpenMP.
\item \code{Rcpp::export} now takes advantage of the more flexible
\code{as<>}, handling constness and referenceness of the input types.
For users, it means that for the parameters of function exported by modules,
we can now use references, pointers and const versions of them.
The file \code{Module.cpp} file has an example.
\item{No longer call non-exported functions from the tools package}
\item{No longer search the inline package as a fallback when loading
plugins for the the \code{Rcpp::plugins} attribute}.
}
\item Changes in Modules:
\itemize{
\item We can now expose functions and methods that take
\code{T&} or \code{const T&} as arguments. In these situations
objects are no longer copied as they used to be.
}
\item Changes in sugar:
\itemize{
\item \code{is_na} supports classes \code{DatetimeVector} and
\code{DateVector}
}
\item Changes in Rcpp documentation:
\itemize{
\item The vignettes have been moved from \code{inst/doc/} to the
\code{vignettes} directory which is now preferred.
\item The appearance of the vignettes has been refreshed by
switching to the Bistream Charter font, and microtype package.
}
\item Deprecation of \code{RCPP_FUNCTION_*}:
\itemize{
\item The macros from the \code{preprocessor_generated.h} file
have been deprecated. They are still available, but they print a
message in addition to their expected behavior.
\item The macros will be permanently removed in the first \pkg{Rcpp}
release after July 2014.
\item Users of these macros should start replacing them with more
up-to-date code, such as using 'Rcpp attributes' or 'Rcpp modules'.
}
}
}
\section{Changes in Rcpp version 0.10.4 (2013-06-23)}{
\itemize{
\item Changes in R code: None beyond those detailed for Rcpp Attributes
\item Changes in Rcpp attributes:
\itemize{
\item Fixed problem whereby the interaction between the gc and the
RNGScope destructor could cause a crash.
\item Don't include package header file in generated C++ interface
header files.
\item Lookup plugins in \pkg{inline} package if they aren't found
within the \pkg{Rcpp} package.
\item Disallow compilation for files that don't have extensions
supported by R CMD SHLIB
}
\item Changes in Rcpp API:
\itemize{
\item The \code{DataFrame::create} set of functions has been reworked
to just use \code{List::create} and feed to the \code{DataFrame}
constructor
\item The \code{operator-()} semantics for \code{Date} and
\code{Datetime} are now more inline with standard C++ behaviour;
with thanks to Robin Girard for the report.
\item RNGScope counter now uses unsigned long rather than int.
\item \code{Vector<*>::erase(iterator, iterator)} was fixed. Now
it does not remove the element pointed by last (similar to what is
done on stl types and what was intended initially). Reported on
Rcpp-devel by Toni Giorgino.
\item Added equality operator between elements of
\code{CharacterVector}s.
}
\item Changes in Rcpp sugar:
\itemize{
\item New function \code{na_omit} based on the StackOverflow thread
\url{http://stackoverflow.com/questions/15953768/}
\item New function \code{is_finite} and \code{is_infinite} that
reproduces the behavior of R's \code{is.finite} and
\code{is.infinite} functions
}
\item Changes in Rcpp build tools:
\itemize{
\item Fix by Martyn Plummer for Solaris in handling of
\code{SingleLogicalResult}.
\item The \code{src/Makevars} file can now optionally override the
path for \code{/usr/bin/install_name_tool} which is used on OS X.
\item Vignettes are trying harder not to be built in parallel.
}
\item Changes in Rcpp documentation:
\itemize{
\item Updated the bibliography in \code{Rcpp.bib} (which is also
sourced by packages using Rcpp).
\item Updated the \code{THANKS} file.
}
\item Planned Deprecation of \code{RCPP_FUNCTION_*}:
\itemize{
\item The set of macros \code{RCPP_FUNCTION_} etc ... from the
\code{preprocessor_generated.h} file will be deprecated in the next version
of \pkg{Rcpp}, i.e they will still be available but will generate some
warning in addition to their expected behavior.
\item In the first release that is at least 12 months after this announcement, the
macros will be removed from \pkg{Rcpp}.
\item Users of these macros (if there are any) should start replacing them
with more up to date code, such as using Rcpp attributes or Rcpp
modules.
}
}
}
\section{Changes in Rcpp version 0.10.3 (2013-03-23)}{
\itemize{
\item Changes in R code:
\itemize{
\item Prevent build failures on Windows when Rcpp is installed
in a library path with spaces (transform paths in the same manner
that R does before passing them to the build system).
}
\item Changes in Rcpp attributes:
\itemize{
\item Rcpp modules can now be used with \code{sourceCpp}
\item Standalone roxygen chunks (e.g. to document a class) are now
transposed into RcppExports.R
\item Added \code{Rcpp::plugins} attribute for binding
directly to inline plugins. Plugins can be registered using
the new \code{registerPlugin} function.
\item Added built-in \code{cpp11} plugin for specifying
the use of C++11 in a translation unit
\item Merge existing values of build related environment
variables for sourceCpp
\item Add global package include file to RcppExports.cpp
if it exists
\item Stop with an error if the file name passed to
\code{sourceCpp} has spaces in it
\item Return invisibly from void functions
\item Ensure that line comments invalidate block comments when
parsing for attributes
\item Eliminated spurious empty hello world function definition
in Rcpp.package.skeleton
}
\item Changes in Rcpp API:
\itemize{
\item The very central use of R API R_PreserveObject and
R_ReleaseObject has been replaced by a new system based on the
functions Rcpp_PreserveObject, Rcpp_ReleaseObject and Rcpp_ReplaceObject
which shows better performance and is implemented using a generic vector
treated as a stack instead of a pairlist in the R
implementation. However, as this preserve / release code is still
a little rough at the edges, a new #define is used (in config.h)
to disable it for now.
\item Platform-dependent code in Timer.cpp now recognises a few
more BSD variants thanks to contributed defined() test suggestions
\item Support for wide character strings has been added throughout the
API. In particular String, CharacterVector, wrap and as are aware of
wide character strings
}
}
}
\section{Changes in Rcpp version 0.10.2 (2012-12-21)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item Source and header files were reorganized and consolidated so
that compile time are now significantly lower
\item Added additional check in \code{Rstreambuf} deletion
\item Added support for \code{clang++} when using \code{libc++},
and for the Intel \code{icpc} in \code{std=c++11} mode, thanks to a
patch by Yan Zhou
\item New class \code{Rcpp::String} to facilitate working with a single
element of a character vector
\item New utility class sugar::IndexHash inspired from Simon
Urbanek's fastmatch package
\item Implementation of the equality operator between two Rcomplex
\item \code{RNGScope} now has an internal counter that enables it
to be safely used multiple times in the same stack frame.
\item New class \code{Rcpp::Timer} for benchmarking
}
\item Changes in Rcpp sugar:
\itemize{
\item More efficient version of \code{match} based on \code{IndexHash}
\item More efficient version of \code{unique} base on \code{IndexHash}
\item More efficient version of \code{in} base on \code{IndexHash}
\item More efficient version of \code{duplicated} base on \code{IndexHash}
\item More efficient version of \code{self_match} base on \code{IndexHash}
\item New function \code{collapse} that implements paste(., collapse= "" )
}
\item Changes in Rcpp attributes:
\itemize{
\item Use code generation rather than modules to implement
\code{sourceCpp} and \code{compileAttributes} (eliminates
problem with exceptions not being able to cross shared library
boundaries on Windows)
\item Exported functions now automatically establish an \code{RNGScope}
\item Functions exported by \code{sourceCpp} now directly
reference the external function pointer rather than rely on
dynlib lookup
\item On Windows, Rtools is automatically added to the PATH
during \code{sourceCpp} compilations
\item Diagnostics are printed to the console if \code{sourceCpp}
fails and C++ development tools are not installed
\item A warning is printed if when \code{compileAttributes} detects
\code{Rcpp::depends} attributes in source files that are not
matched by Depends/LinkingTo entries in the package DESCRIPTION
}
}
}
\section{Changes in Rcpp version 0.10.1 (2012-11-26)}{
\itemize{
\item Changes in Rcpp sugar:
\itemize{
\item New functions: \code{setdiff}, \code{union_}, \code{intersect}
\code{setequal}, \code{in}, \code{min}, \code{max}, \code{range},
\code{match}, \code{table}, \code{duplicated}
\item New function: \code{clamp} which combines pmin and pmax, e.g.
clamp( a, x, b) is the same as pmax( b, pmin(x, a) )
\item New function: \code{self_match} which implements something
similar to \code{match( x, unique( x ) )}
}
\item Changes in Rcpp API:
\itemize{
\item The \code{Vector} template class (hence \code{NumericVector}
...) get the \code{is_na} and the \code{get_na} static methods.
\item New helper class \code{no_init} that can be used to
create a vector without initializing its data, e.g. :
\code{ IntegerVector out = no_init(n) ; }
\item New exception constructor requiring only a message; \code{stop}
function to throw an exception
\item \code{DataFrame} gains a \code{nrows} method
}
\item Changes in Rcpp attributes:
\itemize{
\item Ability to embed R code chunks (via specially formatted
block comments) in C++ source files.
\item Allow specification of argument defaults for exported functions.
\item New scheme for more flexible mixing of generated and user composed
C++ headers.
\item Print warning if no export attributes are found in source file.
\item Updated vignette with additional documentation on exposing
C++ interfaces from packages and signaling errors.
}
\item Changes in Rcpp modules:
\itemize{
\item Enclose .External invocations in \code{BEGIN_RCPP}/\code{END_RCPP}
}
\item Changes in R code :
\itemize{
\item New function \code{areMacrosDefined}
\item Additions to \code{Rcpp.package.skeleton}:
\itemize{
\item \code{attributes} parameter to generate a version of
\code{rcpp_hello_world} that uses \code{Rcpp::export}.
\item \code{cpp_files} parameter to provide a list of C++
files to include the in the \code{src} directory of the package.
}
}
\item Miscellaneous changes:
\itemize{
\item New example 'pi simulation' using R and C++ via Rcpp attributes
}
}
}
\section{Changes in Rcpp version 0.10.0 (2012-11-13)}{
\itemize{
\item Support for C++11 style attributes (embedded in comments) to enable
use of C++ within interactive sessions and to automatically generate module
declarations for packages:
\itemize{
\item Rcpp::export attribute to export a C++ function to R
\item \code{sourceCpp()} function to source exported functions from a file
\item \code{cppFunction()} and \code{evalCpp()} functions for inline declarations
and execution
\item \code{compileAttributes()} function to generate Rcpp modules from
exported functions within a package
\item Rcpp::depends attribute for specifying additional build
dependencies for \code{sourceCpp()}
\item Rcpp::interfaces attribute to specify the external bindings
\code{compileAttributes()} should generate (defaults to R-only but a
C++ include file using R_GetCCallable can also be generated)
\item New vignette "Rcpp-attributes"
}
\item Rcpp modules feature set has been expanded:
\itemize{
\item Functions and methods can now return objects from classes that
are exposed through modules. This uses the make_new_object template
internally. This feature requires that some class traits are declared
to indicate Rcpp's \code{wrap}/\code{as} system that these classes are covered
by modules. The macro RCPP_EXPOSED_CLASS and RCPP_EXPOSED_CLASS_NODECL
can be used to declared these type traits.
\item Classes exposed through modules can also be used as parameters
of exposed functions or methods.
\item Exposed classes can declare factories with ".factory". A factory
is a c++ function that returns a pointer to the target class. It is
assumed that these objects are allocated with new on the factory. On the
R side, factories are called just like other constructors, with the
"new" function. This feature allows an alternative way to construct
objects.
\item "converter" can be used to declare a way to convert an object
of a type to another type. This gets translated to the appropriate
"as" method on the R side.
\item Inheritance. A class can now declare that it inherits from
another class with the .derives<Parent>( "Parent" ) notation. As a result
the exposed class gains methods and properties (fields) from its
parent class.
}
\item New sugar functions:
\itemize{
\item \code{which_min} implements which.min. Traversing the sugar expression
and returning the index of the first time the minimum value is found.
\item \code{which_max} idem
\item \code{unique} uses unordered_set to find unique values. In particular,
the version for CharacterVector is found to be more efficient than
R's version
\item \code{sort_unique} calculates unique values and then sorts them.
}
\item Improvements to output facilities:
\itemize{
\item Implemented \code{sync()} so that flushing output streams works
\item Added \code{Rcerr} output stream (forwarding to
\code{REprintf})
}
\item Provide a namespace 'R' for the standalone Rmath library so
that Rcpp users can access those functions too; also added unit tests
\item Development releases sets variable RunAllRcppTests to yes to
run all tests (unless it was already set to 'no'); CRAN releases do
not and still require setting -- which helps with the desired CRAN
default of less testing at the CRAN server farm.
}
}
\section{Changes in Rcpp version 0.9.15 (2012-10-13)}{
\itemize{
\item Untangling the clang++ build issue about the location of the
exceptions header by directly checking for the include file -- an
approach provided by Martin Morgan in a kindly contributed patch
as unit tests for them.
\item The \code{Date} and \code{Datetime} types now correctly
handle \code{NA}, \code{NaN} and \code{Inf} representation; the
\code{Date} type switched to an internal representation via \code{double}
\item Added \code{Date} and \code{Datetime} unit tests for the new
features
\item An additional \code{PROTECT} was added for parsing exception
messages before returning them to R, following a report by Ben North
}
}
\section{Changes in Rcpp version 0.9.14 (2012-09-30)}{
\itemize{
\item Added new Rcpp sugar functions trunc(), round() and signif(), as well
as unit tests for them
\item Be more conservative about where we support clang++ and the inclusion
of exception_defines.h and prevent this from being attempted on OS X
where it failed for clang 3.1
\item Corrected a typo in Module.h which now again permits use of finalizers
\item Small correction for (unexported) bib() function (which provides a path
to the bibtex file that ships with Rcpp)
\item Converted NEWS to NEWS.Rd
}
}
\section{Changes in Rcpp version 0.9.13 (2012-06-28)}{
\itemize{
\item Truly corrected Rcpp::Environment class by having default constructor
use the global environment, and removing the default argument of
global environment from the SEXP constructor
\item Added tests for clang++ version to include bits/exception_defines.h
for versions 3.0 or higher (similar to g++ 4.6.0 or later), needed to
include one particular exceptions header
\item Made more regression tests conditional on the RunAllRcppTests to come
closer to the CRAN mandate of running tests in sixty seconds
\item Updated unit test wrapper tests/doRUnit.R as well as unitTests/runTests.R
}
}
\section{Changes in Rcpp version 0.9.12 (2012-06-23)}{
\itemize{
\item Corrected Rcpp::Environment class by removing (empty) ctor following
rev3592 (on May 2) where default argument for ctor was moved
\item Unit testing now checks for environment variable RunAllRcppTests being
set to "yes"; otherwise some tests are skipped. This is arguably not
the right thing to do, but CRAN maintainers insist on faster tests.
\item Unit test wrapper script runTests.R has new option --allTests to set
the environment variable
\item The cleanup script now also considers inst/unitTests/testRcppClass/src
}
}
\section{Changes in Rcpp version 0.9.11 (2012-06-22)}{
\itemize{
\item New member function for vectors (and lists etc) containsElementNamed()
which returns a boolean indicating if the given element name is present
\item Updated the Rcpp.package.skeleton() support for Rcpp modules by
carrying functions already present from the corresponding unit test
which was also slightly expanded; and added more comments to the code
\item Rcpp modules can now be loaded via loadRcppModules() from .onLoad(),
or via loadModule("moduleName") from any R file
\item Extended functionality to let R modify C++ clases imported via modules
documented in help(setRcppClass)
\item Support compilation in Cygwin thanks to a patch by Dario Buttari
\item Extensions to the Rcpp-FAQ and the Rcpp-modules vignettes
\item The minium version of R is now 2.15.1 which is required for some of
the Rcpp modules support
}
}
\section{Changes in Rcpp version 0.9.10 (2012-02-16)}{
\itemize{
\item Rearrange headers so that Rcpp::Rcout can be used by RcppArmadillo et al
\item New Rcpp sugar function mapply (limited to two or three input vectors)
\item Added custom version of the Rcpp sugar diff function for numeric vectors
skipping unnecessary checks for NA
\item Some internal code changes to reflect changes and stricter requirements
in R CMD check in the current R-devel versions
\item Corrected fixed-value initialization for IntegerVector (with thanks to
Gregor Kastner for spotting this)
\item New Rcpp-FAQ entry on simple way to set compiler option for cxxfunction
}
}
\section{Changes in Rcpp version 0.9.9 (2011-12-25)}{
\itemize{
\item Reverting the 'int64' changes from release 0.9.8 which adversely
affect packages using Rcpp: We will re-apply the 'int64' changes in a
way which should cooperate more easily with 'long' and 'unsigned long'.
\item Unit test output directory fallback changed to use Rcpp.Rcheck
\item Conditioned two unit tests to not run on Windows where they now break
whereas they passed before, and continue to pass on other OSs
}
}
\section{Changes in Rcpp version 0.9.8 (2011-12-21)}{
\itemize{
\item wrap now handles 64 bit integers (int64_t, uint64_t) and containers
of them, and Rcpp now depends on the int64 package (also on CRAN).
This work has been sponsored by the Google Open Source Programs
Office.
\item Added setRcppClass() function to create extended reference classes
with an interface to a C++ class (typically via Rcpp Module) which
can have R-based fields and methods in addition to those from the C++.
\item Applied patch by Jelmer Ypma which adds an output stream class
'Rcout' not unlike std::cout, but implemented via Rprintf to
cooperate with R and its output buffering.
\item New unit tests for pf(), pnf(), pchisq(), pnchisq() and pcauchy()
\item XPtr constructor now checks for corresponding type in SEXP
\item Updated vignettes for use with updated highlight package
\item Update linking command for older fastLm() example using external
Armadillo
}
}
\section{Changes in Rcpp version 0.9.7 (2011-09-29)}{
\itemize{
\item Applied two patches kindly provided by Martyn Plummer which provide
support for compilation on Solaris using the SunPro compiler
\item Minor code reorganisation in which exception specifiers are removed;
this effectively only implements a run-time (rather than compile-time)
check and is generally seen as a somewhat deprecated C++ idiom. Thanks
to Darren Cook for alerting us to this issue.
\item New example 'OpenMPandInline.r' in the OpenMP/ directory, showing how
to easily use OpenMP by modifying the RcppPlugin output
\item New example 'ifelseLooped.r' showing Rcpp can accelerate loops that may
be difficult to vectorise due to dependencies
\item New example directory examples/Misc/ regrouping the new example as
well as the fibonacci example added in Rcpp 0.9.6
\item New Rcpp-FAQ example warning of lossy conversion from 64-bit long
integer types into a 53-bit mantissa which has no clear fix yet.
\item New unit test for accessing a non-exported function from a namespace
}
}
\section{Changes in Rcpp version 0.9.6 (2011-07-26)}{
\itemize{
\item Added helper traits to facilitate implementation of the RcppEigen
package: The is_eigen_base traits identifies if a class derives from
EigenBase using SFINAE; and new dispatch layer was added to wrap() to
help RcppEigen
\item XPtr now accepts a second template parameter, which is a function
taking a pointer to the target class. This allows the developer to
supply his/her own finalizer. The template parameter has a default
value which retains the original behaviour (calling delete on the
pointer)
\item New example RcppGibbs, extending Sanjog Misra's Rcpp illustration of
Darren Wilkinson's comparison of MCMC Gibbs Sampler implementations;
also added short timing on Normal and Gaussian RNG draws between Rcpp
and GSL as R's rgamma() is seen to significantly slower
\item New example on recursively computing a Fibonacci number using Rcpp and
comparing this to R and byte-compiled R for a significant speed gain
}
}
\section{Changes in Rcpp version 0.9.5 (2011-07-05)}{
\itemize{
\item New Rcpp-FAQ examples on using the plugin maker for inline's
cxxfunction(), and on setting row and column names for matrices
\item New sugar functions: mean, var, sd
\item Minor correction and extension to STL documentation in Rcpp-quickref
\item wrap() is now resilient to NULL pointers passed as in const char *
\item loadRcppModules() gains a "direct" argument to expose the module instead
of exposing what is inside it
\item Suppress a spurious warning from R CMD check on packages created with
Rcpp.package.skeleton(..., module=TRUE)
\item Some fixes and improvements for Rcpp sugar function 'rlnorm()'
\item Beginnings of new example using OpenMP and recognising user interrupts
}
}
\section{Changes in Rcpp version 0.9.4 (2011-04-12)}{
\itemize{
\item New R function "loadRcppModules" to load Rcpp modules automatically
from a package. This function must be called from the .onLoad function
and works with the "RcppModules" field of the package's DESCRIPTION file
\item The Modules example wrapping the STL std::vector received some editing
to disambiguate some symbols the newer compilers did not like
\item Coercing of vectors of factors is now done with an explicit callback
to R's "as.character()" as Rf_coerceVector no longer plays along
\item A CITATION file for the published JSS paper has been added, and
references were added to Rcpp-package.Rd and the different vignettes
}
}
\section{Changes in Rcpp version 0.9.3 (2011-04-05)}{
\itemize{
\item Fixed a bug in which modules code was not behaving when compiled
twice as can easily happen with inline'ed version
\item Exceptions code includes exception_defines.h only when g++ is 4.5 or
younger as the file no longer exists with g++-4.6
\item The documentation Makefile now uses the $R_HOME environment variable
\item The documentation Makefile no longer calls clean in the all target
\item C++ conformance issue found by clang/llvm addressed by re-ordering
declarations in grow.h as unqualified names must be declared before
they are used, even when used within templates
\item The 'long long' typedef now depends on C++0x being enabled as this
was not a feature in C++98; this suppresses a new g++-4.5 warning
\item The Rcpp-introduction vignette was updated to the forthcoming JSS paper
}
}
\section{Changes in Rcpp version 0.9.2 (2011-02-23)}{
\itemize{
\item The unitTest runit.Module.client.package.R is now skipped on older OS
X releases as it triggers a bug with g++ 4.2.1 or older; OS X 10.6 is
fine but as it no longer supports ppc we try to accommodate 10.5 too
Thanks to Simon Urbanek for pinning this down and Baptiste Auguie
and Ken Williams for additional testing
\item RcppCommon.h now recognises the Intel Compiler thanks to a short
patch by Alexey Stukalov; this turns off Cxx0x and TR1 features too
\item Three more setup questions were added to the Rcpp-FAQ vignette
\item One question about RcppArmadillo was added to the Rcpp-FAQ vignette
}
}
\section{Changes in Rcpp version 0.9.1 (2011-02-14)}{
\itemize{
\item A number of internal changes to the memory allocation / protection of
temporary objects were made---with a heartfelt "Thank You!" to both
Doug Bates for very persistent debugging of Rcpp modules code, and to
Luke Tierney who added additional memory allocation debugging tools
to R-devel (which will be in R 2.13.0 and may also be in R 2.12.2)
\item Removed another GNU Make-specific variable from src/Makevars in order
to make the build more portable; this was noticed on FreeBSD
\item On *BSD, do not try to compute a stack trace but provide file and
line number (which is the same behaviour as implemented in Windows)
\item Fixed an int conversion bug reported by Daniel Sabanes Bove on r-devel,
added unit test as well
\item Added unit tests for complex-typed vectors (thanks to Christian Gunning)
\item Expanded the Rcpp-quickref vignette (with thanks to Christian Gunning)
\item Additional examples were added to the Rcpp-FAQ vignette
}
}
\section{Changes in Rcpp version 0.9.0 (2010-12-19)}{
\itemize{
\item The classic API was factored out into its own package RcppClassic which
is released concurrently with this version.
\item If an object is created but not initialized, attempting to use
it now gives a more sensible error message (by forwarding an
Rcpp::not_initialized exception to R).
\item SubMatrix fixed, and Matrix types now have a nested ::Sub typedef.
\item New unexported function SHLIB() to aid in creating a shared library on
the command-line or in Makefile (similar to CxxFlags() / LdFlags()).
\item Module gets a seven-argument ctor thanks to a patch from Tama Ma.
\item The (still incomplete) QuickRef vignette has grown thanks to a patch
by Christian Gunning.
\item Added a sprintf template intended for logging and error messages.
\item Date::getYear() corrected (where addition of 1900 was not called for);
corresponding change in constructor from three ints made as well.
\item Date() and Datetime() constructors from string received a missing
conversion to int and double following strptime. The default format
string for the Datetime() strptime call was also corrected.
\item A few minor fixes throughout, see ChangeLog.
}
}
\section{Changes in Rcpp version 0.8.9 (2010-11-27)}{
\itemize{
\item Many improvements were made in 'Rcpp modules':
- exposing multiple constructors
- overloaded methods
- self-documentation of classes, methods, constructors, fields and
functions.
- new R function "populate" to facilitate working with modules in
packages.
- formal argument specification of functions.
- updated support for Rcpp.package.skeleton.
- constructors can now take many more arguments.
\item The 'Rcpp-modules' vignette was updated as well and describes many
of the new features
\item New template class Rcpp::SubMatrix<RTYPE> and support syntax in Matrix
to extract a submatrix:
NumericMatrix x = ... ;
// extract the first three columns
SubMatrix<REALSXP> y = x( _ , Range(0,2) ) ;
// extract the first three rows
SubMatrix<REALSXP> y = x( Range(0,2), _ ) ;
// extract the top 3x3 sub matrix
SubMatrix<REALSXP> y = x( Range(0,2), Range(0,2) ) ;
\item Reference Classes no longer require a default constructor for
subclasses of C++ classes
\item Consistently revert to using backticks rather than shell expansion
to compute library file location when building packages against Rcpp
on the default platforms; this has been applied to internal test
packages as well as CRAN/BioC packages using Rcpp
}
}
\section{Changes in Rcpp version 0.8.8 (2010-11-01)}{
\itemize{
\item New syntactic shortcut to extract rows and columns of a Matrix.
x(i,_) extracts the i-th row and x(_,i) extracts the i-th column.
\item Matrix indexing is more efficient. However, faster indexing is
disabled if g++ 4.5.0 or later is used.
\item A few new Rcpp operators such as cumsum, operator=(sugar)
\item Variety of bug fixes:
- column indexing was incorrect in some cases
- compilation using clang/llvm (thanks to Karl Millar for the patch)
- instantiation order of Module corrected
- POSIXct, POSIXt now correctly ordered for R 2.12.0
}
}
\section{Changes in Rcpp version 0.8.7 (2010-10-15)}{
\itemize{
\item As of this version, Rcpp depends on R 2.12 or greater as it interfaces
the new reference classes (see below) and also reflects the POSIXt
class reordering both of which appeared with R version 2.12.0
\item new Rcpp::Reference class, that allows internal manipulation of R
2.12.0 reference classes. The class exposes a constructor that takes
the name of the target reference class and a field(string) method
that implements the proxy pattern to get/set reference fields using
callbacks to the R operators "$" and "$<-" in order to preserve the
R-level encapsulation
\item the R side of the preceding item allows methods to be written in R as
per ?ReferenceClasses, accessing fields by name and assigning them
using "<<-". Classes extracted from modules are R reference classes.
They can be subclassed in R, and/or R methods can be defined using
the $methods(...) mechanism.
\item internal performance improvements for Rcpp sugar as well as an added
'noNA()' wrapper to omit tests for NA values -- see the included
examples in inst/examples/convolveBenchmarks for the speedups
\item more internal performance gains with Functions and Environments
}
}
\section{Changes in Rcpp version 0.8.6 (2010-09-09)}{
\itemize{
\item new macro RCPP_VERSION and Rcpp_Version to allow conditional compiling
based on the version of Rcpp
#if defined(RCPP_VERSION) && RCPP_VERSION >= Rcpp_Version(0,8,6)
#endif
\item new sugar functions for statistical distributions (d-p-q-r functions)
with distributions : unif, norm, gamma, chisq, lnorm, weibull, logis,
f, pois, binom, t, beta.
\item new ctor for Vector taking size and function pointer so that for example
NumericVector( 10, norm_rand )
generates a N(0,1) vector of size 10
\item added binary operators for complex numbers, as well as sugar support
\item more sugar math functions: sqrt, log, log10, exp, sin, cos, ...
\item started new vignette Rcpp-quickref : quick reference guide of Rcpp API
(still work in progress)
\item various patches to comply with solaris/suncc stricter standards
\item minor enhancements to ConvolutionBenchmark example
\item simplified src/Makefile to no longer require GNU make; packages using
Rcpp still do for the compile-time test of library locations
}
}
\section{Changes in Rcpp version 0.8.5 (2010-07-25)}{
\itemize{
\item speed improvements. Vector::names, RObject::slot have been improved
to take advantage of R API functions instead of callbacks to R
\item Some small updates to the Rd-based documentation which now points to
content in the vignettes. Also a small formatting change to suppress
a warning from the development version of R.
\item Minor changes to Date() code which may reenable SunStudio builds
}
}
\section{Changes in Rcpp version 0.8.4 (2010-07-09)}{
\itemize{
\item new sugar vector functions: rep, rep_len, rep_each, rev, head, tail,
diag
\item sugar has been extended to matrices: The Matrix class now extends the
Matrix_Base template that implements CRTP. Currently sugar functions
for matrices are: outer, col, row, lower_tri, upper_tri, diag
\item The unit tests have been reorganised into fewer files with one call
each to cxxfunction() (covering multiple tests) resulting in a
significant speedup
\item The Date class now uses the same mktime() replacement that R uses
(based on original code from the timezone library by Arthur Olson)
permitting wide date ranges on all operating systems
\item The FastLM example has been updated, a new benchmark based on the
historical Longley data set has been added
\item RcppStringVector now uses std::vector<std::string> internally
\item setting the .Data slot of S4 objects did not work properly
}
}
\section{Changes in Rcpp version 0.8.3 (2010-06-27)}{
\itemize{
\item This release adds Rcpp sugar which brings (a subset of) the R syntax
into C++. This supports :
- binary operators : <,>,<=,>=,==,!= between R vectors
- arithmetic operators: +,-,*,/ between compatible R vectors
- several functions that are similar to the R function of the same name:
abs, all, any, ceiling, diff, exp, ifelse, is_na, lapply, pmin, pmax,
pow, sapply, seq_along, seq_len, sign
Simple examples :
// two numeric vector of the same size
NumericVector x ;
NumericVector y ;
NumericVector res = ifelse( x < y, x*x, -(y*y) ) ;
// sapply'ing a C++ function
double square( double x )\{ return x*x ; \}
NumericVector res = sapply( x, square ) ;
Rcpp sugar uses the technique of expression templates, pioneered by the
Blitz++ library and used in many libraries (Boost::uBlas, Armadillo).
Expression templates allow lazy evaluation of expressions, which
coupled with inlining generates very efficient code, very closely
approaching the performance of hand written loop code, and often
much more efficient than the equivalent (vectorized) R code.
Rcpp sugar is currently limited to vectors, future releases will
include support for matrices with sugar functions such as outer, etc ...
Rcpp sugar is documented in the Rcpp-sugar vignette, which contains
implementation details.
\item New helper function so that "Rcpp?something" brings up Rcpp help
\item Rcpp Modules can now expose public data members
\item New classes Date, Datetime, DateVector and DatetimeVector with proper
'new' API integration such as as(), wrap(), iterators, ...
\item The so-called classic API headers have been moved to a subdirectory
classic/ This should not affect client-code as only Rcpp.h was ever
included.
\item RcppDate now has a constructor from SEXP as well
\item RcppDateVector and RcppDatetimeVector get constructors from int
and both const / non-const operator(int i) functions
\item New API class Rcpp::InternalFunction that can expose C++ functions
to R without modules. The function is exposed as an S4 object of
class C++Function
}
}
\section{Changes in Rcpp version 0.8.2 (2010-06-09)}{
\itemize{
\item Bug-fix release for suncc compiler with thanks to Brian Ripley for
additional testing.
}
}
\section{Changes in Rcpp version 0.8.1 (2010-06-08)}{
\itemize{
\item This release adds Rcpp modules. An Rcpp module is a collection of
internal (C++) functions and classes that are exposed to R. This
functionality has been inspired by Boost.Python.
Modules are created internally using the RCPP_MODULE macro and
retrieved in the R side with the Module function. This is a preview
release of the module functionality, which will keep improving until
the Rcpp 0.9.0 release.
The new vignette "Rcpp-modules" documents the current feature set of
Rcpp modules.
\item The new vignette "Rcpp-package" details the steps involved in making a
package that uses Rcpp.
\item The new vignette "Rcpp-FAQ" collects a number of frequently asked
questions and answers about Rcpp.
\item The new vignette "Rcpp-extending" documents how to extend Rcpp
with user defined types or types from third party libraries. Based on
our experience with RcppArmadillo
\item Rcpp.package.skeleton has been improved to generate a package using
an Rcpp module, controlled by the "module" argument
\item Evaluating a call inside an environment did not work properly
\item cppfunction has been withdrawn since the introduction of the more
flexible cxxfunction in the inline package (0.3.5). Rcpp no longer
depends on inline since many uses of Rcpp do not require inline at
all. We still use inline for unit tests but this is now handled
locally in the unit tests loader runTests.R.
Users of the now-withdrawn function cppfunction can redefine it as:
cppfunction <- function(...) cxxfunction( ..., plugin = "Rcpp" )
\item Support for std::complex was incomplete and has been enhanced.
\item The methods XPtr<T>::getTag and XPtr<T>::getProtected are deprecated,
and will be removed in Rcpp 0.8.2. The methods tag() and prot() should
be used instead. tag() and prot() support both LHS and RHS use.
\item END_RCPP now returns the R Nil values; new macro VOID_END_RCPP
replicates prior behaviour
}
}
\section{Changes in Rcpp version 0.8.0 (2010-05-17)}{
\itemize{
\item All Rcpp headers have been moved to the inst/include directory,
allowing use of 'LinkingTo: Rcpp'. But the Makevars and Makevars.win
are still needed to link against the user library.
\item Automatic exception forwarding has been withdrawn because of
portability issues (as it did not work on the Windows platform).
Exception forwarding is still possible but is now based on explicit
code of the form:
try \{
// user code
\} catch( std::exception& __ex__)\{
forward_exception_to_r( __ex__ ) ;
Alternatively, the macro BEGIN_RCPP and END_RCPP can be used to enclose
code so that it captures exceptions and forward them to R.
BEGIN_RCPP
// user code
END_RCPP
\item new __experimental__ macros
The macros RCPP_FUNCTION_0, ..., RCPP_FUNCTION_65 to help creating C++
functions hiding some code repetition:
RCPP_FUNCTION_2( int, foobar, int x, int y)\{
return x + y ;
The first argument is the output type, the second argument is the
name of the function, and the other arguments are arguments of the
C++ function. Behind the scenes, the RCPP_FUNCTION_2 macro creates an
intermediate function compatible with the .Call interface and handles
exceptions
Similarly, the macros RCPP_FUNCTION_VOID_0, ..., RCPP_FUNCTION_VOID_65
can be used when the C++ function to create returns void. The generated
R function will return R_NilValue in this case.
RCPP_FUNCTION_VOID_2( foobar, std::string foo )\{
// do something with foo
The macro RCPP_XP_FIELD_GET generates a .Call compatible function that
can be used to access the value of a field of a class handled by an
external pointer. For example with a class like this:
class Foo\{
public:
int bar ;
RCPP_XP_FIELD_GET( Foo_bar_get, Foo, bar ) ;
RCPP_XP_FIELD_GET will generate the .Call compatible function called
Foo_bar_get that can be used to retrieve the value of bar.
The macro RCPP_FIELD_SET generates a .Call compatible function that
can be used to set the value of a field. For example:
RCPP_XP_FIELD_SET( Foo_bar_set, Foo, bar ) ;
generates the .Call compatible function called "Foo_bar_set" that
can be used to set the value of bar
The macro RCPP_XP_FIELD generates both getter and setter. For example
RCPP_XP_FIELD( Foo_bar, Foo, bar )
generates the .Call compatible Foo_bar_get and Foo_bar_set using the
macros RCPP_XP_FIELD_GET and RCPP_XP_FIELD_SET previously described
The macros RCPP_XP_METHOD_0, ..., RCPP_XP_METHOD_65 facilitate
calling a method of an object that is stored in an external pointer. For
example:
RCPP_XP_METHOD_0( foobar, std::vector<int> , size )
creates the .Call compatible function called foobar that calls the
size method of the std::vector<int> class. This uses the Rcpp::XPtr<
std::vector<int> > class.
The macros RCPP_XP_METHOD_CAST_0, ... is similar but the result of
the method called is first passed to another function before being
wrapped to a SEXP. For example, if one wanted the result as a double
RCPP_XP_METHOD_CAST_0( foobar, std::vector<int> , size, double )
The macros RCPP_XP_METHOD_VOID_0, ... are used when calling the
method is only used for its side effect.
RCPP_XP_METHOD_VOID_1( foobar, std::vector<int>, push_back )
Assuming xp is an external pointer to a std::vector<int>, this could
be called like this :
.Call( "foobar", xp, 2L )
\item Rcpp now depends on inline (>= 0.3.4)
\item A new R function "cppfunction" was added which invokes cfunction from
inline with focus on Rcpp usage (enforcing .Call, adding the Rcpp
namespace, set up exception forwarding). cppfunction uses BEGIN_RCPP
and END_RCPP macros to enclose the user code
\item new class Rcpp::Formula to help building formulae in C++
\item new class Rcpp::DataFrame to help building data frames in C++
\item Rcpp.package.skeleton gains an argument "example_code" and can now be
used with an empty list, so that only the skeleton is generated. It
has also been reworked to show how to use LinkingTo: Rcpp
\item wrap now supports containers of the following types: long, long double,
unsigned long, short and unsigned short which are silently converted
to the most acceptable R type.
\item Revert to not double-quote protecting the path on Windows as this
breaks backticks expansion used in Makevars.win etc
\item Exceptions classes have been moved out of Rcpp classes,
e.g. Rcpp::RObject::not_a_matrix is now Rcpp::not_a_matrix
}
}
\section{Changes in Rcpp version 0.7.12 (2010-04-16)}{
\itemize{
\item Undo shQuote() to protect Windows path names (which may contain
spaces) as backticks use is still broken; use of $(shell ...) works
}
}
\section{Changes in Rcpp version 0.7.11 (2010-03-26)}{
\itemize{
\item Vector<> gains a set of templated factory methods "create" which
takes up to 20 arguments and can create named or unnamed vectors.
This greatly facilitates creating objects that are returned to R.
\item Matrix now has a diag() method to create diagonal matrices, and
a new constructor using a single int to create square matrices
\item Vector now has a new fill() method to propagate a single value
\item Named is no more a class but a templated function. Both interfaces
Named(.,.) and Named(.)=. are preserved, and extended to work also on
simple vectors (through Vector<>::create)
\item Applied patch by Alistair Gee to make ColDatum more robust
\item Fixed a bug in Vector that caused random behavior due to the lack of
copy constructor in the Vector template
}
}
\section{Changes in Rcpp version 0.7.10 (2010-03-15)}{
\itemize{
\item new class Rcpp::S4 whose constructor checks if the object is an S4
object
\item maximum number of templated arguments to the pairlist function, the
DottedPair constructor, the Language constructor and the Pairlist
constructor has been updated to 20 (was 5) and a script has been
added to the source tree should we want to change it again
\item use shQuote() to protect Windows path names (which may contain spaces)
}
}
\section{Changes in Rcpp version 0.7.9 (2010-03-12)}{
\itemize{
\item Another small improvement to Windows build flags
\item bugfix on 64 bit platforms. The traits classes (wrap_type_traits, etc)
used size_t when they needed to actually use unsigned int
\item fixed pre gcc 4.3 compatibility. The trait class that was used to
identify if a type is convertible to another had too many false
positives on pre gcc 4.3 (no tr1 or c++0x features). fixed by
implementing the section 2.7 of "Modern C++ Design" book.
}
}
\section{Changes in Rcpp version 0.7.8 (2010-03-09)}{
\itemize{
\item All vector classes are now generated from the same template class
Rcpp::Vector<int RTYPE> where RTYPE is one of LGLSXP, RAWSXP, STRSXP,
INTSXP, REALSXP, CPLXSXP, VECSXP and EXPRSXP. typedef are still
available : IntegerVector, ... All vector classes gain methods
inspired from the std::vector template : push_back, push_front,
erase, insert
\item New template class Rcpp::Matrix<RTYPE> deriving from
Rcpp::Vector<RTYPE>. These classes have the same functionality
as Vector but have a different set of constructors which checks
that the input SEXP is a matrix. Matrix<> however does/can not
guarantee that the object will always be a matrix. typedef
are defined for convenience: Matrix<INTSXP> is IntegerMatrix, etc...
\item New class Rcpp::Row<int RTYPE> that represents a row of a matrix
of the same type. Row contains a reference to the underlying
Vector and exposes a nested iterator type that allows use of
STL algorithms on each element of a matrix row. The Vector class
gains a row(int) method that returns a Row instance. Usage
examples are available in the runit.Row.R unit test file
\item New class Rcpp::Column<int RTYPE> that represents a column of a
matrix. (similar to Rcpp::Row<int RTYPE>). Usage examples are
available in the runit.Column.R unit test file
\item The Rcpp::as template function has been reworked to be more
generic. It now handles more STL containers, such as deque and
list, and the genericity can be used to implement as for more
types. The package RcppArmadillo has examples of this
\item new template class Rcpp::fixed_call that can be used in STL algorithms
such as std::generate.
\item RcppExample et al have been moved to a new package RcppExamples;
src/Makevars and src/Makevars.win simplified accordingly
\item New class Rcpp::StringTransformer and helper function
Rcpp::make_string_transformer that can be used to create a function
that transforms a string character by character. For example
Rcpp::make_string_transformer(tolower) transforms each character
using tolower. The RcppExamples package has an example of this.
\item Improved src/Makevars.win thanks to Brian Ripley
\item New examples for 'fast lm' using compiled code:
- using GNU GSL and a C interface
- using Armadillo (http://arma.sf.net) and a C++ interface
Armadillo is seen as faster for lack of extra copying
\item A new package RcppArmadillo (to be released shortly) now serves
as a concrete example on how to extend Rcpp to work with a modern
C++ library such as the heavily-templated Armadillo library
\item Added a new vignette 'Rcpp-introduction' based on a just-submitted
overview article on Rcpp
}
}
\section{Changes in Rcpp version 0.7.7 (2010-02-14)}{
\itemize{
\item new template classes Rcpp::unary_call and Rcpp::binary_call
that facilitate using R language calls together
with STL algorithms.
\item fixed a bug in Language constructors taking a string as their
first argument. The created call was wrong.
}
}
\section{Changes in Rcpp version 0.7.6 (2010-02-12)}{
\itemize{
\item SEXP_Vector (and ExpressionVector and GenericVector, a.k.a List) now
have methods push_front, push_back and insert that are templated
\item SEXP_Vector now has int- and range-valued erase() members
\item Environment class has a default constructor (for RInside)
\item SEXP_Vector_Base factored out of SEXP_Vector (Effect. C++ #44)
\item SEXP_Vector_Base::iterator added as well as begin() and end()
so that STL algorithms can be applied to Rcpp objects
\item CharacterVector gains a random access iterator, begin() and end() to
support STL algorithms; iterator dereferences to a StringProxy
\item Restore Windows build; successfully tested on 32 and 64 bit;
\item Small fixes to inst/skeleton files for bootstrapping a package
\item RObject::asFoo deprecated in favour of Rcpp::as<Foo>
}
}
\section{Changes in Rcpp version 0.7.5 (2010-02-08)}{
\itemize{
\item wrap has been much improved. wrappable types now are :
- primitive types : int, double, Rbyte, Rcomplex, float, bool
- std::string
- STL containers which have iterators over wrappable types:
(e.g. std::vector<T>, std::deque<T>, std::list<T>, etc ...).
- STL maps keyed by std::string, e.g std::map<std::string,T>
- classes that have implicit conversion to SEXP
- classes for which the wrap template is fully or partly specialized
This allows composition, so for example this class is wrappable:
std::vector< std::map<std::string,T> > (if T is wrappable)
\item The range based version of wrap is now exposed at the Rcpp::
level with the following interface :
Rcpp::wrap( InputIterator first, InputIterator last )
This is dispatched internally to the most appropriate implementation
using traits
\item a new namespace Rcpp::traits has been added to host the various
type traits used by wrap
\item The doxygen documentation now shows the examples
\item A new file inst/THANKS acknowledges the kind help we got from others
\item The RcppSexp has been removed from the library.
\item The methods RObject::asFoo are deprecated and will be removed
in the next version. The alternative is to use as<Foo>.
\item The method RObject::slot can now be used to get or set the
associated slot. This is one more example of the proxy pattern
\item Rcpp::VectorBase gains a names() method that allows getting/setting
the names of a vector. This is yet another example of the
proxy pattern.
\item Rcpp::DottedPair gains templated operator<< and operator>> that
allow wrap and push_back or wrap and push_front of an object
\item Rcpp::DottedPair, Rcpp::Language, Rcpp::Pairlist are less
dependent on C++0x features. They gain constructors with up
to 5 templated arguments. 5 was chosen arbitrarily and might
be updated upon request.
\item function calls by the Rcpp::Function class is less dependent
on C++0x. It is now possible to call a function with up to
5 templated arguments (candidate for implicit wrap)
\item added support for 64-bit Windows (thanks to Brian Ripley and Uwe Ligges)
}
}
\section{Changes in Rcpp version 0.7.4 (2010-01-30)}{
\itemize{
\item matrix-like indexing using operator() for all vector
types : IntegerVector, NumericVector, RawVector, CharacterVector
LogicalVector, GenericVector and ExpressionVector.
\item new class Rcpp::Dimension to support creation of vectors with
dimensions. All vector classes gain a constructor taking a
Dimension reference.
\item an intermediate template class "SimpleVector" has been added. All
simple vector classes are now generated from the SimpleVector
template : IntegerVector, NumericVector, RawVector, CharacterVector
LogicalVector.
\item an intermediate template class "SEXP_Vector" has been added to
generate GenericVector and ExpressionVector.
\item the clone template function was introduced to explicitly
clone an RObject by duplicating the SEXP it encapsulates.
\item even smarter wrap programming using traits and template
meta-programming using a private header to be included only in
RcppCommon.h
\item the as template is now smarter. The template now attempts to
build an object of the requested template parameter T by using the
constructor for the type taking a SEXP. This allows third party code
to create a class Foo with a constructor Foo(SEXP) to have
as<Foo> for free.
\item wrap becomes a template. For an object of type T, wrap<T> uses
implicit conversion to SEXP to first convert the object to a SEXP
and then uses the wrap(SEXP) function. This allows third party
code creating a class Bar with an operator SEXP() to have
wrap for free.
\item all specializations of wrap : wrap<double>, wrap< vector<double> >
use coercion to deal with missing values (NA) appropriately.
\item configure has been withdrawn. C++0x features can now be activated
by setting the RCPP_CXX0X environment variable to "yes".
\item new template r_cast<int> to facilitate conversion of one SEXP
type to another. This is mostly intended for internal use and
is used on all vector classes
\item Environment now takes advantage of the augmented smartness
of as and wrap templates. If as<Foo> makes sense, one can
directly extract a Foo from the environment. If wrap<Bar> makes
sense then one can insert a Bar directly into the environment.
Foo foo = env["x"] ; /* as<Foo> is used */
Bar bar ;
env["y"] = bar ; /* wrap<Bar> is used */
\item Environment::assign becomes a template and also uses wrap to
create a suitable SEXP
\item Many more unit tests for the new features; also added unit tests
for older API
}
}
\section{Changes in Rcpp version 0.7.3 (2010-01-21)}{
\itemize{
\item New R function Rcpp.package.skeleton, modelled after
utils::package.skeleton to help creating a package with support
for Rcpp use.
\item indexing is now faster for simple vectors due to inlining of
the operator[] and caching the array pointer
\item The class Rcpp::VectorBase was introduced. All vector classes
derive from it. The class handles behaviour that is common
to all vector types: length, names, etc ...
\item exception forwarding is extended to compilers other than GCC
but default values are used for the exception class
and the exception message, because we don't know how to do it.
\item Improved detection of C++0x capabilities
\item Rcpp::Pairlist gains a default constructor
\item Rcpp::Environment gains a new_child method to create a new
environment whose parent is this
\item Rcpp::Environment::Binding gains a templated implicit
conversion operator
\item Rcpp::ExpressionVector gains an eval method to evaluate itself
\item Rcpp::ExpressionVector gains a constructor taking a std::string
representing some R code to parse.
\item Rcpp::GenericVector::Proxy gains an assignment operator to deal
with Environment::Proxy objects
\item Rcpp::LdFlags() now defaults to static linking OS X, as it already
did on Windows; this default can be overridden.
}
}
\section{Changes in Rcpp version 0.7.2 (2010-01-12)}{
\itemize{
\item a new benchmark was added to the examples directory
around the classic convolution example from
Writing R extensions to compare C and C++ implementations
\item Rcpp::CharacterVector::StringProxy gains a += operator
\item Rcpp::Environment gains an operator[](string) to get/set
objects from the environment. operator[] returns an object
of class Rcpp::Environment::Binding which implements the proxy
pattern. Inspired from Item 30 of 'More Effective C++'
\item Rcpp::Pairlist and Rcpp::Language gain an operator[](int)
also using the proxy pattern
\item Rcpp::RObject.attr can now be used on the rhs or the lhs, to get
or set an attribute. This also uses the proxy pattern
\item Rcpp::Pairlist and Rcpp::Language gain new methods push_back
replace, length, size, remove, insert
\item wrap now returns an object of a suitable class, not just RObject
anymore. For example wrap( bool ) returns a LogicalVector
\item Rcpp::RObject gains methods to deal with S4 objects : isS4,
slot and hasSlot
\item new class Rcpp::ComplexVector to manage complex vectors (CPLXSXP)
\item new class Rcpp::Promise to manage promises (PROMSXP)
\item new class Rcpp::ExpressionVector to manage expression vectors
(EXPRSXP)
\item new class Rcpp::GenericVector to manage generic vectors, a.k.a
lists (VECSXP)
\item new class Rcpp::IntegerVector to manage integer vectors (INTSXP)
\item new class Rcpp::NumericVector to manage numeric vectors (REALSXP)
\item new class Rcpp::RawVector to manage raw vectors (RAWSXP)
\item new class Rcpp::CharacterVector to manage character vectors (STRSXP)
\item new class Rcpp::Function to manage functions
(CLOSXP, SPECIALSXP, BUILTINSXP)
\item new class Rcpp::Pairlist to manage pair lists (LISTSXP)
\item new class Rcpp::Language to manage calls (LANGSXP)
\item new specializations of wrap to deal with std::initializer lists
only available with GCC >= 4.4
\item new R function Rcpp:::capabilities that can query if various
features are available : exception handling, variadic templates
initializer lists
\item new set of functions wrap(T) converting from T to RObject
\item new template function as<T> that can be used to convert a SEXP
to type T. Many specializations implemented to deal with
C++ builtin and stl types. Factored out of RObject
\item new class Rcpp::Named to deal with named objects
in a pairlist, or a call
\item new class Rcpp::Symbol to manage symbols (SYMSXP)
\item The garbage collection has been improved and is now automatic
and hidden. The user needs not to worry about it at all.
\item Rcpp::Environment(SEXP) uses the as.environment R function
\item Doxygen-generated documentation is no longer included as it is both
too large and too volatile. Zipfiles are provided on the website.
}
}
\section{Changes in Rcpp version 0.7.1 (2010-01-02)}{
\itemize{
\item Romain is now a co-author of Rcpp
\item New base class Rcpp::RObject replace RcppSexp (which is provided for
backwards compatibility)
\item RObject has simple wrappers for object creation and conversion to SEXP
\item New classes Rcpp::Evaluator and Rcpp::Environment for expression
evaluation and environment access, respectively
\item New class Rcpp::XPtr for external pointers
\item Enhanced exception handling allows for trapping of exceptions outside
of try/catch blocks
\item Namespace support with a new namespace 'Rcpp'
\item Unit tests for most of the new classes, based on the RUnit package
\item Inline support now provided by the update inline package, so a new
Depends on 'inline (>= 0.3.4)' replaces the code in that was
temporarily in Rcpp
}
}
\section{Changes in Rcpp version 0.7.0 (2009-12-19)}{
\itemize{
\item Inline support via a modified version of 'cfunction' from Oleg
Sklyar's 'inline' package: simple C++ programs can now be compiled,
linked and loaded automagically from the R prompt, including support
for external packages. Also works on Windows (with R-tools installed)
\item New examples for the inline support based on 'Intro to HPC' tutorials
\item New type RcppSexp for simple int, double, std::string scalars and vectors
\item Every class is now in its own header and source file
\item Fix to RcppParams.Rd thanks to Frank S. Thomas
\item RcppVersion.R removed as redundant given DESCRIPTION and read.dcf()
\item Switched to R_PreserveObject and R_ReleaseObject for RcppSexp with
thanks to Romain
\item Licensing changed from LGPL 2.1 (or later) to GPL 2 (or later), file
COPYING updated
}
}
\section{Changes in Rcpp version 0.6.8 (2009-11-19)}{
\itemize{
\item Several classes now split off into their own header and source files
\item New header file RcppCommon.h regrouping common defines and includes
\item Makevars\{,.win\} updated to reflect src/ reorg
}
}
\section{Changes in Rcpp version 0.6.7 (2009-11-08)}{
\itemize{
\item New class RcppList for simple lists and data structures of different
types and dimensions, useful for RProtoBuf project on R-Forge
\item Started to split classes into their own header and source files
\item Added short README file about history and status
\item Small documentation markup fix thanks to Kurt; updated doxygen docs
\item New examples directory functionCallback/ for R function passed to C++
and being called
}
}
\section{Changes in Rcpp version 0.6.6 (2009-08-03)}{
\itemize{
\item Updated Doxygen documentation
\item RcppParams class gains a new exists() member function
}
}
\section{Changes in Rcpp version 0.6.5 (2009-04-01)}{
\itemize{
\item Small OS X build correction using R_ARCH variable
\item Include LGPL license as file COPYING
}
}
\section{Changes in Rcpp version 0.6.4 (2009-03-01)}{
\itemize{
\item Use std:: namespace throughout instead of 'using namespace std'
\item Define R_NO_REMAP so that R provides Rf_length() etc in lieu of length()
to minimise clashes with other projects having similar functions
\item Include Doxygen documentation, and Doxygen configuration file
\item Minor Windows build fix (with thanks to Uwe and Simon)
}
}
\section{Changes in Rcpp version 0.6.3 (2009-01-09)}{
\itemize{
\item OS X build fix with thanks to Simon
\item Added 'view-only' classes for int and double vector and matrix classes
as well as string vector classes, kindly suggested / provided by
David Reiss
\item Add two shorter helper functions Rcpp:::CxxFlags() and
Rcpp:::LdFlags() for compilation and linker flags
}
}
\section{Changes in Rcpp version 0.6.2 (2008-12-02)}{
\itemize{
\item Small but important fix for Linux builds in Rcpp:::RcppLdFlags()
}
}
\section{Changes in Rcpp version 0.6.1 (2008-11-30)}{
\itemize{
\item Now src/Makevars replaces src/Makefile, this brings proper OS X
multi-arch support with thanks to Simon
\item Old #ifdef statements related to QuantLib removed; Rcpp is now
decoupled from QuantLib headers yet can still be used by RQuantLib
\item Added RcppLdPath() to return the lib. directory path and on Linux
the rpath settings
\item Added new RcppVectorExample()
\item Augmented documentation on usage in Rcpp-package.Rd
}
}
\section{Changes in Rcpp version 0.6.0 (2008-11-05)}{
\itemize{
\item New maintainer, taking over RcppTemplate (which has been without an
update since Nov 2006) under its initial name Rcpp
\item New files src/Makefile\{,.win\} including functionality from both
configure and RcppSrc/Makefile; we now build two libraries, one for
use by the package which also runs the example, and one for users to
link against, and removed src/Makevars.in
\item Files src/Rcpp.\{cpp,h\} moved in from ../RcppSrc
\item Added new class RcppDatetime corresponding to POSIXct in with full
support for microsecond time resolution between R and C++
\item Several new manual pages added
\item Removed configure\{,.in,.win\} as src/Makefile* can handle this more
easily
\item Minor cleanup and reformatting for DESCRIPTION, Date: now uses
svn:keyword Date property
\item Renamed RcppTemplateVersion to RcppVersion, deleted RcppDemo
\item Directory demo/ removed as vignette("RcppAPI") is easier and more
reliable to show vignette documentation
\item RcppTemplateDemo() removed from R/zzz.R, vignette("RcppAPI") is easier;
man/RcppTemplateDemo.Rd removed as well
\item Some more code reindentation and formatting to R default arguments,
some renamed from RcppTemplate* to Rcpp*
\item Added footnote onto titlepage of inst/doc/RcppAPI.\{Rnw,pdf\} about how
this document has not (yet) been updated along with the changes made
}
}
| /inst/NEWS.Rd | no_license | murraystokely/Rcpp | R | false | false | 75,678 | rd | \name{NEWS}
\title{News for Package 'Rcpp'}
\newcommand{\cpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}}
\section{Changes in [unreleased] Rcpp version 0.10.7 (2013-11-30)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item New class \code{StretchyList} for pair lists with fast addition of
elements at the front and back. This abstracts the 3 functions
\code{NewList}, \code{GrowList} and \code{Insert} used in various
packages and in parsers in R.
\item The function \code{dnt}, \code{pnt}, \code{qnt} sugar
functions were incorrectly expanding to the no-degree-of-freedoms
variant.
\item Unit tests for \code{pnt} were added.
\item The sugar table function did not handle NAs and NaNs properly
for numeric vectors. Fixed and tests added.
\item The internal coercion mechanism mapping numerics to strings has
been updated to better match \R (specifically with \code{Inf}, \code{-Inf},
and \code{NaN}.)
\item Applied two bug fixes to Vector \code{sort()} and \code{RObject}
definition spotted and correct by Kevin Ushey
}
\item Changes in Rcpp documentation:
\itemize{
\item The Rcpp-FAQ vignette has been updated and expanded.
}
}
}
\section{Changes in Rcpp version 0.10.6 (2013-10-27)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item The function \code{exposeClass} takes a description of the
constructors, fields and methods to be exposed from a C++
class, and writes C++ and R files in the package. Inherited
classes can be dealt with, but require data type information.
This approach avoids hand-coding module files.
\item Two missing \code{is<>()} templates for
\code{CharacterVector} and \code{CharacterMatrix} have been added,
and some tests for \code{is_na()} and \code{is_finite()} have been
corrected thanks to Thomas Tse.
}
\item Changes in R code:
\itemize{
\item Export linking helper function \code{LdFlags} as well as
\code{RcppLdFlags}.
\item Function \code{Rcpp.package.skeleton()} no longer passes a
\code{namespace} argument on to \code{package.skeleton()}
}
\item Changes in R setup:
\itemize{
\item Raise requirement for R itself to be version 3.0.0 or later
as needed by the vignette processing
}
\item Changes in Rcpp attributes:
\itemize{
\item \code{sourceCpp} now correctly binds to Rtools 3.0 and 3.1
}
}
}
\section{Changes in Rcpp version 0.10.5 (2013-09-28)}{
\itemize{
\item Changes in R code:
\itemize{
\item New R function \code{demangle} that calls the \code{DEMANGLE} macro.
\item New R function \code{sizeof} to query the byte size of a type. This
returns an object of S3 class \code{bytes} that has a \code{print} method
showing bytes and bits.
}
\item Changes in Rcpp API:
\itemize{
\item Add \code{defined(__sun)} to lists of operating systems to
test for when checking for lack of \code{backtrace()} needed for
stack traces.
\item \code{as<T*>}, \code{as<const T*>}, \code{as<T&>} and
\code{as<const T&>} are now supported, when
T is a class exposed by modules, i.e. with \code{RCPP_EXPOSED_CLASS}
\item \code{DoubleVector} as been added as an alias to
\code{NumericVector}
\item New template function \code{is<T>} to identify if an R object
can be seen as a \code{T}. For example \code{is<DataFrame>(x)}.
This is a building block for more expressive dispatch in various places
(modules and attributes functions).
\item \code{wrap} can now handle more types, i.e. types that iterate over
\code{std::pair<const KEY, VALUE>} where KEY can be converted to a
\code{String} and \code{VALUE} is either a primitive type (int, double)
or a type that wraps. Examples :
\itemize{
\item \code{std::map<int, double>} : we can make a String from an int,
and double is primitive
\item \code{boost::unordered_map<double, std::vector<double> >}: we can make
a String from a double and \code{std::vector<double>} can wrap itself
}
Other examples of this are included at the end of the \code{wrap} unit test
file (\code{runit.wrap.R} and \code{wrap.cpp}).
\item \code{wrap} now handles containers of classes handled by modules. e.g.
if you expose a class \code{Foo} via modules, then you can wrap
\code{vector<Foo>}, ... An example is included in the \code{wrap} unit test
file
\item \code{RcppLdFlags()}, often used in \code{Makevars} files of
packages using \pkg{Rcpp}, is now exported from the package namespace.
}
\item Changes in Attributes:
\itemize{
\item Objects exported by a module (i.e. by a \code{RCPP_MODULE} call
in a file that is processed by \code{sourceCpp}) are now directly
available in the environment. We used to make the module object
available, which was less useful.
\item A plugin for \code{openmp} has been added to support use of OpenMP.
\item \code{Rcpp::export} now takes advantage of the more flexible
\code{as<>}, handling constness and referenceness of the input types.
For users, it means that for the parameters of function exported by modules,
we can now use references, pointers and const versions of them.
The file \code{Module.cpp} file has an example.
\item{No longer call non-exported functions from the tools package}
\item{No longer search the inline package as a fallback when loading
plugins for the \code{Rcpp::plugins} attribute}.
}
\item Changes in Modules:
\itemize{
\item We can now expose functions and methods that take
\code{T&} or \code{const T&} as arguments. In these situations
objects are no longer copied as they used to be.
}
\item Changes in sugar:
\itemize{
\item \code{is_na} supports classes \code{DatetimeVector} and
\code{DateVector}
}
\item Changes in Rcpp documentation:
\itemize{
\item The vignettes have been moved from \code{inst/doc/} to the
\code{vignettes} directory which is now preferred.
\item The appearance of the vignettes has been refreshed by
switching to the Bistream Charter font, and microtype package.
}
\item Deprecation of \code{RCPP_FUNCTION_*}:
\itemize{
\item The macros from the \code{preprocessor_generated.h} file
have been deprecated. They are still available, but they print a
message in addition to their expected behavior.
\item The macros will be permanently removed in the first \pkg{Rcpp}
release after July 2014.
\item Users of these macros should start replacing them with more
up-to-date code, such as using 'Rcpp attributes' or 'Rcpp modules'.
}
}
}
\section{Changes in Rcpp version 0.10.4 (2013-06-23)}{
\itemize{
\item Changes in R code: None beyond those detailed for Rcpp Attributes
\item Changes in Rcpp attributes:
\itemize{
\item Fixed problem whereby the interaction between the gc and the
RNGScope destructor could cause a crash.
\item Don't include package header file in generated C++ interface
header files.
\item Lookup plugins in \pkg{inline} package if they aren't found
within the \pkg{Rcpp} package.
\item Disallow compilation for files that don't have extensions
supported by R CMD SHLIB
}
\item Changes in Rcpp API:
\itemize{
\item The \code{DataFrame::create} set of functions has been reworked
to just use \code{List::create} and feed to the \code{DataFrame}
constructor
\item The \code{operator-()} semantics for \code{Date} and
\code{Datetime} are now more inline with standard C++ behaviour;
with thanks to Robin Girard for the report.
\item RNGScope counter now uses unsigned long rather than int.
\item \code{Vector<*>::erase(iterator, iterator)} was fixed. Now
it does not remove the element pointed by last (similar to what is
done on stl types and what was intended initially). Reported on
Rcpp-devel by Toni Giorgino.
\item Added equality operator between elements of
\code{CharacterVector}s.
}
\item Changes in Rcpp sugar:
\itemize{
\item New function \code{na_omit} based on the StackOverflow thread
\url{http://stackoverflow.com/questions/15953768/}
\item New function \code{is_finite} and \code{is_infinite} that
reproduces the behavior of R's \code{is.finite} and
\code{is.infinite} functions
}
\item Changes in Rcpp build tools:
\itemize{
\item Fix by Martyn Plummer for Solaris in handling of
\code{SingleLogicalResult}.
\item The \code{src/Makevars} file can now optionally override the
path for \code{/usr/bin/install_name_tool} which is used on OS X.
\item Vignettes are trying harder not to be built in parallel.
}
\item Changes in Rcpp documentation:
\itemize{
\item Updated the bibliography in \code{Rcpp.bib} (which is also
sourced by packages using Rcpp).
\item Updated the \code{THANKS} file.
}
\item Planned Deprecation of \code{RCPP_FUNCTION_*}:
\itemize{
\item The set of macros \code{RCPP_FUNCTION_} etc ... from the
\code{preprocessor_generated.h} file will be deprecated in the next version
of \pkg{Rcpp}, i.e they will still be available but will generate some
warning in addition to their expected behavior.
\item In the first release that is at least 12 months after this announcement, the
macros will be removed from \pkg{Rcpp}.
\item Users of these macros (if there are any) should start replacing them
with more up to date code, such as using Rcpp attributes or Rcpp
modules.
}
}
}
\section{Changes in Rcpp version 0.10.3 (2013-03-23)}{
\itemize{
\item Changes in R code:
\itemize{
\item Prevent build failures on Windows when Rcpp is installed
in a library path with spaces (transform paths in the same manner
that R does before passing them to the build system).
}
\item Changes in Rcpp attributes:
\itemize{
\item Rcpp modules can now be used with \code{sourceCpp}
\item Standalone roxygen chunks (e.g. to document a class) are now
transposed into RcppExports.R
\item Added \code{Rcpp::plugins} attribute for binding
directly to inline plugins. Plugins can be registered using
the new \code{registerPlugin} function.
\item Added built-in \code{cpp11} plugin for specifying
the use of C++11 in a translation unit
\item Merge existing values of build related environment
variables for sourceCpp
\item Add global package include file to RcppExports.cpp
if it exists
\item Stop with an error if the file name passed to
\code{sourceCpp} has spaces in it
\item Return invisibly from void functions
\item Ensure that line comments invalidate block comments when
parsing for attributes
\item Eliminated spurious empty hello world function definition
in Rcpp.package.skeleton
}
\item Changes in Rcpp API:
\itemize{
\item The very central use of R API R_PreserveObject and
R_ReleaseObject has been replaced by a new system based on the
functions Rcpp_PreserveObject, Rcpp_ReleaseObject and Rcpp_ReplaceObject
which shows better performance and is implemented using a generic vector
treated as a stack instead of a pairlist in the R
implementation. However, as this preserve / release code is still
a little rough at the edges, a new #define is used (in config.h)
to disable it for now.
\item Platform-dependent code in Timer.cpp now recognises a few
more BSD variants thanks to contributed defined() test suggestions
\item Support for wide character strings has been added throughout the
API. In particular String, CharacterVector, wrap and as are aware of
wide character strings
}
}
}
\section{Changes in Rcpp version 0.10.2 (2012-12-21)}{
\itemize{
\item Changes in Rcpp API:
\itemize{
\item Source and header files were reorganized and consolidated so
that compile time are now significantly lower
\item Added additional check in \code{Rstreambuf} deletion
\item Added support for \code{clang++} when using \code{libc++},
and for \code{icpc} in \code{std=c++11} mode, thanks to a
patch by Yan Zhou
\item New class \code{Rcpp::String} to facilitate working with a single
element of a character vector
\item New utility class sugar::IndexHash inspired from Simon
Urbanek's fastmatch package
\item Implementation of the equality operator between two Rcomplex
\item \code{RNGScope} now has an internal counter that enables it
to be safely used multiple times in the same stack frame.
\item New class \code{Rcpp::Timer} for benchmarking
}
\item Changes in Rcpp sugar:
\itemize{
\item More efficient version of \code{match} based on \code{IndexHash}
\item More efficient version of \code{unique} based on \code{IndexHash}
\item More efficient version of \code{in} based on \code{IndexHash}
\item More efficient version of \code{duplicated} based on \code{IndexHash}
\item More efficient version of \code{self_match} based on \code{IndexHash}
\item New function \code{collapse} that implements paste(., collapse= "" )
}
\item Changes in Rcpp attributes:
\itemize{
\item Use code generation rather than modules to implement
\code{sourceCpp} and \code{compileAttributes} (eliminates
problem with exceptions not being able to cross shared library
boundaries on Windows)
\item Exported functions now automatically establish an \code{RNGScope}
\item Functions exported by \code{sourceCpp} now directly
reference the external function pointer rather than rely on
dynlib lookup
\item On Windows, Rtools is automatically added to the PATH
during \code{sourceCpp} compilations
\item Diagnostics are printed to the console if \code{sourceCpp}
fails and C++ development tools are not installed
\item A warning is printed if when \code{compileAttributes} detects
\code{Rcpp::depends} attributes in source files that are not
matched by Depends/LinkingTo entries in the package DESCRIPTION
}
}
}
\section{Changes in Rcpp version 0.10.1 (2012-11-26)}{
\itemize{
\item Changes in Rcpp sugar:
\itemize{
\item New functions: \code{setdiff}, \code{union_}, \code{intersect}
\code{setequal}, \code{in}, \code{min}, \code{max}, \code{range},
\code{match}, \code{table}, \code{duplicated}
\item New function: \code{clamp} which combines pmin and pmax, e.g.
clamp( a, x, b) is the same as pmax( b, pmin(x, a) )
\item New function: \code{self_match} which implements something
similar to \code{match( x, unique( x ) )}
}
\item Changes in Rcpp API:
\itemize{
\item The \code{Vector} template class (hence \code{NumericVector}
...) get the \code{is_na} and the \code{get_na} static methods.
\item New helper class \code{no_init} that can be used to
create a vector without initializing its data, e.g. :
\code{ IntegerVector out = no_init(n) ; }
\item New exception constructor requiring only a message; \code{stop}
function to throw an exception
\item \code{DataFrame} gains a \code{nrows} method
}
\item Changes in Rcpp attributes:
\itemize{
\item Ability to embed R code chunks (via specially formatted
block comments) in C++ source files.
\item Allow specification of argument defaults for exported functions.
\item New scheme for more flexible mixing of generated and user composed
C++ headers.
\item Print warning if no export attributes are found in source file.
\item Updated vignette with additional documentation on exposing
C++ interfaces from packages and signaling errors.
}
\item Changes in Rcpp modules:
\itemize{
\item Enclose .External invocations in \code{BEGIN_RCPP}/\code{END_RCPP}
}
\item Changes in R code :
\itemize{
\item New function \code{areMacrosDefined}
\item Additions to \code{Rcpp.package.skeleton}:
\itemize{
\item \code{attributes} parameter to generate a version of
\code{rcpp_hello_world} that uses \code{Rcpp::export}.
\item \code{cpp_files} parameter to provide a list of C++
files to include in the \code{src} directory of the package.
}
}
\item Miscellaneous changes:
\itemize{
\item New example 'pi simulation' using R and C++ via Rcpp attributes
}
}
}
\section{Changes in Rcpp version 0.10.0 (2012-11-13)}{
\itemize{
\item Support for C++11 style attributes (embedded in comments) to enable
use of C++ within interactive sessions and to automatically generate module
declarations for packages:
\itemize{
\item Rcpp::export attribute to export a C++ function to R
\item \code{sourceCpp()} function to source exported functions from a file
\item \code{cppFunction()} and \code{evalCpp()} functions for inline declarations
and execution
\item \code{compileAttributes()} function to generate Rcpp modules from
exported functions within a package
\item Rcpp::depends attribute for specifying additional build
dependencies for \code{sourceCpp()}
\item Rcpp::interfaces attribute to specify the external bindings
\code{compileAttributes()} should generate (defaults to R-only but a
C++ include file using R_GetCCallable can also be generated)
\item New vignette "Rcpp-attribute"
}
\item Rcpp modules feature set has been expanded:
\itemize{
\item Functions and methods can now return objects from classes that
are exposed through modules. This uses the make_new_object template
internally. This feature requires that some class traits are declared
to indicate Rcpp's \code{wrap}/\code{as} system that these classes are covered
by modules. The macro RCPP_EXPOSED_CLASS and RCPP_EXPOSED_CLASS_NODECL
can be used to declared these type traits.
\item Classes exposed through modules can also be used as parameters
of exposed functions or methods.
\item Exposed classes can declare factories with ".factory". A factory
is a c++ function that returns a pointer to the target class. It is
assumed that these objects are allocated with new on the factory. On the
R side, factories are called just like other constructors, with the
"new" function. This feature allows an alternative way to construct
objects.
\item "converter" can be used to declare a way to convert an object
of a type to another type. This gets translated to the appropriate
"as" method on the R side.
\item Inheritance. A class can now declare that it inherits from
another class with the .derives<Parent>( "Parent" ) notation. As a result
the exposed class gains methods and properties (fields) from its
parent class.
}
\item New sugar functions:
\itemize{
\item \code{which_min} implements which.min. Traversing the sugar expression
and returning the index of the first time the minimum value is found.
\item \code{which_max} idem
\item \code{unique} uses unordered_set to find unique values. In particular,
the version for CharacterVector is found to be more efficient than
R's version
\item \code{sort_unique} calculates unique values and then sorts them.
}
\item Improvements to output facilities:
\itemize{
\item Implemented \code{sync()} so that flushing output streams works
\item Added \code{Rcerr} output stream (forwarding to
\code{REprintf})
}
\item Provide a namespace 'R' for the standalone Rmath library so
that Rcpp users can access those functions too; also added unit tests
\item Development releases sets variable RunAllRcppTests to yes to
run all tests (unless it was already set to 'no'); CRAN releases do
not and still require setting -- which helps with the desired CRAN
default of less testing at the CRAN server farm.
}
}
\section{Changes in Rcpp version 0.9.15 (2012-10-13)}{
\itemize{
\item Untangling the clang++ build issue about the location of the
exceptions header by directly checking for the include file -- an
approach provided by Martin Morgan in a kindly contributed patch
as unit tests for them.
\item The \code{Date} and \code{Datetime} types now correctly
handle \code{NA}, \code{NaN} and \code{Inf} representation; the
\code{Date} type switched to an internal representation via \code{double}
\item Added \code{Date} and \code{Datetime} unit tests for the new
features
\item An additional \code{PROTECT} was added for parsing exception
messages before returning them to R, following a report by Ben North
}
}
\section{Changes in Rcpp version 0.9.14 (2012-09-30)}{
\itemize{
\item Added new Rcpp sugar functions trunc(), round() and signif(), as well
as unit tests for them
\item Be more conservative about where we support clang++ and the inclusion
of exception_defines.h and prevent this from being attempted on OS X
where it failed for clang 3.1
\item Corrected a typo in Module.h which now again permits use of finalizers
\item Small correction for (unexported) bib() function (which provides a path
to the bibtex file that ships with Rcpp)
\item Converted NEWS to NEWS.Rd
}
}
\section{Changes in Rcpp version 0.9.13 (2012-06-28)}{
\itemize{
\item Truly corrected Rcpp::Environment class by having default constructor
use the global environment, and removing the default argument of
global environment from the SEXP constructor
\item Added tests for clang++ version to include bits/exception_defines.h
for versions 3.0 or higher (similar to g++ 4.6.0 or later), needed to
include one particular exceptions header
\item Made more regression tests conditional on the RunAllRcppTests to come
closer to the CRAN mandate of running tests in sixty seconds
\item Updated unit test wrapper tests/doRUnit.R as well as unitTests/runTests.R
}
}
\section{Changes in Rcpp version 0.9.12 (2012-06-23)}{
\itemize{
\item Corrected Rcpp::Environment class by removing (empty) ctor following
rev3592 (on May 2) where default argument for ctor was moved
\item Unit testing now checks for environment variable RunAllRcppTests being
set to "yes"; otherwise some tests are skipped. This is arguably not
the right thing to do, but CRAN maintainers insist on faster tests.
\item Unit test wrapper script runTests.R has new option --allTests to set
the environment variable
\item The cleanup script now also considers inst/unitTests/testRcppClass/src
}
}
\section{Changes in Rcpp version 0.9.11 (2012-06-22)}{
\itemize{
\item New member function for vectors (and lists etc) containsElementNamed()
which returns a boolean indicating if the given element name is present
\item Updated the Rcpp.package.skeleton() support for Rcpp modules by
carrying functions already present from the corresponding unit test
which was also slightly expanded; and added more comments to the code
\item Rcpp modules can now be loaded via loadRcppModules() from .onLoad(),
or via loadModule("moduleName") from any R file
\item Extended functionality to let R modify C++ classes imported via modules,
documented in help(setRcppClass)
\item Support compilation in Cygwin thanks to a patch by Dario Buttari
\item Extensions to the Rcpp-FAQ and the Rcpp-modules vignettes
\item The minimum version of R is now 2.15.1 which is required for some of
the Rcpp modules support
}
}
\section{Changes in Rcpp version 0.9.10 (2012-02-16)}{
\itemize{
\item Rearrange headers so that Rcpp::Rcout can be used by RcppArmadillo et al
\item New Rcpp sugar function mapply (limited to two or three input vectors)
\item Added custom version of the Rcpp sugar diff function for numeric vectors
skipping unnecessary checks for NA
\item Some internal code changes to reflect changes and stricter requirements
in R CMD check in the current R-devel versions
\item Corrected fixed-value initialization for IntegerVector (with thanks to
Gregor Kastner for spotting this)
\item New Rcpp-FAQ entry on simple way to set compiler option for cxxfunction
}
}
\section{Changes in Rcpp version 0.9.9 (2011-12-25)}{
\itemize{
\item Reverting the 'int64' changes from release 0.9.8 which adversely
affect packages using Rcpp: We will re-apply the 'int64' changes in a
way which should cooperate more easily with 'long' and 'unsigned long'.
\item Unit test output directory fallback changed to use Rcpp.Rcheck
\item Conditioned two unit tests to not run on Windows where they now break
whereas they passed before, and continue to pass on other OSs
}
}
\section{Changes in Rcpp version 0.9.8 (2011-12-21)}{
\itemize{
\item wrap now handles 64 bit integers (int64_t, uint64_t) and containers
of them, and Rcpp now depends on the int64 package (also on CRAN).
This work has been sponsored by the Google Open Source Programs
Office.
\item Added setRcppClass() function to create extended reference classes
with an interface to a C++ class (typically via Rcpp Module) which
can have R-based fields and methods in addition to those from the C++.
\item Applied patch by Jelmer Ypma which adds an output stream class
'Rcout' not unlike std::cout, but implemented via Rprintf to
cooperate with R and its output buffering.
\item New unit tests for pf(), pnf(), pchisq(), pnchisq() and pcauchy()
\item XPtr constructor now checks for corresponding type in SEXP
\item Updated vignettes for use with updated highlight package
\item Update linking command for older fastLm() example using external
Armadillo
}
}
\section{Changes in Rcpp version 0.9.7 (2011-09-29)}{
\itemize{
\item Applied two patches kindly provided by Martyn Plummer which provide
support for compilation on Solaris using the SunPro compiler
\item Minor code reorganisation in which exception specifiers are removed;
this effectively only implements a run-time (rather than compile-time)
check and is generally seen as a somewhat deprecated C++ idiom. Thanks
to Darren Cook for alerting us to this issue.
\item New example 'OpenMPandInline.r' in the OpenMP/ directory, showing how
to easily use OpenMP by modifying the RcppPlugin output
\item New example 'ifelseLooped.r' showing Rcpp can accelerate loops that may
be difficult to vectorise due to dependencies
\item New example directory examples/Misc/ regrouping the new example as
well as the fibonacci example added in Rcpp 0.9.6
\item New Rcpp-FAQ example warning of lossy conversion from 64-bit long
integer types into a 53-bit mantissa which has no clear fix yet.
\item New unit test for accessing a non-exported function from a namespace
}
}
\section{Changes in Rcpp version 0.9.6 (2011-07-26)}{
\itemize{
\item Added helper traits to facilitate implementation of the RcppEigen
package: The is_eigen_base traits identifies if a class derives from
EigenBase using SFINAE; and new dispatch layer was added to wrap() to
help RcppEigen
\item XPtr now accepts a second template parameter, which is a function
taking a pointer to the target class. This allows the developer to
supply his/her own finalizer. The template parameter has a default
value which retains the original behaviour (calling delete on the
pointer)
\item New example RcppGibbs, extending Sanjog Misra's Rcpp illustration of
Darren Wilkinson's comparison of MCMC Gibbs Sampler implementations;
also added short timing on Normal and Gaussian RNG draws between Rcpp
and GSL as R's rgamma() is seen to be significantly slower
\item New example on recursively computing a Fibonacci number using Rcpp and
comparing this to R and byte-compiled R for a significant speed gain
}
}
\section{Changes in Rcpp version 0.9.5 (2011-07-05)}{
\itemize{
\item New Rcpp-FAQ examples on using the plugin maker for inline's
cxxfunction(), and on setting row and column names for matrices
\item New sugar functions: mean, var, sd
\item Minor correction and extension to STL documentation in Rcpp-quickref
\item wrap() is now resilient to NULL pointers passed as in const char *
\item loadRcppModules() gains a "direct" argument to expose the module instead
of exposing what is inside it
\item Suppress a spurious warning from R CMD check on packages created with
Rcpp.package.skeleton(..., module=TRUE)
\item Some fixes and improvements for Rcpp sugar function 'rlnorm()'
\item Beginnings of new example using OpenMP and recognising user interrupts
}
}
\section{Changes in Rcpp version 0.9.4 (2011-04-12)}{
\itemize{
\item New R function "loadRcppModules" to load Rcpp modules automatically
from a package. This function must be called from the .onLoad function
and works with the "RcppModules" field of the package's DESCRIPTION file
\item The Modules example wrapped the STL std::vector received some editing
to disambiguate some symbols the newer compilers did not like
\item Coercing of vectors of factors is now done with an explicit callback
to R's "as.character()" as Rf_coerceVector no longer plays along
\item A CITATION file for the published JSS paper has been added, and
references were added to Rcpp-package.Rd and the different vignettes
}
}
\section{Changes in Rcpp version 0.9.3 (2011-04-05)}{
\itemize{
\item Fixed a bug in which modules code was not behaving when compiled
twice as can easily happen with inline'ed version
\item Exceptions code includes exception_defines.h only when g++ is 4.5 or
younger as the file no longer exists with g++-4.6
\item The documentation Makefile now uses the $R_HOME environment variable
\item The documentation Makefile no longer calls clean in the all target
\item C++ conformance issue found by clang/llvm addressed by re-ordering
declarations in grow.h as unqualified names must be declared before
they are used, even when used within templates
\item The 'long long' typedef now depends on C++0x being enabled as this
was not a feature in C++98; this suppresses a new g++-4.5 warning
\item The Rcpp-introduction vignette was updated to the forthcoming JSS paper
}
}
\section{Changes in Rcpp version 0.9.2 (2011-02-23)}{
\itemize{
\item The unitTest runit.Module.client.package.R is now skipped on older OS
X releases as it triggers a bug with g++ 4.2.1 or older; OS X 10.6 is
fine but as it no longer supports ppc we try to accommodate 10.5 too.
Thanks to Simon Urbanek for pinning this down and Baptiste Auguie
and Ken Williams for additional testing
\item RcppCommon.h now recognises the Intel Compiler thanks to a short
patch by Alexey Stukalov; this turns off Cxx0x and TR1 features too
\item Three more setup questions were added to the Rcpp-FAQ vignette
\item One question about RcppArmadillo was added to the Rcpp-FAQ vignette
}
}
\section{Changes in Rcpp version 0.9.1 (2011-02-14)}{
\itemize{
\item A number of internal changes to the memory allocation / protection of
temporary objects were made---with a heartfelt "Thank You!" to both
Doug Bates for very persistent debugging of Rcpp modules code, and to
Luke Tierney who added additional memory allocation debugging tools
to R-devel (which will be in R 2.13.0 and may also be in R 2.12.2)
\item Removed another GNU Make-specific variable from src/Makevars in order
to make the build more portable; this was noticed on FreeBSD
\item On *BSD, do not try to compute a stack trace but provide file and
line number (which is the same behaviour as implemented in Windows)
\item Fixed an int conversion bug reported by Daniel Sabanes Bove on r-devel,
added unit test as well
\item Added unit tests for complex-typed vectors (thanks to Christian Gunning)
\item Expanded the Rcpp-quickref vignette (with thanks to Christian Gunning)
\item Additional examples were added to the Rcpp-FAQ vignette
}
}
\section{Changes in Rcpp version 0.9.0 (2010-12-19)}{
\itemize{
\item The classic API was factored out into its own package RcppClassic which
is released concurrently with this version.
\item If an object is created but not initialized, attempting to use
it now gives a more sensible error message (by forwarding an
Rcpp::not_initialized exception to R).
\item SubMatrix fixed, and Matrix types now have a nested ::Sub typedef.
\item New unexported function SHLIB() to aid in creating a shared library on
the command-line or in Makefile (similar to CxxFlags() / LdFlags()).
\item Module gets a seven-argument ctor thanks to a patch from Tama Ma.
\item The (still incomplete) QuickRef vignette has grown thanks to a patch
by Christian Gunning.
\item Added a sprintf template intended for logging and error messages.
\item Date::getYear() corrected (where addition of 1900 was not called for);
corresponding change in constructor from three ints made as well.
\item Date() and Datetime() constructors from string received a missing
conversion to int and double following strptime. The default format
string for the Datetime() strptime call was also corrected.
\item A few minor fixes throughout, see ChangeLog.
}
}
\section{Changes in Rcpp version 0.8.9 (2010-11-27)}{
\itemize{
\item Many improvements were made in 'Rcpp modules':
- exposing multiple constructors
- overloaded methods
- self-documentation of classes, methods, constructors, fields and
functions.
- new R function "populate" to facilitate working with modules in
packages.
- formal argument specification of functions.
- updated support for Rcpp.package.skeleton.
- constructors can now take many more arguments.
\item The 'Rcpp-modules' vignette was updated as well and describe many
of the new features
\item New template class Rcpp::SubMatrix<RTYPE> and support syntax in Matrix
to extract a submatrix:
NumericMatrix x = ... ;
// extract the first three columns
SubMatrix<REALSXP> y = x( _ , Range(0,2) ) ;
// extract the first three rows
SubMatrix<REALSXP> y = x( Range(0,2), _ ) ;
// extract the top 3x3 sub matrix
SubMatrix<REALSXP> y = x( Range(0,2), Range(0,2) ) ;
\item Reference Classes no longer require a default constructor for
subclasses of C++ classes
\item Consistently revert to using backticks rather than shell expansion
to compute library file location when building packages against Rcpp
on the default platforms; this has been applied to internal test
packages as well as CRAN/BioC packages using Rcpp
}
}
\section{Changes in Rcpp version 0.8.8 (2010-11-01)}{
\itemize{
\item New syntactic shortcut to extract rows and columns of a Matrix.
x(i,_) extracts the i-th row and x(_,i) extracts the i-th column.
\item Matrix indexing is more efficient. However, faster indexing is
disabled if g++ 4.5.0 or later is used.
\item A few new Rcpp operators such as cumsum, operator=(sugar)
\item Variety of bug fixes:
- column indexing was incorrect in some cases
- compilation using clang/llvm (thanks to Karl Millar for the patch)
- instantation order of Module corrected
- POSIXct, POSIXt now correctly ordered for R 2.12.0
}
}
\section{Changes in Rcpp version 0.8.7 (2010-10-15)}{
\itemize{
\item As of this version, Rcpp depends on R 2.12 or greater as it interfaces
the new reference classes (see below) and also reflects the POSIXt
class reordering both of which appeared with R version 2.12.0
\item new Rcpp::Reference class, that allows internal manipulation of R
2.12.0 reference classes. The class exposes a constructor that takes
the name of the target reference class and a field(string) method
that implements the proxy pattern to get/set reference fields using
callbacks to the R operators "$" and "$<-" in order to preserve the
R-level encapsulation
\item the R side of the preceding item allows methods to be written in R as
per ?ReferenceClasses, accessing fields by name and assigning them
using "<<-". Classes extracted from modules are R reference classes.
They can be subclassed in R, and/or R methods can be defined using
the $methods(...) mechanism.
\item internal performance improvements for Rcpp sugar as well as an added
'noNA()' wrapper to omit tests for NA values -- see the included
examples in inst/examples/convolveBenchmarks for the speedups
\item more internal performance gains with Functions and Environments
}
}
\section{Changes in Rcpp version 0.8.6 (2010-09-09)}{
\itemize{
\item new macro RCPP_VERSION and Rcpp_Version to allow conditional compiling
based on the version of Rcpp
#if defined(RCPP_VERSION) && RCPP_VERSION >= Rcpp_Version(0,8,6)
#endif
\item new sugar functions for statistical distributions (d-p-q-r functions)
with distributions : unif, norm, gamma, chisq, lnorm, weibull, logis,
f, pois, binom, t, beta.
\item new ctor for Vector taking size and function pointer so that for example
NumericVector( 10, norm_rand )
generates a N(0,1) vector of size 10
\item added binary operators for complex numbers, as well as sugar support
\item more sugar math functions: sqrt, log, log10, exp, sin, cos, ...
\item started new vignette Rcpp-quickref : quick reference guide of Rcpp API
(still work in progress)
\item various patches to comply with solaris/suncc stricter standards
\item minor enhancements to ConvolutionBenchmark example
\item simplified src/Makefile to no longer require GNU make; packages using
Rcpp still do for the compile-time test of library locations
}
}
\section{Changes in Rcpp version 0.8.5 (2010-07-25)}{
\itemize{
\item speed improvements. Vector::names, RObject::slot have been improved
to take advantage of R API functions instead of callbacks to R
\item Some small updates to the Rd-based documentation which now points to
content in the vignettes. Also a small formatting change to suppress
a warning from the development version of R.
\item Minor changes to Date() code which may reenable SunStudio builds
}
}
\section{Changes in Rcpp version 0.8.4 (2010-07-09)}{
\itemize{
\item new sugar vector functions: rep, rep_len, rep_each, rev, head, tail,
diag
\item sugar has been extended to matrices: The Matrix class now extends the
Matrix_Base template that implements CRTP. Currently sugar functions
for matrices are: outer, col, row, lower_tri, upper_tri, diag
\item The unit tests have been reorganised into fewer files with one call
each to cxxfunction() (covering multiple tests) resulting in a
significant speedup
\item The Date class now uses the same mktime() replacement that R uses
(based on original code from the timezone library by Arthur Olson)
permitting wide date ranges on all operating systems
\item The FastLM example has been updated, a new benchmark based on the
historical Longley data set has been added
\item RcppStringVector now uses std::vector<std::string> internally
\item setting the .Data slot of S4 objects did not work properly
}
}
\section{Changes in Rcpp version 0.8.3 (2010-06-27)}{
\itemize{
\item This release adds Rcpp sugar which brings (a subset of) the R syntax
into C++. This supports :
- binary operators : <,>,<=,>=,==,!= between R vectors
- arithmetic operators: +,-,*,/ between compatible R vectors
- several functions that are similar to the R function of the same name:
abs, all, any, ceiling, diff, exp, ifelse, is_na, lapply, pmin, pmax,
pow, sapply, seq_along, seq_len, sign
Simple examples :
// two numeric vector of the same size
NumericVector x ;
NumericVector y ;
NumericVector res = ifelse( x < y, x*x, -(y*y) ) ;
// sapply'ing a C++ function
double square( double x )\{ return x*x ; \}
NumericVector res = sapply( x, square ) ;
Rcpp sugar uses the technique of expression templates, pioneered by the
Blitz++ library and used in many libraries (Boost::uBlas, Armadillo).
Expression templates allow lazy evaluation of expressions, which
coupled with inlining generates very efficient code, very closely
approaching the performance of hand written loop code, and often
much more efficient than the equivalent (vectorized) R code.
Rcpp sugar is currently limited to vectors, future releases will
include support for matrices with sugar functions such as outer, etc ...
Rcpp sugar is documented in the Rcpp-sugar vignette, which contains
implementation details.
\item New helper function so that "Rcpp?something" brings up Rcpp help
\item Rcpp Modules can now expose public data members
\item New classes Date, Datetime, DateVector and DatetimeVector with proper
'new' API integration such as as(), wrap(), iterators, ...
\item The so-called classic API headers have been moved to a subdirectory
classic/ This should not affect client-code as only Rcpp.h was ever
included.
\item RcppDate now has a constructor from SEXP as well
\item RcppDateVector and RcppDatetimeVector get constructors from int
and both const / non-const operator(int i) functions
\item New API class Rcpp::InternalFunction that can expose C++ functions
to R without modules. The function is exposed as an S4 object of
class C++Function
}
}
\section{Changes in Rcpp version 0.8.2 (2010-06-09)}{
\itemize{
\item Bug-fix release for suncc compiler with thanks to Brian Ripley for
additional testing.
}
}
\section{Changes in Rcpp version 0.8.1 (2010-06-08)}{
\itemize{
\item This release adds Rcpp modules. An Rcpp module is a collection of
internal (C++) functions and classes that are exposed to R. This
functionality has been inspired by Boost.Python.
Modules are created internally using the RCPP_MODULE macro and
retrieved in the R side with the Module function. This is a preview
release of the module functionality, which will keep improving until
the Rcpp 0.9.0 release.
The new vignette "Rcpp-modules" documents the current feature set of
Rcpp modules.
\item The new vignette "Rcpp-package" details the steps involved in making a
package that uses Rcpp.
\item The new vignette "Rcpp-FAQ" collects a number of frequently asked
questions and answers about Rcpp.
\item The new vignette "Rcpp-extending" documents how to extend Rcpp
with user defined types or types from third party libraries. Based on
our experience with RcppArmadillo
\item Rcpp.package.skeleton has been improved to generate a package using
an Rcpp module, controlled by the "module" argument
\item Evaluating a call inside an environment did not work properly
\item cppfunction has been withdrawn since the introduction of the more
flexible cxxfunction in the inline package (0.3.5). Rcpp no longer
depends on inline since many uses of Rcpp do not require inline at
all. We still use inline for unit tests but this is now handled
locally in the unit tests loader runTests.R.
Users of the now-withdrawn function cppfunction can redefine it as:
cppfunction <- function(...) cxxfunction( ..., plugin = "Rcpp" )
\item Support for std::complex was incomplete and has been enhanced.
\item The methods XPtr<T>::getTag and XPtr<T>::getProtected are deprecated,
and will be removed in Rcpp 0.8.2. The methods tag() and prot() should
be used instead. tag() and prot() support both LHS and RHS use.
\item END_RCPP now returns the R Nil value; new macro VOID_END_RCPP
replicates prior behaviour
}
}
\section{Changes in Rcpp version 0.8.0 (2010-05-17)}{
\itemize{
\item All Rcpp headers have been moved to the inst/include directory,
allowing use of 'LinkingTo: Rcpp'. But the Makevars and Makevars.win
are still needed to link against the user library.
\item Automatic exception forwarding has been withdrawn because of
portability issues (as it did not work on the Windows platform).
Exception forwarding is still possible but is now based on explicit
code of the form:
try \{
// user code
\} catch( std::exception& __ex__)\{
forward_exception_to_r( __ex__ ) ;
Alternatively, the macro BEGIN_RCPP and END_RCPP can use used to enclose
code so that it captures exceptions and forward them to R.
BEGIN_RCPP
// user code
END_RCPP
\item new __experimental__ macros
The macros RCPP_FUNCTION_0, ..., RCPP_FUNCTION_65 to help creating C++
functions hiding some code repetition:
RCPP_FUNCTION_2( int, foobar, int x, int y)\{
return x + y ;
The first argument is the output type, the second argument is the
name of the function, and the other arguments are arguments of the
C++ function. Behind the scenes, the RCPP_FUNCTION_2 macro creates an
intermediate function compatible with the .Call interface and handles
exceptions
Similarly, the macros RCPP_FUNCTION_VOID_0, ..., RCPP_FUNCTION_VOID_65
can be used when the C++ function to create returns void. The generated
R function will return R_NilValue in this case.
RCPP_FUNCTION_VOID_2( foobar, std::string foo )\{
// do something with foo
The macro RCPP_XP_FIELD_GET generates a .Call compatible function that
can be used to access the value of a field of a class handled by an
external pointer. For example with a class like this:
class Foo\{
public:
int bar ;
RCPP_XP_FIELD_GET( Foo_bar_get, Foo, bar ) ;
RCPP_XP_FIELD_GET will generate the .Call compatible function called
Foo_bar_get that can be used to retrieve the value of bar.
The macro RCPP_FIELD_SET generates a .Call compatible function that
can be used to set the value of a field. For example:
RCPP_XP_FIELD_SET( Foo_bar_set, Foo, bar ) ;
generates the .Call compatible function called "Foo_bar_set" that
can be used to set the value of bar
The macro RCPP_XP_FIELD generates both getter and setter. For example
RCPP_XP_FIELD( Foo_bar, Foo, bar )
generates the .Call compatible Foo_bar_get and Foo_bar_set using the
macros RCPP_XP_FIELD_GET and RCPP_XP_FIELD_SET previously described
The macros RCPP_XP_METHOD_0, ..., RCPP_XP_METHOD_65 facilitate
calling a method of an object that is stored in an external pointer. For
example:
RCPP_XP_METHOD_0( foobar, std::vector<int> , size )
creates the .Call compatible function called foobar that calls the
size method of the std::vector<int> class. This uses the Rcpp::XPtr<
std::vector<int> > class.
The macros RCPP_XP_METHOD_CAST_0, ... is similar but the result of
the method called is first passed to another function before being
wrapped to a SEXP. For example, if one wanted the result as a double
RCPP_XP_METHOD_CAST_0( foobar, std::vector<int> , size, double )
The macros RCPP_XP_METHOD_VOID_0, ... are used when calling the
method is only used for its side effect.
RCPP_XP_METHOD_VOID_1( foobar, std::vector<int>, push_back )
Assuming xp is an external pointer to a std::vector<int>, this could
be called like this :
.Call( "foobar", xp, 2L )
\item Rcpp now depends on inline (>= 0.3.4)
\item A new R function "cppfunction" was added which invokes cfunction from
inline with focus on Rcpp usage (enforcing .Call, adding the Rcpp
namespace, set up exception forwarding). cppfunction uses BEGIN_RCPP
and END_RCPP macros to enclose the user code
\item new class Rcpp::Formula to help building formulae in C++
\item new class Rcpp::DataFrame to help building data frames in C++
\item Rcpp.package.skeleton gains an argument "example_code" and can now be
used with an empty list, so that only the skeleton is generated. It
has also been reworked to show how to use LinkingTo: Rcpp
\item wrap now supports containers of the following types: long, long double,
unsigned long, short and unsigned short which are silently converted
to the most acceptable R type.
\item Revert to not double-quote protecting the path on Windows as this
breaks backticks expansion used in Makevars.win etc
\item Exceptions classes have been moved out of Rcpp classes,
e.g. Rcpp::RObject::not_a_matrix is now Rcpp::not_a_matrix
}
}
\section{Changes in Rcpp version 0.7.12 (2010-04-16)}{
\itemize{
\item Undo shQuote() to protect Windows path names (which may contain
spaces) as backticks use is still broken; use of $(shell ...) works
}
}
\section{Changes in Rcpp version 0.7.11 (2010-03-26)}{
\itemize{
\item Vector<> gains a set of templated factory methods "create" which
takes up to 20 arguments and can create named or unnamed vectors.
This greatly facilitates creating objects that are returned to R.
\item Matrix now has a diag() method to create diagonal matrices, and
a new constructor using a single int to create square matrices
\item Vector now has a new fill() method to propagate a single value
\item Named is no more a class but a templated function. Both interfaces
Named(.,.) and Named(.)=. are preserved, and extended to work also on
simple vectors (through Vector<>::create)
\item Applied patch by Alistair Gee to make ColDatum more robust
\item Fixed a bug in Vector that caused random behavior due to the lack of
copy constructor in the Vector template
}
}
\section{Changes in Rcpp version 0.7.10 (2010-03-15)}{
\itemize{
\item new class Rcpp::S4 whose constructor checks if the object is an S4
object
\item maximum number of templated arguments to the pairlist function, the
DottedPair constructor, the Language constructor and the Pairlist
constructor has been updated to 20 (was 5) and a script has been
added to the source tree should we want to change it again
\item use shQuote() to protect Windows path names (which may contain spaces)
}
}
\section{Changes in Rcpp version 0.7.9 (2010-03-12)}{
\itemize{
\item Another small improvement to Windows build flags
\item bugfix on 64 bit platforms. The traits classes (wrap_type_traits, etc)
used size_t when they needed to actually use unsigned int
\item fixed pre gcc 4.3 compatibility. The trait class that was used to
identify if a type is convertible to another had too many false
positives on pre gcc 4.3 (no tr1 or c++0x features). fixed by
implementing the section 2.7 of "Modern C++ Design" book.
}
}
\section{Changes in Rcpp version 0.7.8 (2010-03-09)}{
\itemize{
\item All vector classes are now generated from the same template class
Rcpp::Vector<int RTYPE> where RTYPE is one of LGLSXP, RAWSXP, STRSXP,
INTSXP, REALSXP, CPLXSXP, VECSXP and EXPRSXP. typedef are still
available : IntegerVector, ... All vector classes gain methods
inspired from the std::vector template : push_back, push_front,
erase, insert
\item New template class Rcpp::Matrix<RTYPE> deriving from
Rcpp::Vector<RTYPE>. These classes have the same functionality
as Vector but have a different set of constructors which checks
that the input SEXP is a matrix. Matrix<> however does/can not
guarantee that the object will always be a matrix. typedef
are defined for convenience: Matrix<INTSXP> is IntegerMatrix, etc...
\item New class Rcpp::Row<int RTYPE> that represents a row of a matrix
of the same type. Row contains a reference to the underlying
Vector and exposes a nested iterator type that allows use of
STL algorithms on each element of a matrix row. The Vector class
gains a row(int) method that returns a Row instance. Usage
examples are available in the runit.Row.R unit test file
\item New class Rcpp::Column<int RTYPE> that represents a column of a
matrix. (similar to Rcpp::Row<int RTYPE>). Usage examples are
available in the runit.Column.R unit test file
\item The Rcpp::as template function has been reworked to be more
generic. It now handles more STL containers, such as deque and
list, and the genericity can be used to implement as for more
types. The package RcppArmadillo has examples of this
\item new template class Rcpp::fixed_call that can be used in STL algorithms
such as std::generate.
\item RcppExample et al have been moved to a new package RcppExamples;
src/Makevars and src/Makevars.win simplified accordingly
\item New class Rcpp::StringTransformer and helper function
Rcpp::make_string_transformer that can be used to create a function
that transforms a string character by character. For example
Rcpp::make_string_transformer(tolower) transforms each character
using tolower. The RcppExamples package has an example of this.
\item Improved src/Makevars.win thanks to Brian Ripley
\item New examples for 'fast lm' using compiled code:
- using GNU GSL and a C interface
- using Armadillo (http://arma.sf.net) and a C++ interface
Armadillo is seen as faster for lack of extra copying
\item A new package RcppArmadillo (to be released shortly) now serves
as a concrete example on how to extend Rcpp to work with a modern
C++ library such as the heavily-templated Armadillo library
\item Added a new vignette 'Rcpp-introduction' based on a just-submitted
overview article on Rcpp
}
}
\section{Changes in Rcpp version 0.7.7 (2010-02-14)}{
\itemize{
\item new template classes Rcpp::unary_call and Rcpp::binary_call
that facilitates using R language calls together
with STL algorithms.
\item fixed a bug in Language constructors taking a string as their
first argument. The created call was wrong.
}
}
\section{Changes in Rcpp version 0.7.6 (2010-02-12)}{
\itemize{
\item SEXP_Vector (and ExpressionVector and GenericVector, a.k.a List) now
have methods push_front, push_back and insert that are templated
\item SEXP_Vector now has int- and range-valued erase() members
\item Environment class has a default constructor (for RInside)
\item SEXP_Vector_Base factored out of SEXP_Vector (Effect. C++ #44)
\item SEXP_Vector_Base::iterator added as well as begin() and end()
so that STL algorithms can be applied to Rcpp objects
\item CharacterVector gains a random access iterator, begin() and end() to
support STL algorithms; iterator dereferences to a StringProxy
\item Restore Windows build; successfully tested on 32 and 64 bit;
\item Small fixes to inst/skeleton files for bootstrapping a package
\item RObject::asFoo deprecated in favour of Rcpp::as<Foo>
}
}
\section{Changes in Rcpp version 0.7.5 (2010-02-08)}{
\itemize{
\item wrap has been much improved. wrappable types now are :
- primitive types : int, double, Rbyte, Rcomplex, float, bool
- std::string
- STL containers which have iterators over wrappable types:
(e.g. std::vector<T>, std::deque<T>, std::list<T>, etc ...).
- STL maps keyed by std::string, e.g std::map<std::string,T>
- classes that have implicit conversion to SEXP
- classes for which the wrap template is fully or partly specialized
This allows composition, so for example this class is wrappable:
std::vector< std::map<std::string,T> > (if T is wrappable)
\item The range based version of wrap is now exposed at the Rcpp::
level with the following interface :
Rcpp::wrap( InputIterator first, InputIterator last )
This is dispatched internally to the most appropriate implementation
using traits
\item a new namespace Rcpp::traits has been added to host the various
type traits used by wrap
\item The doxygen documentation now shows the examples
\item A new file inst/THANKS acknowledges the kind help we got from others
\item The RcppSexp has been removed from the library.
\item The methods RObject::asFoo are deprecated and will be removed
in the next version. The alternative is to use as<Foo>.
\item The method RObject::slot can now be used to get or set the
associated slot. This is one more example of the proxy pattern
\item Rcpp::VectorBase gains a names() method that allows getting/setting
the names of a vector. This is yet another example of the
proxy pattern.
\item Rcpp::DottedPair gains templated operator<< and operator>> that
allow wrap and push_back or wrap and push_front of an object
\item Rcpp::DottedPair, Rcpp::Language, Rcpp::Pairlist are less
dependent on C++0x features. They gain constructors with up
to 5 templated arguments. 5 was chosen arbitrarily and might
be updated upon request.
\item function calls by the Rcpp::Function class is less dependent
on C++0x. It is now possible to call a function with up to
5 templated arguments (candidate for implicit wrap)
\item added support for 64-bit Windows (thanks to Brian Ripley and Uwe Ligges)
}
}
\section{Changes in Rcpp version 0.7.4 (2010-01-30)}{
\itemize{
\item matrix-like indexing using operator() for all vector
types : IntegerVector, NumericVector, RawVector, CharacterVector
LogicalVector, GenericVector and ExpressionVector.
\item new class Rcpp::Dimension to support creation of vectors with
dimensions. All vector classes gain a constructor taking a
Dimension reference.
\item an intermediate template class "SimpleVector" has been added. All
simple vector classes are now generated from the SimpleVector
template : IntegerVector, NumericVector, RawVector, CharacterVector
LogicalVector.
\item an intermediate template class "SEXP_Vector" has been added to
generate GenericVector and ExpressionVector.
\item the clone template function was introduced to explicitly
clone an RObject by duplicating the SEXP it encapsulates.
\item even smarter wrap programming using traits and template
meta-programming using a private header to be included only in
RcppCommon.h
\item the as template is now smarter. The template now attempts to
build an object of the requested template parameter T by using the
constructor for the type taking a SEXP. This allows third party code
to create a class Foo with a constructor Foo(SEXP) to have
as<Foo> for free.
\item wrap becomes a template. For an object of type T, wrap<T> uses
implicit conversion to SEXP to first convert the object to a SEXP
and then uses the wrap(SEXP) function. This allows third party
code creating a class Bar with an operator SEXP() to have
wrap for free.
\item all specializations of wrap : wrap<double>, wrap< vector<double> >
use coercion to deal with missing values (NA) appropriately.
\item configure has been withdrawn. C++0x features can now be activated
by setting the RCPP_CXX0X environment variable to "yes".
\item new template r_cast<int> to facilitate conversion of one SEXP
type to another. This is mostly intended for internal use and
is used on all vector classes
\item Environment now takes advantage of the augmented smartness
of as and wrap templates. If as<Foo> makes sense, one can
directly extract a Foo from the environment. If wrap<Bar> makes
sense then one can insert a Bar directly into the environment.
Foo foo = env["x"] ; /* as<Foo> is used */
Bar bar ;
env["y"] = bar ; /* wrap<Bar> is used */
\item Environment::assign becomes a template and also uses wrap to
create a suitable SEXP
\item Many more unit tests for the new features; also added unit tests
for older API
}
}
\section{Changes in Rcpp version 0.7.3 (2010-01-21)}{
\itemize{
\item New R function Rcpp.package.skeleton, modelled after
utils::package.skeleton to help creating a package with support
for Rcpp use.
\item indexing is now faster for simple vectors due to inlining of
the operator[] and caching the array pointer
\item The class Rcpp::VectorBase was introduced. All vector classes
derive from it. The class handles behaviour that is common
to all vector types: length, names, etc ...
\item exception forwarding is extended to compilers other than GCC
but default values are used for the exception class
and the exception message, because we don't know how to do it.
\item Improved detection of C++0x capabilities
\item Rcpp::Pairlist gains a default constructor
\item Rcpp::Environment gains a new_child method to create a new
environment whose parent is this
\item Rcpp::Environment::Binding gains a templated implicit
conversion operator
\item Rcpp::ExpressionVector gains an eval method to evaluate itself
\item Rcpp::ExpressionVector gains a constructor taking a std::string
representing some R code to parse.
\item Rcpp::GenericVector::Proxy gains an assignment operator to deal
with Environment::Proxy objects
\item Rcpp::LdFlags() now defaults to static linking OS X, as it already
did on Windows; this default can be overridden.
}
}
\section{Changes in Rcpp version 0.7.2 (2010-01-12)}{
\itemize{
\item a new benchmark was added to the examples directory
around the classic convolution example from
Writing R extensions to compare C and C++ implementations
\item Rcpp::CharacterVector::StringProxy gains a += operator
\item Rcpp::Environment gains an operator[](string) to get/set
objects from the environment. operator[] returns an object
of class Rcpp::Environment::Binding which implements the proxy
pattern. Inspired from Item 30 of 'More Effective C++'
\item Rcpp::Pairlist and Rcpp::Language gain an operator[](int)
also using the proxy pattern
\item Rcpp::RObject.attr can now be used on the rhs or the lhs, to get
or set an attribute. This also uses the proxy pattern
\item Rcpp::Pairlist and Rcpp::Language gain new methods push_back
replace, length, size, remove, insert
\item wrap now returns an object of a suitable class, not just RObject
anymore. For example wrap( bool ) returns a LogicalVector
\item Rcpp::RObject gains methods to deal with S4 objects : isS4,
slot and hasSlot
\item new class Rcpp::ComplexVector to manage complex vectors (CPLXSXP)
\item new class Rcpp::Promise to manage promises (PROMSXP)
\item new class Rcpp::ExpressionVector to manage expression vectors
(EXPRSXP)
\item new class Rcpp::GenericVector to manage generic vectors, a.k.a
lists (VECSXP)
\item new class Rcpp::IntegerVector to manage integer vectors (INTSXP)
\item new class Rcpp::NumericVector to manage numeric vectors (REALSXP)
\item new class Rcpp::RawVector to manage raw vectors (RAWSXP)
\item new class Rcpp::CharacterVector to manage character vectors (STRSXP)
\item new class Rcpp::Function to manage functions
(CLOSXP, SPECIALSXP, BUILTINSXP)
\item new class Rcpp::Pairlist to manage pair lists (LISTSXP)
\item new class Rcpp::Language to manage calls (LANGSXP)
\item new specializations of wrap to deal with std::initializer lists
only available with GCC >= 4.4
\item new R function Rcpp:::capabilities that can query if various
features are available : exception handling, variadic templates
initializer lists
\item new set of functions wrap(T) converting from T to RObject
\item new template function as<T> that can be used to convert a SEXP
to type T. Many specializations implemented to deal with
C++ builtin and stl types. Factored out of RObject
\item new class Rcpp::Named to deal with named objects
in a pairlist, or a call
\item new class Rcpp::Symbol to manage symbols (SYMSXP)
\item The garbage collection has been improved and is now automatic
and hidden. The user needs not to worry about it at all.
\item Rcpp::Environment(SEXP) uses the as.environment R function
\item Doxygen-generated documentation is no longer included as it is both
too large and too volatile. Zipfiles are provided on the website.
}
}
\section{Changes in Rcpp version 0.7.1 (2010-01-02)}{
\itemize{
\item Romain is now a co-author of Rcpp
\item New base class Rcpp::RObject replace RcppSexp (which is provided for
backwards compatibility)
\item RObject has simple wrappers for object creation and conversion to SEXP
\item New classes Rcpp::Evaluator and Rcpp::Environment for expression
evaluation and environment access, respectively
\item New class Rcpp::XPtr for external pointers
\item Enhanced exception handling allows for trapping of exceptions outside
of try/catch blocks
\item Namespace support with a new namespace 'Rcpp'
\item Unit tests for most of the new classes, based on the RUnit package
\item Inline support now provided by the updated inline package, so a new
Depends on 'inline (>= 0.3.4)' replaces the code that was
temporarily in Rcpp
}
}
\section{Changes in Rcpp version 0.7.0 (2009-12-19)}{
\itemize{
\item Inline support via a modified version of 'cfunction' from Oleg
Sklyar's 'inline' package: simple C++ programs can now be compiled,
linked and loaded automagically from the R prompt, including support
for external packages. Also works on Windows (with R-tools installed)
\item New examples for the inline support based on 'Intro to HPC' tutorials
\item New type RcppSexp for simple int, double, std::string scalars and vectors
\item Every class is now in its own header and source file
\item Fix to RcppParams.Rd thanks to Frank S. Thomas
\item RcppVersion.R removed as redundant given DESCRIPTION and read.dcf()
\item Switched to R_PreserveObject and R_ReleaseObject for RcppSexp with
thanks to Romain
\item Licensing changed from LGPL 2.1 (or later) to GPL 2 (or later), file
COPYING updated
}
}
\section{Changes in Rcpp version 0.6.8 (2009-11-19)}{
\itemize{
\item Several classes now split off into their own header and source files
\item New header file RcppCommon.h regrouping common defines and includes
\item Makevars\{,.win\} updated to reflect src/ reorg
}
}
\section{Changes in Rcpp version 0.6.7 (2009-11-08)}{
\itemize{
\item New class RcppList for simple lists and data structures of different
types and dimensions, useful for RProtoBuf project on R-Forge
\item Started to split classes into their own header and source files
\item Added short README file about history and status
\item Small documentation markup fix thanks to Kurt; updated doxygen docs
\item New examples directory functionCallback/ for R function passed to C++
and being called
}
}
\section{Changes in Rcpp version 0.6.6 (2009-08-03)}{
\itemize{
\item Updated Doxygen documentation
\item RcppParams class gains a new exists() member function
}
}
\section{Changes in Rcpp version 0.6.5 (2009-04-01)}{
\itemize{
\item Small OS X build correction using R_ARCH variable
\item Include LGPL license as file COPYING
}
}
\section{Changes in Rcpp version 0.6.4 (2009-03-01)}{
\itemize{
\item Use std:: namespace throughout instead of 'using namespace std'
\item Define R_NO_REMAP so that R provides Rf_length() etc in lieu of length()
to minimise clashes with other projects having similar functions
\item Include Doxygen documentation, and Doxygen configuration file
\item Minor Windows build fix (with thanks to Uwe and Simon)
}
}
\section{Changes in Rcpp version 0.6.3 (2009-01-09)}{
\itemize{
\item OS X build fix with thanks to Simon
\item Added 'view-only' classes for int and double vector and matrix classes
as well as string vector classes, kindly suggested / provided by
David Reiss
\item Add two shorter helper functions Rcpp:::CxxFlags() and
Rcpp:::LdFlags() for compilation and linker flags
}
}
\section{Changes in Rcpp version 0.6.2 (2008-12-02)}{
\itemize{
\item Small but important fix for Linux builds in Rcpp:::RcppLdFlags()
}
}
\section{Changes in Rcpp version 0.6.1 (2008-11-30)}{
\itemize{
\item Now src/Makevars replaces src/Makefile, this brings proper OS X
multi-arch support with thanks to Simon
\item Old #ifdef statements related to QuantLib removed; Rcpp is now
decoupled from QuantLib headers yet can still be used by RQuantLib
\item Added RcppLdPath() to return the lib. directory patch and on Linux
the rpath settings
\item Added new RcppVectorExample()
\item Augmented documentation on usage in Rcpp-package.Rd
}
}
\section{Changes in Rcpp version 0.6.0 (2008-11-05)}{
\itemize{
\item New maintainer, taking over RcppTemplate (which has been without an
update since Nov 2006) under its initial name Rcpp
\item New files src/Makefile\{,.win\} including functionality from both
configure and RcppSrc/Makefile; we now build two libraries, one for
use by the package which also runs the example, and one for users to
link against, and removed src/Makevars.in
\item Files src/Rcpp.\{cpp,h\} moved in from ../RcppSrc
\item Added new class RcppDatetime corresponding to POSIXct in with full
support for microsecond time resolution between R and C++
\item Several new manual pages added
\item Removed configure\{,.in,.win\} as src/Makefile* can handle this more
easily
\item Minor cleanup and reformatting for DESCRIPTION, Date: now uses
svn:keyword Date property
\item Renamed RcppTemplateVersion to RcppVersion, deleted RcppDemo
\item Directory demo/ removed as vignette("RcppAPI") is easier and more
reliable to show vignette documentation
\item RcppTemplateDemo() removed from R/zzz.R, vignette("RcppAPI") is easier;
man/RcppTemplateDemo.Rd removed as well
\item Some more code reindentation and formatting to R default arguments,
some renamed from RcppTemplate* to Rcpp*
\item Added footnote onto titlepage of inst/doc/RcppAPI.\{Rnw,pdf\} about how
this document has not (yet) been updated along with the changes made
}
}
|
# Stolen from statebins
geom_rtile <- function(mapping = NULL, data = NULL,
                       stat = "identity", position = "identity",
                       radius = grid::unit(6, "pt"),
                       ...,
                       na.rm = FALSE,
                       show.legend = NA,
                       inherit.aes = TRUE) {
  # Layer constructor for rounded tiles (adapted from statebins).
  # Collect the geom-specific parameters up front; any extra arguments
  # supplied through `...` are forwarded to the layer unchanged.
  layer_params <- list(
    radius = radius,
    na.rm = na.rm,
    ...
  )

  ggplot2::layer(
    geom = GeomRtile,
    data = data,
    mapping = mapping,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
# ggproto object backing geom_rtile().  Inherits its drawing behaviour
# from GeomRrect (defined elsewhere in the package); this object only
# converts tile-style centre + width/height positions into the
# xmin/xmax/ymin/ymax extents that GeomRrect draws.
GeomRtile <- ggplot2::ggproto("GeomRtile", GeomRrect,
extra_params = c("na.rm", "width", "height"),
setup_data = function(data, params) {
# Fall back from per-row aesthetics to layer params, then to the data
# resolution (smallest spacing between distinct x / y values).
# `%||%` is the null-default operator.
data$width <- data$width %||% params$width %||% ggplot2::resolution(data$x, FALSE)
data$height <- data$height %||% params$height %||% ggplot2::resolution(data$y, FALSE)
# Convert centre + size into corner coordinates and drop the
# now-redundant width/height columns.
transform(data,
xmin = x - width / 2, xmax = x + width / 2, width = NULL,
ymin = y - height / 2, ymax = y + height / 2, height = NULL
)
},
default_aes = ggplot2::aes(
fill = "grey20", colour = NA, size = 0.1, linetype = 1, alpha = NA
),
required_aes = c("x", "y"),
draw_key = ggplot2::draw_key_polygon
)
| /R/geom_rtile.R | no_license | delabj/AfricaCountryBins | R | false | false | 1,691 | r | # Stolen from statebins
# Layer constructor for rounded tiles.  Works like a tile geom but draws
# via GeomRtile (rounded rectangles); `radius` controls the corner
# rounding and extra arguments in `...` are passed on as layer
# parameters.
geom_rtile <- function(mapping = NULL, data = NULL,
stat = "identity", position = "identity",
radius = grid::unit(6, "pt"),
...,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
ggplot2::layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomRtile,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
radius = radius,
na.rm = na.rm,
...
)
)
}
# ggproto object backing geom_rtile().  Inherits its drawing behaviour
# from GeomRrect (defined elsewhere in the package); this object only
# converts tile-style centre + width/height positions into the
# xmin/xmax/ymin/ymax extents that GeomRrect draws.
GeomRtile <- ggplot2::ggproto("GeomRtile", GeomRrect,
extra_params = c("na.rm", "width", "height"),
setup_data = function(data, params) {
# Fall back from per-row aesthetics to layer params, then to the data
# resolution (smallest spacing between distinct x / y values).
# `%||%` is the null-default operator.
data$width <- data$width %||% params$width %||% ggplot2::resolution(data$x, FALSE)
data$height <- data$height %||% params$height %||% ggplot2::resolution(data$y, FALSE)
# Convert centre + size into corner coordinates and drop the
# now-redundant width/height columns.
transform(data,
xmin = x - width / 2, xmax = x + width / 2, width = NULL,
ymin = y - height / 2, ymax = y + height / 2, height = NULL
)
},
default_aes = ggplot2::aes(
fill = "grey20", colour = NA, size = 0.1, linetype = 1, alpha = NA
),
required_aes = c("x", "y"),
draw_key = ggplot2::draw_key_polygon
)
|
#########################################################################
#
# Rasterize vector data for European protected areas
#
# Samantha Franks
# 24 Dec 2013
#
#########################################################################
# NOTE(review): rm(list = ls()) clears the user's global environment as
# a side effect; prefer running this script in a fresh R session.  Kept
# here to preserve the original workflow.
rm(list=ls())

### LOAD PACKAGES
library(sp)
library(raster)
library(rgdal)
library(rgeos)

### Load shapefile

# Set the data directory: local Windows machine vs. compute cluster.
# A single braced if/else assignment replaces the original pair of
# unbraced `if` statements, which were easy to mis-edit.
cluster <- FALSE
GISwd <- if (cluster) {
  "/users1/samf/cuckoos"
} else {
  "D:/Sam Franks/GIS/cuckoos"
}
# Read the pre-projected protected-areas polygons (EPSG:3035)
PAs <- readOGR(GISwd, "terrestrial PAs mainland W Europe corine countries only EPSG 3035")
### Clip shapefile to extent of corine map
# crop with SPDF (rather than raster) does I think the same as gIntersection in library(rgeos)
#clipPA <- drawExtent()
clipPA <- extent(2483500,5890400,1276600,4286800) # same extent as Europeraster (corine clipped raster layer)
EuropePA <- crop(PAs,clipPA)
### Rasterize the SpatialPolygons shapefile - 50m x 50m raster output
## Set up a raster "template" to use in rasterize()
extpoly <- extent(EuropePA)
col50 <- ceiling((extpoly@xmax-extpoly@xmin)/50) # create number of columns for raster (aim for approx 50m blocks)
row50 <- ceiling((extpoly@ymax-extpoly@ymin)/50) # create number of rows
newraster50 <- raster(extpoly, ncol=col50, nrow=row50)
## Rasterize the shapefile polygons
# Time the rasterisation -- this is the slow step
rastertime50 <- system.time({
PArasterized50 <-rasterize(EuropePA, newraster50)
})
# Print the elapsed time
rastertime50
# colors <- c("white",rep("blue",9407))
# rpoly50@legend@colortable <- colors
#newrpoly <- crop(rpoly,clipUKr)
# default of rpoly colortable is logical(0)
# Write the rasterised layer out as a GeoTIFF next to the input data
setwd(GISwd)
writeRaster(PArasterized50, filename="Europe PA raster 50m x 50m.tif", format="GTiff", overwrite=TRUE)
######################################################################
################################### TEST CODE ###############################
######################################################################
# ### Load shapefile
#
# GISwd <- c("D:/Sam Franks/GIS/cuckoos")
#
# testshp <- readOGR(GISwd, "terrestrial PAs UK EPSG 4326")
#
# #proj4string(testshp) <- CRS("+init=epsg:4326")
#
# PAtrans <- spTransform(testshp,CRS("+init=epsg:3035"))
#
# ## crop polygon - crop with SPDF does I think the same as gIntersection in library(rgeos)
# #clipPA <- drawExtent()
# clipPA <- extent(3039340,3879513,3008405,4308673)
# UK.PAs <- crop(PAtrans,clipPA)
#
# ## create SpatialLinesDataFrame
# UKPAlines <- as(UK.PAs, "SpatialLinesDataFrame")
#
# ### Create raster using SpatialLines
# ## Set up a raster "template" to use in rasterize()
# extlines <- extent(UKPAlines)
# col <- ceiling((extlines@xmax-extlines@xmin)/100) # create number of columns for raster (aim for approx 100m blocks)
# row <- ceiling((extlines@ymax-extlines@ymin)/100) # create number of rows
# newrasterlines <- raster(extlines, ncol=col, nrow=row)
#
# ## Rasterize the shapefile lines
#
# rastertimelines <- system.time({
#
# rlines <-rasterize(UKPAlines, newrasterlines)
#
# })
#
# ### Create raster using SpatialPolygons - 100m x 100m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col <- ceiling((extpoly@xmax-extpoly@xmin)/100) # create number of columns for raster (aim for approx 100m blocks)
# row <- ceiling((extpoly@ymax-extpoly@ymin)/100) # create number of rows
# newrasterpoly <- raster(extpoly, ncol=col, nrow=row)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly <- system.time({
#
# rpoly <-rasterize(UK.PAs, newrasterpoly)
#
# })
#
# colors <- c("white",rep("blue",9407))
# rpoly@legend@colortable <- colors
#
# #newrpoly <- crop(rpoly,clipUKr)
# # default of rpoly colortable is logical(0)
#
# setwd(GISwd)
# writeRaster(rpoly, filename="UK PAs test raster 100m x 100m.tif", format="GTiff", overwrite=TRUE)
#
# ### Create raster using SpatialPolygons - 50m x 50m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col50 <- ceiling((extpoly@xmax-extpoly@xmin)/50) # create number of columns for raster (aim for approx 100m blocks)
# row50 <- ceiling((extpoly@ymax-extpoly@ymin)/50) # create number of rows
# newrasterpoly50 <- raster(extpoly, ncol=col50, nrow=row50)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly50 <- system.time({
#
# rpoly50 <-rasterize(UK.PAs, newrasterpoly50)
#
# })
#
# colors <- c("white",rep("blue",9407))
# rpoly50@legend@colortable <- colors
#
# #newrpoly <- crop(rpoly,clipUKr)
# # default of rpoly colortable is logical(0)
#
# setwd(GISwd)
# writeRaster(rpoly50, filename="UK PAs test raster 50m x 50m.tif", format="GTiff", overwrite=TRUE)
#
#
# ### Create raster using SpatialPolygons - 200m x 200m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col200 <- ceiling((extpoly@xmax-extpoly@xmin)/200) # create number of columns for raster (aim for approx 100m blocks)
# row200 <- ceiling((extpoly@ymax-extpoly@ymin)/200) # create number of rows
# newrasterpoly200 <- raster(extpoly, ncol=col200, nrow=row200)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly200 <- system.time({
#
# rpoly200 <-rasterize(UK.PAs, newrasterpoly200)
#
# })
#
# ### Create raster using SpatialPolygons - 500m x 500m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col500 <- ceiling((extpoly@xmax-extpoly@xmin)/500) # create number of columns for raster (aim for approx 100m blocks)
# row500 <- ceiling((extpoly@ymax-extpoly@ymin)/500) # create number of rows
# newrasterpoly500 <- raster(extpoly, ncol=col500, nrow=row500)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly500 <- system.time({
#
# rpoly500 <-rasterize(UK.PAs, newrasterpoly200)
#
# })
#
#
# ###################################
# ###################################
#
# proj4string(shp) <- CRS("+init=epsg:4326")
# PAtrans <- spTransform(shp,CRS("+init=epsg:3035"))
#
#
# ### create color table for raster
#
# colors <- c("white",rep("blue",9407))
#
#
# rpoly@legend@colortable <- colors
#
# setwd(GISwd)
# tiff("UK SPA test plot 50m x 50m .tiff", width=3000, height=3000, units="px", res=300)
# plot(rpoly)
# dev.off()
#
# rr@legend@colortable <- logical(0)
#
# tiff("UK SPA test plot 2.tiff", width=3000, height=3000, units="px", res=300)
# plot(rr, col="blue")
# dev.off()
#
# tiff("UK SPA test plot vector.tiff", width=3000, height=3000, units="px", res=300)
# plot(UK.PAs, col="blue")
# dev.off()
#
# #################
#
# PAs <- readOGR(GISwd, "terrestrial PAs mainland W Europe corine countries only EPSG 3035")
#
#
# rastertime <- system.time({
#
# ## Set up a raster "template" to use in rasterize()
# ext <- extent(1500000,7400000,,5500000)
# newraster <- raster(ext, ncol=46000, nrow=59000)
#
# ## Rasterize the shapefile
# rr <-rasterize(testshp, newraster)
#
# })
#
# ## A couple of outputs
# writeRaster(rr, "teow.asc")
# plot(rr)
#
# testshp <- readOGR("C:/Users/samf/Documents/GIS/cuckoos/official_teow","wwf_terr_ecos")
#
# ## Set up a raster "template" to use in rasterize()
# ext <- extent (-95, -50, 24, 63)
# xy <- abs(apply(as.matrix(bbox(ext)), 1, diff))
# n <- 5
# ras <- raster(ext, ncol=xy[1]*5, nrow=xy[2]*5)
#
# ## Rasterize the shapefile
# rr <-rasterize(testshp, ras)
#
# ## A couple of outputs
# writeRaster(rr, "teow.asc")
# plot(rr)
| /rasterize_protected_areas_shapefile.R | no_license | guzhongru/cuckoo_habitatuse_scripts | R | false | false | 7,349 | r | #########################################################################
#
# Rasterize vector data for European protected areas
#
# Samantha Franks
# 24 Dec 2013
#
#########################################################################
# NOTE(review): rm(list = ls()) clears the user's global environment as
# a side effect; prefer running this script in a fresh R session.
rm(list=ls())
### LOAD PACKAGES
library(sp)
library(raster)
library(rgdal)
library(rgeos)
### Load shapefile
# set working directory (alter if working on cluster or not)
cluster <- FALSE
if (!cluster)
GISwd <- c("D:/Sam Franks/GIS/cuckoos")
if (cluster)
GISwd <- c("/users1/samf/cuckoos")
# Read the pre-projected protected-areas polygons (EPSG:3035)
PAs <- readOGR(GISwd, "terrestrial PAs mainland W Europe corine countries only EPSG 3035")
### Clip shapefile to extent of corine map
# crop with SPDF (rather than raster) does I think the same as gIntersection in library(rgeos)
#clipPA <- drawExtent()
clipPA <- extent(2483500,5890400,1276600,4286800) # same extent as Europeraster (corine clipped raster layer)
EuropePA <- crop(PAs,clipPA)
### Rasterize the SpatialPolygons shapefile - 50m x 50m raster output
## Set up a raster "template" to use in rasterize()
extpoly <- extent(EuropePA)
col50 <- ceiling((extpoly@xmax-extpoly@xmin)/50) # create number of columns for raster (aim for approx 50m blocks)
row50 <- ceiling((extpoly@ymax-extpoly@ymin)/50) # create number of rows
newraster50 <- raster(extpoly, ncol=col50, nrow=row50)
## Rasterize the shapefile polygons
# Time the rasterisation -- this is the slow step
rastertime50 <- system.time({
PArasterized50 <-rasterize(EuropePA, newraster50)
})
# Print the elapsed time
rastertime50
# colors <- c("white",rep("blue",9407))
# rpoly50@legend@colortable <- colors
#newrpoly <- crop(rpoly,clipUKr)
# default of rpoly colortable is logical(0)
# Write the rasterised layer out as a GeoTIFF
setwd(GISwd)
writeRaster(PArasterized50, filename="Europe PA raster 50m x 50m.tif", format="GTiff", overwrite=TRUE)
######################################################################
################################### TEST CODE ###############################
######################################################################
# ### Load shapefile
#
# GISwd <- c("D:/Sam Franks/GIS/cuckoos")
#
# testshp <- readOGR(GISwd, "terrestrial PAs UK EPSG 4326")
#
# #proj4string(testshp) <- CRS("+init=epsg:4326")
#
# PAtrans <- spTransform(testshp,CRS("+init=epsg:3035"))
#
# ## crop polygon - crop with SPDF does I think the same as gIntersection in library(rgeos)
# #clipPA <- drawExtent()
# clipPA <- extent(3039340,3879513,3008405,4308673)
# UK.PAs <- crop(PAtrans,clipPA)
#
# ## create SpatialLinesDataFrame
# UKPAlines <- as(UK.PAs, "SpatialLinesDataFrame")
#
# ### Create raster using SpatialLines
# ## Set up a raster "template" to use in rasterize()
# extlines <- extent(UKPAlines)
# col <- ceiling((extlines@xmax-extlines@xmin)/100) # create number of columns for raster (aim for approx 100m blocks)
# row <- ceiling((extlines@ymax-extlines@ymin)/100) # create number of rows
# newrasterlines <- raster(extlines, ncol=col, nrow=row)
#
# ## Rasterize the shapefile lines
#
# rastertimelines <- system.time({
#
# rlines <-rasterize(UKPAlines, newrasterlines)
#
# })
#
# ### Create raster using SpatialPolygons - 100m x 100m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col <- ceiling((extpoly@xmax-extpoly@xmin)/100) # create number of columns for raster (aim for approx 100m blocks)
# row <- ceiling((extpoly@ymax-extpoly@ymin)/100) # create number of rows
# newrasterpoly <- raster(extpoly, ncol=col, nrow=row)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly <- system.time({
#
# rpoly <-rasterize(UK.PAs, newrasterpoly)
#
# })
#
# colors <- c("white",rep("blue",9407))
# rpoly@legend@colortable <- colors
#
# #newrpoly <- crop(rpoly,clipUKr)
# # default of rpoly colortable is logical(0)
#
# setwd(GISwd)
# writeRaster(rpoly, filename="UK PAs test raster 100m x 100m.tif", format="GTiff", overwrite=TRUE)
#
# ### Create raster using SpatialPolygons - 50m x 50m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col50 <- ceiling((extpoly@xmax-extpoly@xmin)/50) # create number of columns for raster (aim for approx 100m blocks)
# row50 <- ceiling((extpoly@ymax-extpoly@ymin)/50) # create number of rows
# newrasterpoly50 <- raster(extpoly, ncol=col50, nrow=row50)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly50 <- system.time({
#
# rpoly50 <-rasterize(UK.PAs, newrasterpoly50)
#
# })
#
# colors <- c("white",rep("blue",9407))
# rpoly50@legend@colortable <- colors
#
# #newrpoly <- crop(rpoly,clipUKr)
# # default of rpoly colortable is logical(0)
#
# setwd(GISwd)
# writeRaster(rpoly50, filename="UK PAs test raster 50m x 50m.tif", format="GTiff", overwrite=TRUE)
#
#
# ### Create raster using SpatialPolygons - 200m x 200m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col200 <- ceiling((extpoly@xmax-extpoly@xmin)/200) # create number of columns for raster (aim for approx 100m blocks)
# row200 <- ceiling((extpoly@ymax-extpoly@ymin)/200) # create number of rows
# newrasterpoly200 <- raster(extpoly, ncol=col200, nrow=row200)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly200 <- system.time({
#
# rpoly200 <-rasterize(UK.PAs, newrasterpoly200)
#
# })
#
# ### Create raster using SpatialPolygons - 500m x 500m
# ## Set up a raster "template" to use in rasterize()
# extpoly <- extent(UK.PAs)
# col500 <- ceiling((extpoly@xmax-extpoly@xmin)/500) # create number of columns for raster (aim for approx 100m blocks)
# row500 <- ceiling((extpoly@ymax-extpoly@ymin)/500) # create number of rows
# newrasterpoly500 <- raster(extpoly, ncol=col500, nrow=row500)
#
# ## Rasterize the shapefile polygons
#
# rastertimepoly500 <- system.time({
#
# rpoly500 <-rasterize(UK.PAs, newrasterpoly200)
#
# })
#
#
# ###################################
# ###################################
#
# proj4string(shp) <- CRS("+init=epsg:4326")
# PAtrans <- spTransform(shp,CRS("+init=epsg:3035"))
#
#
# ### create color table for raster
#
# colors <- c("white",rep("blue",9407))
#
#
# rpoly@legend@colortable <- colors
#
# setwd(GISwd)
# tiff("UK SPA test plot 50m x 50m .tiff", width=3000, height=3000, units="px", res=300)
# plot(rpoly)
# dev.off()
#
# rr@legend@colortable <- logical(0)
#
# tiff("UK SPA test plot 2.tiff", width=3000, height=3000, units="px", res=300)
# plot(rr, col="blue")
# dev.off()
#
# tiff("UK SPA test plot vector.tiff", width=3000, height=3000, units="px", res=300)
# plot(UK.PAs, col="blue")
# dev.off()
#
# #################
#
# PAs <- readOGR(GISwd, "terrestrial PAs mainland W Europe corine countries only EPSG 3035")
#
#
# rastertime <- system.time({
#
# ## Set up a raster "template" to use in rasterize()
# ext <- extent(1500000,7400000,,5500000)
# newraster <- raster(ext, ncol=46000, nrow=59000)
#
# ## Rasterize the shapefile
# rr <-rasterize(testshp, newraster)
#
# })
#
# ## A couple of outputs
# writeRaster(rr, "teow.asc")
# plot(rr)
#
# testshp <- readOGR("C:/Users/samf/Documents/GIS/cuckoos/official_teow","wwf_terr_ecos")
#
# ## Set up a raster "template" to use in rasterize()
# ext <- extent (-95, -50, 24, 63)
# xy <- abs(apply(as.matrix(bbox(ext)), 1, diff))
# n <- 5
# ras <- raster(ext, ncol=xy[1]*5, nrow=xy[2]*5)
#
# ## Rasterize the shapefile
# rr <-rasterize(testshp, ras)
#
# ## A couple of outputs
# writeRaster(rr, "teow.asc")
# plot(rr)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{prox}
\alias{prox}
\title{Perform proximal mapping of rho tau}
\usage{
prox(xi, alpha, tau)
}
\arguments{
\item{xi}{a single number}
\item{alpha}{a number}
\item{tau}{a number between 0 and 1, the quantile of interest}
}
\value{
output of the proximal mapping of rho tau
}
\description{
Perform proximal mapping of rho tau
}
| /man/prox.Rd | permissive | fboehm/openFHDQR | R | false | true | 425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{prox}
\alias{prox}
\title{Perform proximal mapping of rho tau}
\usage{
prox(xi, alpha, tau)
}
\arguments{
\item{xi}{a single number}
\item{alpha}{a number}
\item{tau}{a number between 0 and 1, the quantile of interest}
}
\value{
output of the proximal mapping of rho tau
}
\description{
Perform proximal mapping of rho tau
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bonsai_error_functions.R
\name{CV_Mean_Error}
\alias{CV_Mean_Error}
\title{Error functions}
\usage{
CV_Mean_Error(y = NULL, error)
}
\arguments{
\item{y}{A time series}
\item{error}{the error created by some cross-validation method}
\item{h}{the horizon to forecast}
}
\value{
A list with all of the forecast means
}
\description{
Functions to be used in conjunction with bonsai_calculate_errors
}
| /man/CV_Mean_Error.Rd | no_license | brunocarlin/forecast.bonsai | R | false | true | 478 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bonsai_error_functions.R
\name{CV_Mean_Error}
\alias{CV_Mean_Error}
\title{Error functions}
\usage{
CV_Mean_Error(y = NULL, error)
}
\arguments{
\item{y}{A time series}
\item{error}{the error created by some cross-validation method}
\item{h}{the horizon to forecast}
}
\value{
A list with all of the forecast means
}
\description{
Functions to be used in conjunction with bonsai_calculate_errors
}
|
library(TCGAbiolinks)
library(SummarizedExperiment)
library(tidyverse)
#------------------------------------
# Get samples profiled for BOTH DNA methylation and gene expression
#------------------------------------
# NOTE(review): the original (Chinese) comment said "colon and rectum
# data", but the code queries the TCGA-LGG / TCGA-GBM brain-tumour
# projects -- confirm which cohorts are intended.
lgg.samples <- matchedMetExp("TCGA-LGG", n = 10)
gbm.samples <- matchedMetExp("TCGA-GBM", n = 10)
samples <- c(lgg.samples,gbm.samples)
#-----------------------------------
# 1 - Methylation
# ----------------------------------
# Query the GDC legacy archive for Illumina 450K methylation arrays,
# restricted to the matched samples selected above.
query <- GDCquery(
project = c("TCGA-LGG","TCGA-GBM"),
data.category = "DNA methylation",
platform = "Illumina Human Methylation 450",
legacy = TRUE,
barcode = samples
)
GDCdownload(query)
met <- GDCprepare(query, save = FALSE)
# Use chr9 as an example (keeps the demo data small)
met <- subset(met,subset = as.character(seqnames(met)) %in% c("chr9"))
# Drop probes containing any NA values
met <- subset(met,subset = (rowSums(is.na(assay(met))) == 0))
# Drop duplicated samples: barcode positions 14-16 are the sample/vial
# code, so this removes "01B" vial replicates
met <- met[, substr(colnames(met), 14, 16) != "01B"]
#----------------------------
# Mean methylation
#----------------------------
# Plot mean beta values per project and print the between-group p-value.
TCGAvisualize_meanMethylation(
met,
groupCol = "project_id",
group.legend = "Groups",
filename = NULL,
print.pvalue = TRUE
)
#------- Identify differentially methylated CpG sites ----------
res <- TCGAanalyze_DMC(
met,
# name of the grouping column in colData(met)
groupCol = "project_id",
group1 = "TCGA-GBM",
group2 = "TCGA-LGG",
p.cut = 0.05,
diffmean.cut = 0.15,
save = FALSE,
legend = "State",
# NOTE(review): file name still says COAD_READ although the comparison
# is GBM vs LGG -- confirm intended.
plot.filename = "~/Downloads/COAD_READ_metvolcano.png",
cores = 1
)
#--------------------------
# DNA Methylation heatmap
#-------------------------
library(ComplexHeatmap)
# NOTE(review): the queried samples are TCGA-LGG / TCGA-GBM, but the
# clinical data below is pulled for COAD / READ, so the barcode filter
# will likely match nothing -- confirm which cohorts are intended.
coad_clin <- GDCquery_clinic(project = "TCGA-COAD", type = "Clinical")
read_clin <- GDCquery_clinic(project = "TCGA-READ", type = "Clinical")
use_cols <- c("bcr_patient_barcode", "disease","gender","vital_status","race")

# Combine the two cohorts and keep only patients present in `samples`.
# Fixes: select() with a bare external character vector is deprecated --
# wrap it in all_of(); add_row() takes name-value pairs, not a data
# frame, so appending a second data frame must use bind_rows().
clinical <- coad_clin %>%
  dplyr::select(dplyr::all_of(use_cols)) %>%
  dplyr::bind_rows(dplyr::select(read_clin, dplyr::all_of(use_cols))) %>%
  subset(bcr_patient_barcode %in% substr(samples, 1, 12))

# Keep only the significant (hyper-/hypomethylated) probes
sig_met <- filter(res, status != "Not Significant")
res_data <- subset(met,subset = (rownames(met) %in% rownames(sig_met)))

# Column annotation: clinical covariates per sample
ta <- HeatmapAnnotation(
  df = clinical[, c("disease", "gender", "vital_status", "race")],
  col = list(
    disease = c("COAD" = "grey", "READ" = "black"),
    gender = c("male" = "blue", "female" = "pink")
  ))

# Row annotation: methylation status per probe.  rowAnnotation()'s `df`
# argument expects a data frame whose column name matches the entry in
# `col`, so wrap the status vector in a data frame (the original passed
# a bare vector).
ra <- rowAnnotation(
  df = data.frame(status = sig_met$status),
  col = list(
    "status" =
      c("Hypomethylated" = "orange",
        "Hypermethylated" = "darkgreen")
  ),
  width = unit(1, "cm")
)

heatmap <- Heatmap(
  assay(res_data),
  name = "DNA methylation",
  col = matlab::jet.colors(200),
  show_row_names = FALSE,
  cluster_rows = TRUE,
  cluster_columns = FALSE,
  show_column_names = FALSE,
  bottom_annotation = ta,
  column_title = "DNA Methylation"
)
# Save to png
png("~/Downloads/heatmap.png",width = 600, height = 400)
draw(heatmap, annotation_legend_side = "bottom")
dev.off()
save(sig_met, res_data, file = "~/Downloads/CRC.rda")
#---------------------------
# Motif analysis
#---------------------------
library(rGADEM)
library(GenomicRanges)
library(BSgenome.Hsapiens.UCSC.hg19)
library(motifStack)
# Build +/-100 bp windows centred on each significant probe position
probes <- rowRanges(res_data)
sequence <- GRanges(
seqnames = as.character(seqnames(probes)),
ranges = IRanges(start = start(ranges(probes)) - 100,
end = start(ranges(probes)) + 100),
strand = "*"
)
# Look for motifs de novo with GADEM
gadem <- GADEM(sequence, verbose = FALSE, genome = Hsapiens)
nMotifs(gadem)
# Plot the logo of the first discovered motif
pwm <- getPWM(gadem)
pfm <- new("pfm",mat=pwm[[1]],name="Novel Site 1")
plotMotifLogo(pfm)
# Match the discovered motifs against known motifs (JASPAR)
library(MotIV)
analysis.jaspar <- motifMatch(pwm)
summary(analysis.jaspar)
alignment <- viewAlignments(analysis.jaspar)
print(alignment[[1]])
| /R/TCGA/TCGA_methy_motif.R | no_license | CuncanDeng/learn | R | false | false | 3,808 | r | library(TCGAbiolinks)
library(SummarizedExperiment)
library(tidyverse)
# NOTE(review): this script also calls TCGAbiolinks functions (GDCquery,
# GDCprepare, TCGAanalyze_DMC, ...); library(TCGAbiolinks) must be loaded
# before running -- confirm it is attached elsewhere.
#------------------------------------
# Select samples that have both DNA methylation and expression data
#------------------------------------
# NOTE(review): the original (Chinese) comments refer to colon/rectum
# (COAD/READ) cohorts, but the code downloads lower-grade glioma (LGG) and
# glioblastoma (GBM); the script appears to be adapted from a COAD/READ
# tutorial -- confirm which cohorts are intended.
lgg.samples <- matchedMetExp("TCGA-LGG", n = 10)
gbm.samples <- matchedMetExp("TCGA-GBM", n = 10)
samples <- c(lgg.samples,gbm.samples)
#-----------------------------------
# 1 - Methylation (Illumina 450k, legacy/hg19 data)
# ----------------------------------
query <- GDCquery(
  project = c("TCGA-LGG","TCGA-GBM"),
  data.category = "DNA methylation",
  platform = "Illumina Human Methylation 450",
  legacy = TRUE,
  barcode = samples
)
GDCdownload(query)
met <- GDCprepare(query, save = FALSE)
# Restrict to chr9 as an example (keeps the run time manageable)
met <- subset(met,subset = as.character(seqnames(met)) %in% c("chr9"))
# Drop probes with any missing beta values
met <- subset(met,subset = (rowSums(is.na(assay(met))) == 0))
# Drop duplicate vial "01B" samples (barcode positions 14-16)
met <- met[, substr(colnames(met), 14, 16) != "01B"]
#----------------------------
# Mean methylation
#----------------------------
# Boxplot of mean methylation per group, with p-value
TCGAvisualize_meanMethylation(
  met,
  groupCol = "project_id",
  group.legend = "Groups",
  filename = NULL,
  print.pvalue = TRUE
)
#------- Identify differentially methylated CpG sites ----------
res <- TCGAanalyze_DMC(
  met,
  # name of the grouping column in the colData() matrix
  groupCol = "project_id",
  group1 = "TCGA-GBM",
  group2 = "TCGA-LGG",
  p.cut = 0.05,
  diffmean.cut = 0.15,
  save = FALSE,
  legend = "State",
  plot.filename = "~/Downloads/COAD_READ_metvolcano.png",
  cores = 1
)
#--------------------------
# DNA Methylation heatmap
#-------------------------
library(ComplexHeatmap)
# Clinical annotation for the heatmap columns.
# NOTE(review): queried from TCGA-COAD/TCGA-READ while the samples above are
# LGG/GBM, so the barcode subset() below may be empty -- confirm the projects.
coad_clin <- GDCquery_clinic(project = "TCGA-COAD", type = "Clinical")
read_clin <- GDCquery_clinic(project = "TCGA-READ", type = "Clinical")
use_cols <- c("bcr_patient_barcode", "disease","gender","vital_status","race")
# Stack the two clinical tables and keep only patients present in `samples`
clinical <- coad_clin %>%
  dplyr::select(use_cols) %>%
  add_row(dplyr::select(read_clin, use_cols)) %>%
  subset(bcr_patient_barcode %in% substr(samples, 1, 12))
# Keep the probes called hyper-/hypomethylated by TCGAanalyze_DMC
sig_met <- filter(res, status != "Not Significant")
res_data <- subset(met,subset = (rownames(met) %in% rownames(sig_met)))
# Column (sample) annotation: clinical covariates
ta <- HeatmapAnnotation(
  df = clinical[, c("disease", "gender", "vital_status", "race")],
  col = list(
    disease = c("COAD" = "grey", "READ" = "black"),
    gender = c("male" = "blue", "female" = "pink")
  ))
# Row (probe) annotation: methylation direction
ra <- rowAnnotation(
  df = sig_met$status,
  col = list(
    "status" =
      c("Hypomethylated" = "orange",
        "Hypermethylated" = "darkgreen")
  ),
  width = unit(1, "cm")
)
heatmap <- Heatmap(
  assay(res_data),
  name = "DNA methylation",
  col = matlab::jet.colors(200),
  show_row_names = FALSE,
  cluster_rows = TRUE,
  cluster_columns = FALSE,
  show_column_names = FALSE,
  bottom_annotation = ta,
  column_title = "DNA Methylation"
)
# Save the heatmap to a PNG file (the original comment said "pdf")
png("~/Downloads/heatmap.png",width = 600, height = 400)
draw(heatmap, annotation_legend_side = "bottom")
dev.off()
save(sig_met, res_data, file = "~/Downloads/CRC.rda")
#---------------------------
# Motif analysis
#---------------------------
library(rGADEM)
library(GenomicRanges)
library(BSgenome.Hsapiens.UCSC.hg19)
library(motifStack)
# Build 201-bp windows centred on each significant probe (+/- 100 bp)
probes <- rowRanges(res_data)
sequence <- GRanges(
  seqnames = as.character(seqnames(probes)),
  ranges = IRanges(start = start(ranges(probes)) - 100,
                   end = start(ranges(probes)) + 100),
  strand = "*"
)
# De-novo motif discovery with GADEM on the hg19 genome
gadem <- GADEM(sequence, verbose = FALSE, genome = Hsapiens)
nMotifs(gadem)
# Plot the first discovered motif as a sequence logo
pwm <- getPWM(gadem)
pfm <- new("pfm",mat=pwm[[1]],name="Novel Site 1")
plotMotifLogo(pfm)
# Match the discovered motifs against known motifs (JASPAR)
library(MotIV)
analysis.jaspar <- motifMatch(pwm)
summary(analysis.jaspar)
alignment <- viewAlignments(analysis.jaspar)
print(alignment[[1]])
|
#This is a modified copy of the file CART.R in the git repository https://github.com/atrisarkar/ces
#
# Author: Atri
###############################################################################
# rpart() function is a built in R library fuction for Recursive Partitioning and Regression Trees
# CART: building a binary tree, each node is a feature, each path is a configuration,
# each leave is the performance of the corresponding path configuration
library(rpart)
library(randomForest)
library(gbm)
library(rattle)
source(file="/Users/jula/Github/Cross_ML/ces_modified/path_settings.R")
source(file=script_CART)
# Initialization ##################################################################################
# Load the input dataset into rattle's global `crs` state and record its
# dimensions as globals (featureCount, obsCount).
# NOTE(review): `crs$dataset <- ...` only updates shared state if `crs` is an
# environment (as rattle defines it); confirm script_CART/rattle creates
# `crs` before this runs.
initData <- function(testSet){
  # Interactive prompts kept as reference; the path now comes from `testSet`.
  #cat("Please enter full address of the dataset (for example: /Users/Data/Dataset.csv)", '\n')
  #fileAddress <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  #fileAddress <- "/Users/jula/Github/ces/data/Benchmark/Input/Apache.csv"
  fileAddress <- testSet
  #cat("Please enter address of output folder (for example: /Useres/Data/Output)", '\n')
  #outputAddress <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  # Output directory and base filename for the result CSV (globals, from
  # path_settings.R).
  outputAddress <<- Output_CART
  # added one <
  #cat("Please enter output filename", "\n")
  #outputFilename <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  outputFilename <<- "output_CART"
  # Load the data
  dataAddr <<- paste("file:///", fileAddress, sep="")
  crs$dataset <- read.csv(dataAddr, na.strings=c(".", "NA", "", "?"), strip.white=TRUE, encoding="UTF-8")
  # Calculate number of features (one column is the PERF target)
  featureCount <<- ncol(crs$dataset) - 1
  # Calculate number of observations
  # NOTE(review): this is nrow - 1, one less than the actual row count --
  # confirm whether the off-by-one is intentional.
  obsCount <<- nrow(crs$dataset) - 1
}
# Configure the experiment-wide settings as globals, matching the style of
# the rest of this script: the repetition count per sampling round comes
# from path_settings.R, and the rpart method is fixed to regression.
initGeneralParams <- function(){
  methodName <<- "anova"                   # rpart method for model building
  seedRepetitions <<- numberOfRepPerRound  # repetitions per sampling round
}
# Define the sampling plan as globals. The interactive prompts of the
# original tool were replaced by fixed values: sample sizes are absolute
# observation counts (type 1) forming an arithmetic progression with step 1
# from 1 up to numberOfRounds (taken from path_settings.R).
initSamplingParams <- function(){
  samplingType <<- 1            # 1 = observations, 2 = percentage, 3 = coefficient
  samplingProgression <<- 1     # 1 = arithmetic, 2 = geometric
  samplingProgressionBase <<- 1 # progression step / ratio
  samplingLower <<- 1           # first sample size
  samplingUpper <<- numberOfRounds  # last sample size
}
# The three init*Params() functions below are intentionally empty: the
# interactive prompts are kept as commented-out reference code, and the
# actual minsplit/minbucket values are derived from the training-set size
# inside analyseCART() (maxdepth is hard-coded to 30 there).
initMinSplitParams <- function(){
  #cat("Please enter minSplit range lower value", '\n')
  #minSplitLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minSplitLower <- 2
  #cat("Please enter minSplit range upper value", '\n')
  #minSplitUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minSplitUpper <- 5
}
initMinBucketParams <- function(){
  #cat("Please enter minBucket range lower value", '\n')
  #minBucketLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minBucketLower <- 2
  #cat("Please enter minBucket range upper value", '\n')
  #minBucketUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minBucketUpper <- 5
}
initMaxDepthParams <- function(){
  #cat("Please enter maxDepth range lower value", '\n')
  #maxDepthLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # maxDepthLower <- 25
  #cat("Please enter maxDepth range upper value", '\n')
  #maxDepthUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # maxDepthUpper <- 30
}
# Configure the complexity (cp) settings as globals.
#
# Fix: the original assigned complexLower/complexUpper with `<-`, creating
# locals that were discarded when the function returned. The commented-out
# interactive versions and every other init* function in this file use `<<-`,
# so global assignment is clearly the intent.
initComplexityParams <- function(){
  # Lower/upper bounds of the complexity range.
  complexLower <<- 0
  complexUpper <<- 0.001
  # Minimum fault-rate improvement between two sampling rounds; analyseCART()
  # stops early once the improvement drops below this step (originally 0.0001).
  complexStep <<- minImprovementPerRound
}
# Aggregate initialiser for the CART hyper-parameter settings. Note that the
# minsplit/minbucket/maxdepth helpers are currently empty (those values are
# derived inside analyseCART instead); only the complexity settings are set.
initCARTParams <- function(){
  initMinSplitParams()
  initMinBucketParams()
  initMaxDepthParams()
  initComplexityParams()
}
# Initialise all experiment parameters (globals).
initParams <- function(){
  initGeneralParams()
  initSamplingParams()
  initCARTParams()
}
# Top-level initialisation: load the data, then set all parameters.
# NOTE(review): `testSet` is read from the calling environment (presumably
# defined by path_settings.R or a driver script) -- confirm it is in scope.
init <- function(){
  initData(testSet)
  initParams()
}
# Analysis ########################################################################################
# Build the vector of sample sizes (global `samplingVector`) from the
# progression settings, convert it into absolute observation counts where
# necessary, and run the CART experiment.
#
# Fixes over the original:
#  * the `samplingType == 1` branch was a self-assignment no-op and the three
#    type checks were sequential, non-exclusive `if`s; they are now a single
#    if/else-if chain (type 1 = observations needs no conversion),
#  * the progression is accumulated in a local and published to the global
#    once, instead of growing `samplingVector` through `<<-` per iteration.
analyse <- function(){
  # Generate the raw progression: arithmetic adds the base each step,
  # geometric multiplies by it, until the upper bound is exceeded.
  values <- NULL
  acc <- samplingLower
  while (acc <= samplingUpper) {
    values <- c(values, acc)
    if (samplingProgression == 1) {
      acc <- acc + samplingProgressionBase  # arithmetic progression
    } else {
      acc <- acc * samplingProgressionBase  # geometric progression
    }
  }
  # Convert the progression units into absolute observation counts.
  if (samplingType == 2) {         # percentage of the observation count
    values <- round(values * obsCount / 100, digits = 0)
  } else if (samplingType == 3) {  # coefficient of the feature count
    values <- values * featureCount
  }                                # samplingType == 1: already observations
  samplingVector <<- values
  # Analyse data over the computed sample sizes.
  analyseCART()
}
# Run the CART sampling experiment. For each sample size in the global
# `samplingVector`, repeat `seedRepetitions` times: draw a random training
# set of that size, fit an rpart regression tree predicting PERF, score it
# on an equally sized random test set disjoint from the training set, and
# record the mean relative fault rate (|actual - predicted| / actual * 100).
# The outer loop stops early once the fault rate improves by less than
# `complexStep` between two sampling rounds. Results are written to
# <outputAddress>/<outputFilename>.csv.
analyseCART <- function()
{
  # Utility variables ---------------------------------------------------------
  faultRate_old <- 0
  faultDataset <- NULL
  resultDataset <- NULL
  resultDataset <- rbind(resultDataset, c("Sampling Amount", "Fault Rate"))
  terminationReason <- c("Termination reason", "numberOfRounds")
  # Main loop -----------------------------------------------------------------
  for(samplingIter in samplingVector){
    current.faultset <- NULL
    for(seedIter in 1:seedRepetitions){
      # Build the training/validate/test datasets (no fixed seed: each
      # repetition draws a fresh random split) ------------------------------
      #set.seed(seedIter)
      crs$nobs <- nrow(crs$dataset)
      crs$sample <- crs$train <- sample(nrow(crs$dataset), samplingIter)
      crs$validate <- NULL
      crs$train.test.diff <- setdiff(setdiff(seq_len(nrow(crs$dataset)), crs$train), crs$validate)
      size<-length(crs$train)
      # Heuristic rpart control parameters derived from the training size:
      # small samples: minbucket ~ size/10, minsplit = 2*minbucket;
      # larger samples: minsplit ~ size/10, minbucket = minsplit/2.
      if(size<=100){
        mb <- floor(size/10 + 1/2)
        ms <- mb * 2
      } else {
        ms <- floor(size/10 + 1/2)
        mb <- floor(ms/2)
      }
      features.size <- length(colnames(crs$dataset)) - 1
      # Test set: same size as the training set, drawn from the remainder.
      crs$test <- sample(crs$train.test.diff, size)
      # Select the variables: everything except the performance target PERF.
      crs$input <- setdiff(colnames(crs$dataset), "PERF") # 'PERF' -> Function to evaluate the performance
      crs$numeric <- NULL
      crs$categoric <- NULL
      crs$target <- "PERF"
      crs$risk <- NULL
      crs$ident <- NULL
      crs$ignore <- NULL
      crs$weights <- NULL
      print("Training Done")
      # Building a CART model (regression tree, no pruning: cp = 0) ----------
      require(rpart, quietly=TRUE)
      #set.seed(seedIter)
      crs$rpart <- rpart(PERF ~ .,
          data=crs$dataset[crs$train, c(crs$input, crs$target)],method="anova",
          parms=list(split="information"),
          control=rpart.control(
              minsplit=ms,
              minbucket=mb,
              maxdepth=30,
              cp=0,
              usesurrogate=0,
              maxsurrogate=0))
      print("Building Done")
      # Evaluate the CART model ----------------------------------------------
      # Obtain predictions for the Decision Tree model on the held-out test set
      crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$test, c(crs$input)])
      #print(crs$rpart)
      # Extract the actual PERF values and compute the relative error (%)
      sdata <- subset(crs$dataset[crs$test,], select=c("PERF"))
      faultRate <- abs(sdata - crs$pr) / sdata * 100
      if(is.null(faultDataset)){
        faultDataset <- faultRate
      }else{
        faultDataset <- cbind(faultDataset, faultRate)
      }
      if(is.null(current.faultset)){
        current.faultset <- faultRate
      }else{
        current.faultset <- cbind(current.faultset, faultRate)
      }
      # Process all results --------------------------------------------------
      # NOTE(review): this section runs inside the seed loop and faultDataset
      # is reset to NULL each repetition, so one result row is appended per
      # repetition (not one averaged row per sampling amount) and the cbind
      # accumulation above never spans repetitions -- confirm whether this
      # block was meant to sit after the seed loop.
      #outputFilename.split <- paste(outputFilename,samplingIter, sep="_")
      #address01 <- paste(outputAddress, "/", outputFilename.split, ".csv", sep="")
      #faultSet.row <- t(as.matrix(colMeans(current.faultset)))
      #write.csv(faultSet.row, file=address01,row.names=FALSE)
      #
      faultRate <- mean(rowMeans(faultDataset))
      # print(faultRate)
      resultDataset <- rbind(resultDataset, c(samplingIter, faultRate))
      #print("faultRate")
      #print(faultRate)
      faultDataset <- NULL
    }# for(seedIter in 1:seedRepetitions)
    # Early termination: improvement over the previous round below threshold.
    if(abs(faultRate-faultRate_old)<complexStep){
      terminationReason <- c("Termination reason", "minImprovementPerRound")
      print(terminationReason)
      break
    }
    faultRate_old <- faultRate
  } # for(samplingIter in samplingVector)
  # Output the combined data ---------------------------------------------------
  address00 <- paste(outputAddress, "/", outputFilename, ".csv", sep="")
  write.csv(rbind(terminationReason, resultDataset), file=address00, row.names=FALSE)
}
#plot(mydata$SampleSize,100-mydata$FaultRate,type="b",col=4,main="LLVM AS",xlab="Sample Size",ylab="Prediction Accuracy")
| /ces_modified/CART.R | no_license | yoola/Cross_ML | R | false | false | 10,130 | r | #This is a modified copy of the file CART.R in the git repository https://github.com/atrisarkar/ces
#
# Author: Atri
###############################################################################
# rpart() function is a built in R library fuction for Recursive Partitioning and Regression Trees
# CART: building a binary tree, each node is a feature, each path is a configuration,
# each leave is the performance of the corresponding path configuration
library(rpart)
library(randomForest)
library(gbm)
library(rattle)
source(file="/Users/jula/Github/Cross_ML/ces_modified/path_settings.R")
source(file=script_CART)
# Initialization ##################################################################################
# Load the input dataset into rattle's global `crs` state and record its
# dimensions as globals. NOTE(review): `crs$dataset <- ...` only updates
# shared state if `crs` is an environment (as rattle defines it) -- confirm
# script_CART/rattle creates `crs` before this runs.
initData <- function(testSet){
  #cat("Please enter full address of the dataset (for example: /Users/Data/Dataset.csv)", '\n')
  #fileAddress <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  #fileAddress <- "/Users/jula/Github/ces/data/Benchmark/Input/Apache.csv"
  fileAddress <- testSet
  #cat("Please enter address of output folder (for example: /Useres/Data/Output)", '\n')
  #outputAddress <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  # Output directory and base filename for the result CSV (globals).
  outputAddress <<- Output_CART
  # added one <
  #cat("Please enter output filename", "\n")
  #outputFilename <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  outputFilename <<- "output_CART"
  # Load the data
  dataAddr <<- paste("file:///", fileAddress, sep="")
  crs$dataset <- read.csv(dataAddr, na.strings=c(".", "NA", "", "?"), strip.white=TRUE, encoding="UTF-8")
  # Calculate number of features (one column is the PERF target)
  featureCount <<- ncol(crs$dataset) - 1
  # Calculate number of observations
  # NOTE(review): nrow - 1 -- confirm whether the off-by-one is intentional.
  obsCount <<- nrow(crs$dataset) - 1
}
# Experiment-wide settings (globals): repetitions per round and rpart method.
initGeneralParams <- function(){
  #print("Please enter number of times experiment should be repeated")
  #seedRepetitions <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  seedRepetitions <<- numberOfRepPerRound #5
  #print("Please enter name of the method that will be used for experiment")
  #methodName <<- scan(file = "", what = " ", n = 1, quiet = TRUE)
  methodName <<- "anova"
}
# Sampling plan (globals): absolute observation counts, arithmetic
# progression with step 1, from 1 up to numberOfRounds.
initSamplingParams <- function(){
  #print("Please enter sampling units (1 - observations; 2 - percentage; 3 - coefficient)")
  #samplingType <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  samplingType <<- 1
  #print("Please enter sampling progression (1 - arithmetic; 2 - geometric)")
  #samplingProgression <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  samplingProgression <<- 1
  #print("Please enter progression base")
  #samplingProgressionBase <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  samplingProgressionBase <<-1 # added one <
  #cat("Please enter sampling range lower value", '\n')
  #samplingLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  samplingLower <<- 1 # added one <
  #cat("Please enter sampling range upper value", '\n')
  #samplingUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  samplingUpper <<- numberOfRounds
}
# The three init*Params() functions below are intentionally empty; the
# minsplit/minbucket values are derived inside analyseCART() instead.
initMinSplitParams <- function(){
  #cat("Please enter minSplit range lower value", '\n')
  #minSplitLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minSplitLower <- 2
  #cat("Please enter minSplit range upper value", '\n')
  #minSplitUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minSplitUpper <- 5
}
initMinBucketParams <- function(){
  #cat("Please enter minBucket range lower value", '\n')
  #minBucketLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minBucketLower <- 2
  #cat("Please enter minBucket range upper value", '\n')
  #minBucketUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # minBucketUpper <- 5
}
initMaxDepthParams <- function(){
  #cat("Please enter maxDepth range lower value", '\n')
  #maxDepthLower <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # maxDepthLower <- 25
  #cat("Please enter maxDepth range upper value", '\n')
  #maxDepthUpper <<- scan(file = "", what = integer(), n = 1, quiet = FALSE)
  # maxDepthUpper <- 30
}
# Complexity (cp) settings.
# NOTE(review): complexLower/complexUpper are assigned with `<-`, creating
# locals discarded on return; every sibling function (and the commented-out
# interactive versions) use `<<-`, so these were likely meant to be globals.
initComplexityParams <- function(){
  #cat("Please enter complexity range lower value", '\n')
  #complexLower <<- scan(file = "", what = numeric(), n = 1, quiet = FALSE)
  complexLower <- 0
  #cat("Please enter complexity range upper value", '\n')
  #complexUpper <<- scan(file = "", what = numeric(), n = 1, quiet = FALSE)
  complexUpper <- 0.001
  #cat("Please enter complexity step", '\n')
  #complexStep <<- scan(file = "", what = numeric(), n = 1, quiet = FALSE)
  # Minimum fault-rate improvement per round; analyseCART() stops below it.
  complexStep <<- minImprovementPerRound #0.0001
}
# Aggregate initialiser for the CART hyper-parameter settings.
initCARTParams <- function(){
  initMinSplitParams()
  initMinBucketParams()
  initMaxDepthParams()
  initComplexityParams()
}
# Initialise all experiment parameters (globals).
initParams <- function(){
  initGeneralParams()
  initSamplingParams()
  initCARTParams()
}
# Top-level initialisation: load the data, then set all parameters.
# NOTE(review): `testSet` is read from the calling environment -- confirm it
# is defined (e.g. by path_settings.R or a driver script) before init() runs.
init <- function(){
  initData(testSet)
  initParams()
}
# Analysis ########################################################################################
# Build the vector of sample sizes (global `samplingVector`) from the
# progression settings, convert it into absolute observation counts where
# necessary, and run the CART experiment.
analyse <- function(){
  # Calculate sampling progression: arithmetic adds the base each step,
  # geometric multiplies by it, until the upper bound is exceeded.
  samplingVector <<- NULL
  samplingAcc <- samplingLower
  while(samplingAcc <= samplingUpper)
  {
    samplingVector <<- c(samplingVector, samplingAcc)
    if(samplingProgression == 1) # Arithmetic progression
    {
      samplingAcc <- samplingAcc + samplingProgressionBase
    }
    else # Geometric progression
    {
      samplingAcc <- samplingAcc * samplingProgressionBase
    }
  }
  # Convert the progression units into absolute observation counts.
  # NOTE(review): the three checks below are sequential, not exclusive, and
  # the type-1 branch is a self-assignment no-op.
  if(samplingType == 1) # Observations
  {
    samplingVector <<- samplingVector
  }
  if(samplingType == 2) # Percentage
  {
    samplingVector <<- round(samplingVector * obsCount / 100, digits = 0)
  }
  if(samplingType == 3) # Coefficient
  {
    samplingVector <<- samplingVector * featureCount
  }
  # Analyse data over the computed sample sizes.
  analyseCART()
}
# Run the CART sampling experiment. For each sample size in the global
# `samplingVector`, repeat `seedRepetitions` times: draw a random training
# set of that size, fit an rpart regression tree predicting PERF, score it
# on an equally sized random test set, and record the mean relative fault
# rate (|actual - predicted| / actual * 100). The outer loop stops early
# once the fault rate improves by less than `complexStep` between rounds.
# Results are written to <outputAddress>/<outputFilename>.csv.
analyseCART <- function()
{
  # Utility variables ---------------------------------------------------------
  faultRate_old <- 0
  faultDataset <- NULL
  resultDataset <- NULL
  resultDataset <- rbind(resultDataset, c("Sampling Amount", "Fault Rate"))
  terminationReason <- c("Termination reason", "numberOfRounds")
  # Main loop -----------------------------------------------------------------
  for(samplingIter in samplingVector){
    current.faultset <- NULL
    for(seedIter in 1:seedRepetitions){
      # Build the training/validate/test datasets (fresh random split per
      # repetition; seeding is disabled) -------------------------------------
      #set.seed(seedIter)
      crs$nobs <- nrow(crs$dataset)
      crs$sample <- crs$train <- sample(nrow(crs$dataset), samplingIter)
      crs$validate <- NULL
      crs$train.test.diff <- setdiff(setdiff(seq_len(nrow(crs$dataset)), crs$train), crs$validate)
      size<-length(crs$train)
      # Heuristic rpart control parameters derived from the training size.
      if(size<=100){
        mb <- floor(size/10 + 1/2)
        ms <- mb * 2
      } else {
        ms <- floor(size/10 + 1/2)
        mb <- floor(ms/2)
      }
      features.size <- length(colnames(crs$dataset)) - 1
      # Test set: same size as the training set, drawn from the remainder.
      crs$test <- sample(crs$train.test.diff, size)
      # Select the variables: everything except the performance target PERF.
      crs$input <- setdiff(colnames(crs$dataset), "PERF") # 'PERF' -> Function to evaluate the performance
      crs$numeric <- NULL
      crs$categoric <- NULL
      crs$target <- "PERF"
      crs$risk <- NULL
      crs$ident <- NULL
      crs$ignore <- NULL
      crs$weights <- NULL
      print("Training Done")
      # Building a CART model (regression tree, no pruning: cp = 0) ----------
      require(rpart, quietly=TRUE)
      #set.seed(seedIter)
      crs$rpart <- rpart(PERF ~ .,
          data=crs$dataset[crs$train, c(crs$input, crs$target)],method="anova",
          parms=list(split="information"),
          control=rpart.control(
              minsplit=ms,
              minbucket=mb,
              maxdepth=30,
              cp=0,
              usesurrogate=0,
              maxsurrogate=0))
      print("Building Done")
      # Evaluate the CART model ----------------------------------------------
      # Obtain predictions for the Decision Tree model on the held-out test set
      crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$test, c(crs$input)])
      #print(crs$rpart)
      # Extract the actual PERF values and compute the relative error (%)
      sdata <- subset(crs$dataset[crs$test,], select=c("PERF"))
      faultRate <- abs(sdata - crs$pr) / sdata * 100
      if(is.null(faultDataset)){
        faultDataset <- faultRate
      }else{
        faultDataset <- cbind(faultDataset, faultRate)
      }
      if(is.null(current.faultset)){
        current.faultset <- faultRate
      }else{
        current.faultset <- cbind(current.faultset, faultRate)
      }
      # Process all results --------------------------------------------------
      # NOTE(review): this block runs inside the seed loop and faultDataset is
      # reset each repetition, so one result row is appended per repetition,
      # not one averaged row per sampling amount -- confirm whether it was
      # meant to sit after the seed loop.
      #outputFilename.split <- paste(outputFilename,samplingIter, sep="_")
      #address01 <- paste(outputAddress, "/", outputFilename.split, ".csv", sep="")
      #faultSet.row <- t(as.matrix(colMeans(current.faultset)))
      #write.csv(faultSet.row, file=address01,row.names=FALSE)
      #
      faultRate <- mean(rowMeans(faultDataset))
      # print(faultRate)
      resultDataset <- rbind(resultDataset, c(samplingIter, faultRate))
      #print("faultRate")
      #print(faultRate)
      faultDataset <- NULL
    }# for(seedIter in 1:seedRepetitions)
    # Early termination: improvement over the previous round below threshold.
    if(abs(faultRate-faultRate_old)<complexStep){
      terminationReason <- c("Termination reason", "minImprovementPerRound")
      print(terminationReason)
      break
    }
    faultRate_old <- faultRate
  } # for(samplingIter in samplingVector)
  # Output the combined data ---------------------------------------------------
  address00 <- paste(outputAddress, "/", outputFilename, ".csv", sep="")
  write.csv(rbind(terminationReason, resultDataset), file=address00, row.names=FALSE)
}
#plot(mydata$SampleSize,100-mydata$FaultRate,type="b",col=4,main="LLVM AS",xlab="Sample Size",ylab="Prediction Accuracy")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resourcegroupstaggingapi_service.R
\name{resourcegroupstaggingapi}
\alias{resourcegroupstaggingapi}
\title{AWS Resource Groups Tagging API}
\usage{
resourcegroupstaggingapi(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Resource Groups Tagging API
This guide describes the API operations of the Resource Groups Tagging API.
A tag is a label that you assign to an AWS resource. A tag consists of a
key and a value, both of which you define. For example, if you have two
Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But
the value of \"Stack\" might be \"Testing\" for one and \"Production\"
for the other.
Tagging can help you organize your resources and enables you to simplify
resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the
following tasks:
\itemize{
\item Tag and untag supported resources located in the specified Region
for the AWS account.
\item Use tag-based filters to search for resources located in the
specified Region for the AWS account.
\item List all existing tag keys in the specified Region for the AWS
account.
\item List all existing values for the specified key in the specified
Region for the AWS account.
}
To use resource groups tagging API operations, you must add the
following permissions to your IAM policy:
\itemize{
\item \code{tag:GetResources}
\item \code{tag:TagResources}
\item \code{tag:UntagResources}
\item \code{tag:GetTagKeys}
\item \code{tag:GetTagValues}
}
You\'ll also need permissions to access the resources of individual
services so that you can tag and untag those resources.
For more information on IAM policies, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage.html}{Managing IAM Policies}
in the \emph{IAM User Guide}.
You can use the Resource Groups Tagging API to tag resources for the
following AWS services.
\itemize{
\item Alexa for Business (a4b)
\item API Gateway
\item Amazon AppStream
\item AWS AppSync
\item AWS App Mesh
\item Amazon Athena
\item Amazon Aurora
\item AWS Backup
\item AWS Certificate Manager
\item AWS Certificate Manager Private CA
\item Amazon Cloud Directory
\item AWS CloudFormation
\item Amazon CloudFront
\item AWS CloudHSM
\item AWS CloudTrail
\item Amazon CloudWatch (alarms only)
\item Amazon CloudWatch Events
\item Amazon CloudWatch Logs
\item AWS CodeBuild
\item AWS CodeCommit
\item AWS CodePipeline
\item AWS CodeStar
\item Amazon Cognito Identity
\item Amazon Cognito User Pools
\item Amazon Comprehend
\item AWS Config
\item AWS Data Exchange
\item AWS Data Pipeline
\item AWS Database Migration Service
\item AWS DataSync
\item AWS Device Farm
\item AWS Direct Connect
\item AWS Directory Service
\item Amazon DynamoDB
\item Amazon EBS
\item Amazon EC2
\item Amazon ECR
\item Amazon ECS
\item Amazon EKS
\item AWS Elastic Beanstalk
\item Amazon Elastic File System
\item Elastic Load Balancing
\item Amazon ElastiCache
\item Amazon Elasticsearch Service
\item AWS Elemental MediaLive
\item AWS Elemental MediaPackage
\item AWS Elemental MediaTailor
\item Amazon EMR
\item Amazon FSx
\item Amazon S3 Glacier
\item AWS Glue
\item Amazon GuardDuty
\item Amazon Inspector
\item AWS IoT Analytics
\item AWS IoT Core
\item AWS IoT Device Defender
\item AWS IoT Device Management
\item AWS IoT Events
\item AWS IoT Greengrass
\item AWS IoT 1-Click
\item AWS IoT Things Graph
\item AWS Key Management Service
\item Amazon Kinesis
\item Amazon Kinesis Data Analytics
\item Amazon Kinesis Data Firehose
\item AWS Lambda
\item AWS License Manager
\item Amazon Machine Learning
\item Amazon MQ
\item Amazon MSK
\item Amazon Neptune
\item AWS OpsWorks
\item AWS Organizations
\item Amazon Quantum Ledger Database (QLDB)
\item Amazon RDS
\item Amazon Redshift
\item AWS Resource Access Manager
\item AWS Resource Groups
\item AWS RoboMaker
\item Amazon Route 53
\item Amazon Route 53 Resolver
\item Amazon S3 (buckets only)
\item Amazon SageMaker
\item AWS Secrets Manager
\item AWS Security Hub
\item AWS Service Catalog
\item Amazon Simple Email Service (SES)
\item Amazon Simple Notification Service (SNS)
\item Amazon Simple Queue Service (SQS)
\item Amazon Simple Workflow Service
\item AWS Step Functions
\item AWS Storage Gateway
\item AWS Systems Manager
\item AWS Transfer for SFTP
\item AWS WAF Regional
\item Amazon VPC
\item Amazon WorkSpaces
}
}
\section{Service syntax}{
\preformatted{svc <- resourcegroupstaggingapi(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=resourcegroupstaggingapi_describe_report_creation]{describe_report_creation} \tab Describes the status of the StartReportCreation operation \cr
\link[=resourcegroupstaggingapi_get_compliance_summary]{get_compliance_summary} \tab Returns a table that shows counts of resources that are noncompliant with their tag policies \cr
\link[=resourcegroupstaggingapi_get_resources]{get_resources} \tab Returns all the tagged or previously tagged resources that are located in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_get_tag_keys]{get_tag_keys} \tab Returns all tag keys in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_get_tag_values]{get_tag_values} \tab Returns all tag values for the specified key in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_start_report_creation]{start_report_creation} \tab Generates a report that lists all tagged resources in accounts across your organization and tells whether each resource is compliant with the effective tag policy\cr
\link[=resourcegroupstaggingapi_tag_resources]{tag_resources} \tab Applies one or more tags to the specified resources \cr
\link[=resourcegroupstaggingapi_untag_resources]{untag_resources} \tab Removes the specified tags from the specified resources
}
}
\examples{
\dontrun{
svc <- resourcegroupstaggingapi()
svc$describe_report_creation(
Foo = 123
)
}
}
| /cran/paws.management/man/resourcegroupstaggingapi.Rd | permissive | jcheng5/paws | R | false | true | 6,333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resourcegroupstaggingapi_service.R
\name{resourcegroupstaggingapi}
\alias{resourcegroupstaggingapi}
\title{AWS Resource Groups Tagging API}
\usage{
resourcegroupstaggingapi(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Resource Groups Tagging API
This guide describes the API operations of the Resource Groups Tagging API.
A tag is a label that you assign to an AWS resource. A tag consists of a
key and a value, both of which you define. For example, if you have two
Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But
the value of \"Stack\" might be \"Testing\" for one and \"Production\"
for the other.
Tagging can help you organize your resources and enables you to simplify
resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the
following tasks:
\itemize{
\item Tag and untag supported resources located in the specified Region
for the AWS account.
\item Use tag-based filters to search for resources located in the
specified Region for the AWS account.
\item List all existing tag keys in the specified Region for the AWS
account.
\item List all existing values for the specified key in the specified
Region for the AWS account.
}
To use resource groups tagging API operations, you must add the
following permissions to your IAM policy:
\itemize{
\item \code{tag:GetResources}
\item \code{tag:TagResources}
\item \code{tag:UntagResources}
\item \code{tag:GetTagKeys}
\item \code{tag:GetTagValues}
}
You\'ll also need permissions to access the resources of individual
services so that you can tag and untag those resources.
For more information on IAM policies, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage.html}{Managing IAM Policies}
in the \emph{IAM User Guide}.
You can use the Resource Groups Tagging API to tag resources for the
following AWS services.
\itemize{
\item Alexa for Business (a4b)
\item API Gateway
\item Amazon AppStream
\item AWS AppSync
\item AWS App Mesh
\item Amazon Athena
\item Amazon Aurora
\item AWS Backup
\item AWS Certificate Manager
\item AWS Certificate Manager Private CA
\item Amazon Cloud Directory
\item AWS CloudFormation
\item Amazon CloudFront
\item AWS CloudHSM
\item AWS CloudTrail
\item Amazon CloudWatch (alarms only)
\item Amazon CloudWatch Events
\item Amazon CloudWatch Logs
\item AWS CodeBuild
\item AWS CodeCommit
\item AWS CodePipeline
\item AWS CodeStar
\item Amazon Cognito Identity
\item Amazon Cognito User Pools
\item Amazon Comprehend
\item AWS Config
\item AWS Data Exchange
\item AWS Data Pipeline
\item AWS Database Migration Service
\item AWS DataSync
\item AWS Device Farm
\item AWS Direct Connect
\item AWS Directory Service
\item Amazon DynamoDB
\item Amazon EBS
\item Amazon EC2
\item Amazon ECR
\item Amazon ECS
\item Amazon EKS
\item AWS Elastic Beanstalk
\item Amazon Elastic File System
\item Elastic Load Balancing
\item Amazon ElastiCache
\item Amazon Elasticsearch Service
\item AWS Elemental MediaLive
\item AWS Elemental MediaPackage
\item AWS Elemental MediaTailor
\item Amazon EMR
\item Amazon FSx
\item Amazon S3 Glacier
\item AWS Glue
\item Amazon GuardDuty
\item Amazon Inspector
\item AWS IoT Analytics
\item AWS IoT Core
\item AWS IoT Device Defender
\item AWS IoT Device Management
\item AWS IoT Events
\item AWS IoT Greengrass
\item AWS IoT 1-Click
\item AWS IoT Things Graph
\item AWS Key Management Service
\item Amazon Kinesis
\item Amazon Kinesis Data Analytics
\item Amazon Kinesis Data Firehose
\item AWS Lambda
\item AWS License Manager
\item Amazon Machine Learning
\item Amazon MQ
\item Amazon MSK
\item Amazon Neptune
\item AWS OpsWorks
\item AWS Organizations
\item Amazon Quantum Ledger Database (QLDB)
\item Amazon RDS
\item Amazon Redshift
\item AWS Resource Access Manager
\item AWS Resource Groups
\item AWS RoboMaker
\item Amazon Route 53
\item Amazon Route 53 Resolver
\item Amazon S3 (buckets only)
\item Amazon SageMaker
\item AWS Secrets Manager
\item AWS Security Hub
\item AWS Service Catalog
\item Amazon Simple Email Service (SES)
\item Amazon Simple Notification Service (SNS)
\item Amazon Simple Queue Service (SQS)
\item Amazon Simple Workflow Service
\item AWS Step Functions
\item AWS Storage Gateway
\item AWS Systems Manager
\item AWS Transfer for SFTP
\item AWS WAF Regional
\item Amazon VPC
\item Amazon WorkSpaces
}
}
\section{Service syntax}{
\preformatted{svc <- resourcegroupstaggingapi(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=resourcegroupstaggingapi_describe_report_creation]{describe_report_creation} \tab Describes the status of the StartReportCreation operation \cr
\link[=resourcegroupstaggingapi_get_compliance_summary]{get_compliance_summary} \tab Returns a table that shows counts of resources that are noncompliant with their tag policies \cr
\link[=resourcegroupstaggingapi_get_resources]{get_resources} \tab Returns all the tagged or previously tagged resources that are located in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_get_tag_keys]{get_tag_keys} \tab Returns all tag keys in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_get_tag_values]{get_tag_values} \tab Returns all tag values for the specified key in the specified Region for the AWS account \cr
\link[=resourcegroupstaggingapi_start_report_creation]{start_report_creation} \tab Generates a report that lists all tagged resources in accounts across your organization and tells whether each resource is compliant with the effective tag policy\cr
\link[=resourcegroupstaggingapi_tag_resources]{tag_resources} \tab Applies one or more tags to the specified resources \cr
\link[=resourcegroupstaggingapi_untag_resources]{untag_resources} \tab Removes the specified tags from the specified resources
}
}
\examples{
\dontrun{
svc <- resourcegroupstaggingapi()
svc$describe_report_creation(
Foo = 123
)
}
}
|
# Prepend the project-local package library so packages pinned under
# Q:/Variablenexport/... take precedence over the system library.
.libPaths(c("Q:/Variablenexport/variableMetadataPreparation/library", .libPaths()))
parse_cli_arguments <- function() {
option_list <- list(
optparse::make_option(c("-e", "--excel-directory"),
type = "character", action = "store", default = NA,
help = paste0(
"Path to the directory containing the input ",
"excel files: files must be named vimport_dsXX.xlsx"
),
dest = "exceldirectory"
),
optparse::make_option(c("-s", "--stata-directory"),
type = "character", action = "store", default = NA,
help = paste0(
"Path to the directory containing the input ",
"stata dataset files: files must be named dsXX.dta"
),
dest = "statadirectory"
),
optparse::make_option(c("-o", "--output-directory"),
type = "character", action = "store", default = NA,
help = "Path to the directory of output files",
dest = "outputdirectory"
),
optparse::make_option(c("-m", "--missing-conditions"),
type = "character", action = "store", default = NA,
help = "Path to excel file containing the missing conditions",
dest = "missing_conditions"
),
optparse::make_option(c("-n", "--variables-no-distribution"),
type = "character", action = "store", default = "pid,id",
help = paste0(
"Names of variables without distribution: ",
"default = \"pid,id\", variables with accessWays not-accessible ",
"should not be included in this list"
),
dest = "variables_no_distribution"
)
)
option_parser <- optparse::OptionParser(option_list = option_list)
opt <- optparse::parse_args(option_parser)
if (is.na(opt$exceldirectory)) {
optparse::print_help(option_parser)
stop("EXCEL-DIRECTORY must not be empty!")
}
if (length(dir(opt$exceldirectory, pattern = "vimport_ds[0-9]+\\.xlsx")) == 0) {
optparse::print_help(option_parser)
stop(paste0(
"EXCEL-DIRECTORY must contain excel files (.xlsx)",
" named vimport_ds1, vimport_ds2,...!"
))
}
if (is.na(opt$statadirectory)) {
optparse::print_help(option_parser)
stop("STATA-DIRECTORY must not be empty!")
}
if (length(dir(opt$statadirectory, pattern = "ds[0-9]+\\.dta")) == 0) {
optparse::print_help(option_parser)
stop(paste0(
"STATA-DIRECTORY must contain stata files (.dta)",
" named ds1.dta, ds2.dta,...!"
))
}
if (is.na(opt$outputdirectory)) {
optparse::print_help(option_parser)
stop("OUTPUT-DIRECTORY must not be empty!")
}
variableMetadataPreparation::variable_metadata_generation(
opt$exceldirectory, opt$statadirectory, opt$missing_conditions,
opt$outputdirectory, opt$variables_no_distribution
)
}
parse_cli_arguments()
| /bin/parse_cli_arguments.R | permissive | dzhw/variableMetadataPreparation | R | false | false | 2,761 | r | .libPaths(c("Q:/Variablenexport/variableMetadataPreparation/library", .libPaths()))
parse_cli_arguments <- function() {
option_list <- list(
optparse::make_option(c("-e", "--excel-directory"),
type = "character", action = "store", default = NA,
help = paste0(
"Path to the directory containing the input ",
"excel files: files must be named vimport_dsXX.xlsx"
),
dest = "exceldirectory"
),
optparse::make_option(c("-s", "--stata-directory"),
type = "character", action = "store", default = NA,
help = paste0(
"Path to the directory containing the input ",
"stata dataset files: files must be named dsXX.dta"
),
dest = "statadirectory"
),
optparse::make_option(c("-o", "--output-directory"),
type = "character", action = "store", default = NA,
help = "Path to the directory of output files",
dest = "outputdirectory"
),
optparse::make_option(c("-m", "--missing-conditions"),
type = "character", action = "store", default = NA,
help = "Path to excel file containing the missing conditions",
dest = "missing_conditions"
),
optparse::make_option(c("-n", "--variables-no-distribution"),
type = "character", action = "store", default = "pid,id",
help = paste0(
"Names of variables without distribution: ",
"default = \"pid,id\", variables with accessWays not-accessible ",
"should not be included in this list"
),
dest = "variables_no_distribution"
)
)
option_parser <- optparse::OptionParser(option_list = option_list)
opt <- optparse::parse_args(option_parser)
if (is.na(opt$exceldirectory)) {
optparse::print_help(option_parser)
stop("EXCEL-DIRECTORY must not be empty!")
}
if (length(dir(opt$exceldirectory, pattern = "vimport_ds[0-9]+\\.xlsx")) == 0) {
optparse::print_help(option_parser)
stop(paste0(
"EXCEL-DIRECTORY must contain excel files (.xlsx)",
" named vimport_ds1, vimport_ds2,...!"
))
}
if (is.na(opt$statadirectory)) {
optparse::print_help(option_parser)
stop("STATA-DIRECTORY must not be empty!")
}
if (length(dir(opt$statadirectory, pattern = "ds[0-9]+\\.dta")) == 0) {
optparse::print_help(option_parser)
stop(paste0(
"STATA-DIRECTORY must contain stata files (.dta)",
" named ds1.dta, ds2.dta,...!"
))
}
if (is.na(opt$outputdirectory)) {
optparse::print_help(option_parser)
stop("OUTPUT-DIRECTORY must not be empty!")
}
variableMetadataPreparation::variable_metadata_generation(
opt$exceldirectory, opt$statadirectory, opt$missing_conditions,
opt$outputdirectory, opt$variables_no_distribution
)
}
parse_cli_arguments()
|
## Weighted power (generalised) mean of a non-negative numeric vector.
##
## values  - non-negative numeric vector.
## order   - order of the mean: 1 = arithmetic (default), 0 = geometric,
##           -1 = harmonic, Inf = maximum, -Inf = minimum.
## weights - non-negative weights of the same length as 'values'; they are
##           normalised to proportions summing to 1 (as per Renyi), and
##           elements with zero weight are excluded from the mean.
##
## Returns a single numeric value, or NaN when all proportions are NaN
## (total weight is zero, i.e. nothing in the group).
power.mean <-
function(values, order = 1, weights = rep(1, length(values)))
{
  ## Validate before doing any arithmetic: lengths must agree
  if (length(values) != length(weights)) stop('The number of values does not equal the number of weights, please check arguments')
  ## Normalise weights to sum to 1 (as per Renyi)
  proportions <- weights / sum(weights)
  ## Check that 'values' are non-negative
  if (any(values[!is.nan(values)] < 0))
    stop('Check that values (argument) are non-negative.')
  ## All proportions are NaN when the total weight is zero (nothing in the
  ## group); propagate the NaN in that case
  if (all(is.nan(proportions))) return(NaN)
  ## Otherwise NaNs only occur for zero weights, which are dropped below.
  ## The original positive- and negative-order branches were identical except
  ## at the infinite orders, so a single set of branches suffices.
  keep <- weights > 0
  if (is.infinite(order)) {
    ## order = +Inf -> maximum, order = -Inf -> minimum
    if (order > 0) max(values[keep]) else min(values[keep])
  } else if (isTRUE(all.equal(order, 0))) {
    ## Geometric mean; all.equal() avoids rounding errors for order 0
    prod(values[keep] ^ proportions[keep])
  } else {
    sum(proportions[keep] * values[keep] ^ order) ^ (1 / order)
  }
}
| /R/power.mean.R | permissive | ljallen/RDiversity | R | false | false | 1,462 | r | power.mean <-
function(values, order = 1, weights = rep(1, length(values)))
{
## Normalise weights to sum to 1 (as per Rényi)
proportions <- weights / sum(weights)
## Check that the number of 'values' is equal to the number of 'weights'
if (length(values) != length(weights)) stop('The number of values does not equal the number of weights, please check arguments')
## Check that 'values' are non-negative
if (any(values[!is.nan(values)] < 0))
stop('Check that values (argument) are non-negative.')
## Check whether all proportions are NaN - happens when nothing in group
## In that case we want to propagate the NaN
if (all(is.nan(proportions))) return(NaN)
## Otherwise NaNs should only occur when weight is 0
## and so will be ignored
if (order > 0) {
if (is.infinite(order)) {
max(values[weights > 0])
} else if (isTRUE(all.equal(order, 0))) { ## Avoid rounding errors for order 0
prod(values[weights > 0] ^ proportions[weights > 0])
} else {
sum(proportions[weights > 0] * values[weights > 0] ^ order) ^ (1 / order)
}
} else { ## Negative orders, need to remove zeros
if (is.infinite(order)) {
min(values[weights > 0])
} else if (isTRUE(all.equal(order, 0))) { ## Avoid rounding errors for order 0
prod(values[weights > 0] ^ proportions[weights > 0])
} else {
sum(proportions[weights > 0] * values[weights > 0] ^ order) ^ (1 / order)
}
}
}
|
\name{states}
\alias{states}
\docType{data}
\title{
data: states
}
\description{
Spatial Polygon Data Frame of lower 48 U.S. states
}
\usage{data("states")
}
\format{
SpatialPolygonsDataFrame
}
\examples{
library(sp)
data("states")
plot(states)
}
| /man/states.Rd | no_license | AdaChornelia/assignR | R | false | false | 255 | rd | \name{states}
\alias{states}
\docType{data}
\title{
data: states
}
\description{
Spatial Polygon Data Frame of lower 48 U.S. states
}
\usage{data("states")
}
\format{
SpatialPolygonsDataFrame
}
\examples{
library(sp)
data("states")
plot(states)
}
|
# Unit tests for readtext's internal utilities: get_temp(),
# is_probably_xpath(), get_docvars_filenames(), file_ext() and
# download_remote().
test_that("Test readtext:::get_temp function for test dirs", {
    # two consecutive calls must hand out distinct temp names
    path_a <- readtext:::get_temp()
    path_b <- readtext:::get_temp()
    expect_false(path_a == path_b)
    # directory = TRUE should create the directory on disk
    tmp_dir <- readtext:::get_temp(directory = TRUE)
    expect_true(dir.exists(tmp_dir))
    # a custom prefix must appear at the start of the basename
    prefixed <- readtext:::get_temp(prefix = "testprefix")
    expect_equal(substr(basename(prefixed), 1, 10), "testprefix")
    # a fresh name is given if the previous one already exists
    first_name <- readtext:::get_temp()
    second_name <- readtext:::get_temp()
    expect_false(first_name == second_name)
    # identical seeds must reproduce the same name
    seeded_a <- readtext:::get_temp(seed = 'xyz')
    seeded_b <- readtext:::get_temp(seed = 'xyz')
    expect_true(seeded_a == seeded_b)
})
test_that("Test is_probably_xpath", {
    # plain words and prefixed names are not xpaths; slash paths are
    expect_false(readtext:::is_probably_xpath("A"))
    expect_false(readtext:::is_probably_xpath("a:what"))
    expect_true(readtext:::is_probably_xpath("/A/B/C"))
    expect_true(readtext:::is_probably_xpath("A/B/C"))
})
test_that("Test readtext:::get_docvars_filenames for parsing fil
enames", {
    doc_paths <- c("~/tmp/documents/USA_blue_horse.txt",
                   "~/tmp/documents/France_green_dog.txt",
                   "~/tmp/documents/China_red_dragon.txt",
                   "~/tmp/spaced words/Ireland_black_bear.txt")
    docvars <- readtext:::get_docvars_filenames(
        doc_paths,
        docvarnames = c("country", "color", "animal"),
        verbosity = 2
    )
    # underscore-separated fields become named docvar columns
    expect_equal(docvars$animal, c("horse", "dog", "dragon", "bear"))
    expect_equal(names(docvars), c("country", "color", "animal"))
    expect_s3_class(docvars, "data.frame")
})
test_that("file_ext returns expected extensions", {
    mixed_paths <- c("~/tmp/documents/USA_blue_horse.txt",
                     "~/tmp/documents/France_green_dog.csv",
                     "~/tmp/documents/China_red_dragon.json",
                     "~/tmp/spaced words/Ireland_black_bear.tar.gz")
    # only the final extension is reported (gz, not tar.gz)
    expect_equal(readtext:::file_ext(mixed_paths), c("txt", "csv", "json", "gz"))
})
test_that("Test download_remote", {
    # a missing remote file must raise an error when not ignored
    expect_error(
        download_remote("https://www.google.com/404.txt", ignore_missing = FALSE)
    )
})
| /tests/testthat/test-utils.R | no_license | quanteda/readtext | R | false | false | 2,471 | r | test_that("Test readtext:::get_temp function for test dirs", {
filename <- readtext:::get_temp()
filename2 <- readtext:::get_temp()
expect_false(filename == filename2)
# test directory parameter
dirname <- readtext:::get_temp(directory = TRUE)
expect_true(dir.exists(dirname))
# test prefix parameter
filename <- readtext:::get_temp(prefix = "testprefix")
expect_equal(
substr(basename(filename), 1, 10),
"testprefix"
)
# test that a new filename will be given if the original already exists
org_filename <- readtext:::get_temp()
new_filename <- readtext:::get_temp()
expect_false(org_filename == new_filename)
# file names are the same when seed is given
org_filename2 <- readtext:::get_temp(seed = 'xyz')
new_filename2 <- readtext:::get_temp(seed = 'xyz')
expect_true(org_filename2 == new_filename2)
})
test_that("Test is_probably_xpath", {
expect_false(readtext:::is_probably_xpath("A"))
expect_false(readtext:::is_probably_xpath("a:what"))
expect_true(readtext:::is_probably_xpath("/A/B/C"))
expect_true(readtext:::is_probably_xpath("A/B/C"))
})
test_that("Test readtext:::get_docvars_filenames for parsing filenames", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.txt",
"~/tmp/documents/China_red_dragon.txt",
"~/tmp/spaced words/Ireland_black_bear.txt")
df <- readtext:::get_docvars_filenames(filenames,
docvarnames = c("country", "color",
"animal"),
verbosity = 2)
expect_equal(df$animal,
c("horse", "dog", "dragon", "bear"))
expect_equal(names(df), c("country", "color", "animal"))
expect_s3_class(df, "data.frame")
})
test_that("file_ext returns expected extensions", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.csv",
"~/tmp/documents/China_red_dragon.json",
"~/tmp/spaced words/Ireland_black_bear.tar.gz")
expect_equal(readtext:::file_ext(filenames),
c("txt", "csv", "json", "gz"))
})
test_that("Test download_remote", {
expect_error(
download_remote("https://www.google.com/404.txt", ignore_missing = FALSE)
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/concave-penalties.R
\name{Fig3.1}
\alias{Fig3.1}
\title{Reproduce Figure 3.1}
\usage{
Fig3.1(
range = c(-4, 4),
col = c("#FF4E37FF", "#00B500FF", "#008DFFFF"),
parlist = list(mfrow = c(1, 3), mar = c(5, 5, 5, 0.5), xpd = 1)
)
}
\arguments{
\item{range}{Range for beta coefficient (vector of length 2)}
\item{col}{Lasso/ridge color (vector of length 2)}
\item{parlist}{List of arguments to pass to \code{par()}}
}
\description{
Reproduces Figure 3.1 from the book; if you specify any options, your results may look different.
}
\examples{
Fig3.1()
}
| /man/Fig3.1.Rd | no_license | pbreheny/hdrm | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/concave-penalties.R
\name{Fig3.1}
\alias{Fig3.1}
\title{Reproduce Figure 3.1}
\usage{
Fig3.1(
range = c(-4, 4),
col = c("#FF4E37FF", "#00B500FF", "#008DFFFF"),
parlist = list(mfrow = c(1, 3), mar = c(5, 5, 5, 0.5), xpd = 1)
)
}
\arguments{
\item{range}{Range for beta coefficient (vector of length 2)}
\item{col}{Lasso/ridge color (vector of length 2)}
\item{parlist}{List of arguments to pass to \code{par()}}
}
\description{
Reproduces Figure 3.1 from the book; if you specify any options, your results may look different.
}
\examples{
Fig3.1()
}
|
# plot info about fits from 3 sources
#
# For one fitted model summary, writes two PDFs under pdf/2d_3/:
#   corr_L<L>.pdf      - marginal correlation of source 1 (BC) and source 2
#                        (RCM) with source 3 vs grid cell distance, with
#                        95% intervals (summary columns 4 and 8)
#   cond_corr_L<L>.pdf - conditional correlation of each of BC/RCM with
#                        source 3, given the other source
#
# fitsum - matrix-like MCMC fit summary; rows named "corrSigma_d[d,i,j]"
#          hold summaries of corr(source i, source j) at distance index d
#          (column 1 = point estimate).
# L      - number of knots/basis functions; used for titles, file names and
#          for drawing knot positions when L > 1.
#
# NOTE(review): relies on globals ud, DIC, pD, knots, d.samet and seq.samet
# being defined in the calling environment -- confirm they are set first.
"model2d_3_plots" <- function(fitsum, L) {
	fsr <- fitsum
	# posterior summaries of corr(BC, source 3) and corr(RCM, source 3)
	corr_1_3 <- fsr[grep("corrSigma_d\\[.*.,1,3\\]", rownames(fsr)),]
	corr_2_3 <- fsr[grep("corrSigma_d\\[.*.,2,3\\]", rownames(fsr)),]
	# plot correlation vs grid cell distance (x axis is reversed below;
	# the original comment said "vs time")
	pdf(paste0("pdf/2d_3/corr_L",L,".pdf"))
	par(bty="l")
	plot(ud,corr_1_3[,1],
		xlab="Grid Cells Apart",ylab="Correlation",
		main=paste0("Correlation vs Grid Cell Distance, L=",L,"\n","DIC: ",round(DIC,1),", pD=",round(pD,1)),
		type="l",lwd=2,col="blue",ylim=c(-0.5,1), xaxt='n',xlim=rev(range(ud))) # BC
	axis(1, at=d.samet[seq.samet], labels=rev(seq.samet))
	lines(ud,corr_2_3[,1],lwd=0.5,col="red") # RCM
	# 95% intervals
	# BC
	lines(ud,corr_1_3[,4],lwd=0.5,col="blue",lty=2)
	lines(ud,corr_1_3[,8],lwd=0.5,col="blue",lty=2)
	# RCM
	lines(ud,corr_2_3[,4],lwd=0.5,col="red",lty=2)
	lines(ud,corr_2_3[,8],lwd=0.5,col="red",lty=2)
	abline(h=0, lty=3)
	# mark knot locations along the bottom of the plot
	if (L > 1) {
		points(knots, rep(-0.5, length(knots)), col="red", pch=4, cex=1.5)
	}
	legend("topleft",c("BC","RCM"),ncol=1,inset=0.05,col=c("blue","red"),lty=c(1,1))
	graphics.off()
	# plot conditional correlation
	# row 1: BC given RCM; row 2: RCM given BC; one column per distance
	cond.corrs <- matrix(0, nrow=2, ncol=length(ud))
	for (source in c(1,2)) {
		if (source == 1) {
			o_source <- 2
		} else {
			o_source <- 1
		}
		cond.corrs[source,] <- unlist(sapply(1:length(ud), function(my_d) {
			# NOTE(review): assumes grep() finds exactly 9 rows per distance,
			# in column-major 3x3 order -- confirm the naming of fitsum rows.
			cmat <- matrix(fsr[grep( paste0("corrSigma_d\\[",my_d,","), rownames(fsr)),1],nrow=3,ncol=3)
			# compute conditional corr mat
			# NOTE(review): cmat.cond[1,2] is a conditional *covariance*
			# (not rescaled by the conditional variances) -- confirm this
			# is intended as the plotted "conditional correlation".
			cmat.cond <- cmat[c(source,3),c(source,3)] - tcrossprod(cmat[c(source,3),o_source])
			cmat.cond[1,2]
		}))
	}
	# debug peek at the first 10 distances; assumes length(ud) >= 10
	print(cond.corrs[,1:10])
	pdf(paste0("pdf/2d_3/cond_corr_L",L,".pdf"))
	par(bty="l")
	plot(ud,cond.corrs[1,],
		xlab="Grid Cells Apart",ylab="Conditional Correlation",
		main=paste0("Conditional Corr vs Grid Cell Distance, L=",L,"\n","DIC: ",round(DIC,1),", pD=",round(pD,1)),
		type="l",lwd=2,col="blue",ylim=c(-0.5,1), xaxt='n',xlim=rev(range(ud))) # BC
	axis(1, at=d.samet[seq.samet], labels=rev(seq.samet))
	lines(ud,cond.corrs[2,],lwd=0.5,col="red") # RCM
	abline(h=0, lty=3)
	if (L > 1) {
		points(knots, rep(-0.5, length(knots)), col="red", pch=4, cex=1.5)
	}
	legend("topleft",c("BC given RCM","RCM given BC"),ncol=1,inset=0.05,col=c("blue","red"),lty=c(1,1))
	graphics.off()
}
# Distances along the diagonal of the distance matrix, used for x-axis tick
# placement inside model2d_3_plots().
# NOTE(review): depends on the global distance matrix D being defined earlier
# in the session -- confirm before sourcing this file standalone.
d.samet <- D[row(D)==col(D)]
N.samet <- length(d.samet)
# Indices of the distances to label on the x axis.
seq.samet <- ( c(1, 20, 30, 40, 50, 60, 70, 80, 100) )
# Toggle which family of fits to plot by flipping TRUE/FALSE below:
# linear basis, b-spline basis, or the generic fitsumL* files.
if (TRUE) {
	# linear
	for (L in c(5,10,15,25)) {
		load(paste0("fitsums/fitsum_linL",L,".RData"))
		model2d_3_plots(fitsum, L)
	}
}
if (FALSE) {
	# b-spline
	for (L in c(5,10,15,25,30)) {
		load(paste0("fitsums/fitsum_bsL",L,".RData"))
		model2d_3_plots(fitsum, L)
	}
}
if (FALSE) {
	for (L in c(4,7,11,14,17,20,24,27,30,35)) { #,20,24,27,30)) {
		load(paste0("fitsums/fitsumL",L,".RData"))
		model2d_3_plots(fitsum, L)
	}
}
# NOTE(review): 'done' is a bare symbol, not a call; evaluating it raises
# "object 'done' not found" and aborts the script here, so the lines below
# never run. This looks like a deliberate stop guard for interactive use --
# confirm, or make the intent explicit with stop("done").
done
load("fitsums/fitsumL1.RData"); model2d_3_plots(fitsum, 1)
load("fitsums/fitsumL2.RData"); model2d_3_plots(fitsum, 2)
load("fitsums/fitsumL3.RData"); model2d_3_plots(fitsum, 3)
load("fitsums/fitsumL4.RData"); model2d_3_plots(fitsum, 4)
load("fitsums/fitsumL6.RData"); model2d_3_plots(fitsum, 6)
load("fitsums/fitsumL8.RData"); model2d_3_plots(fitsum, 8)
load("fitsums/fitsumL10.RData"); model2d_3_plots(fitsum, 10)
#load("fitsums/fitsumL11.RData"); model2d_3_plots(fitsum, 11)
#load("fitsums/fitsumL12.RData"); model2d_3_plots(fitsum, 12)
#load("fitsums/fitsumL13.RData"); model2d_3_plots(fitsum, 13)
#load("fitsums/fitsumL14.RData"); model2d_3_plots(fitsum, 14)
#load("fitsums/fitsumL15.RData"); model2d_3_plots(fitsum, 15)
load("fitsums/fitsumL16.RData"); model2d_3_plots(fitsum, 16)
load("fitsums/fitsumL17.RData"); model2d_3_plots(fitsum, 17)
load("fitsums/fitsumL18.RData"); model2d_3_plots(fitsum, 18)
load("fitsums/fitsumL19.RData"); model2d_3_plots(fitsum, 19)
load("fitsums/fitsumL20.RData"); model2d_3_plots(fitsum, 20)
| /R/plot_2d_3.R | no_license | MariaMcCrann/climate | R | false | false | 3,944 | r | # plot info about fits from 3 sources
"model2d_3_plots" <- function(fitsum, L) {
fsr <- fitsum
corr_1_3 <- fsr[grep("corrSigma_d\\[.*.,1,3\\]", rownames(fsr)),]
corr_2_3 <- fsr[grep("corrSigma_d\\[.*.,2,3\\]", rownames(fsr)),]
# plot correlation vs time
pdf(paste0("pdf/2d_3/corr_L",L,".pdf"))
par(bty="l")
plot(ud,corr_1_3[,1],
xlab="Grid Cells Apart",ylab="Correlation",
main=paste0("Correlation vs Grid Cell Distance, L=",L,"\n","DIC: ",round(DIC,1),", pD=",round(pD,1)),
type="l",lwd=2,col="blue",ylim=c(-0.5,1), xaxt='n',xlim=rev(range(ud))) # BC
axis(1, at=d.samet[seq.samet], labels=rev(seq.samet))
lines(ud,corr_2_3[,1],lwd=0.5,col="red") # RCM
# 95% intervals
# BC
lines(ud,corr_1_3[,4],lwd=0.5,col="blue",lty=2)
lines(ud,corr_1_3[,8],lwd=0.5,col="blue",lty=2)
# RCM
lines(ud,corr_2_3[,4],lwd=0.5,col="red",lty=2)
lines(ud,corr_2_3[,8],lwd=0.5,col="red",lty=2)
abline(h=0, lty=3)
if (L > 1) {
points(knots, rep(-0.5, length(knots)), col="red", pch=4, cex=1.5)
}
legend("topleft",c("BC","RCM"),ncol=1,inset=0.05,col=c("blue","red"),lty=c(1,1))
graphics.off()
# plot conditional correlation
cond.corrs <- matrix(0, nrow=2, ncol=length(ud))
for (source in c(1,2)) {
if (source == 1) {
o_source <- 2
} else {
o_source <- 1
}
cond.corrs[source,] <- unlist(sapply(1:length(ud), function(my_d) {
cmat <- matrix(fsr[grep( paste0("corrSigma_d\\[",my_d,","), rownames(fsr)),1],nrow=3,ncol=3)
# compute conditional corr mat
cmat.cond <- cmat[c(source,3),c(source,3)] - tcrossprod(cmat[c(source,3),o_source])
cmat.cond[1,2]
}))
}
print(cond.corrs[,1:10])
pdf(paste0("pdf/2d_3/cond_corr_L",L,".pdf"))
par(bty="l")
plot(ud,cond.corrs[1,],
xlab="Grid Cells Apart",ylab="Conditional Correlation",
main=paste0("Conditional Corr vs Grid Cell Distance, L=",L,"\n","DIC: ",round(DIC,1),", pD=",round(pD,1)),
type="l",lwd=2,col="blue",ylim=c(-0.5,1), xaxt='n',xlim=rev(range(ud))) # BC
axis(1, at=d.samet[seq.samet], labels=rev(seq.samet))
lines(ud,cond.corrs[2,],lwd=0.5,col="red") # RCM
abline(h=0, lty=3)
if (L > 1) {
points(knots, rep(-0.5, length(knots)), col="red", pch=4, cex=1.5)
}
legend("topleft",c("BC given RCM","RCM given BC"),ncol=1,inset=0.05,col=c("blue","red"),lty=c(1,1))
graphics.off()
}
d.samet <- D[row(D)==col(D)]
N.samet <- length(d.samet)
seq.samet <- ( c(1, 20, 30, 40, 50, 60, 70, 80, 100) )
if (TRUE) {
# linear
for (L in c(5,10,15,25)) {
load(paste0("fitsums/fitsum_linL",L,".RData"))
model2d_3_plots(fitsum, L)
}
}
if (FALSE) {
# b-spline
for (L in c(5,10,15,25,30)) {
load(paste0("fitsums/fitsum_bsL",L,".RData"))
model2d_3_plots(fitsum, L)
}
}
if (FALSE) {
for (L in c(4,7,11,14,17,20,24,27,30,35)) { #,20,24,27,30)) {
load(paste0("fitsums/fitsumL",L,".RData"))
model2d_3_plots(fitsum, L)
}
}
done
load("fitsums/fitsumL1.RData"); model2d_3_plots(fitsum, 1)
load("fitsums/fitsumL2.RData"); model2d_3_plots(fitsum, 2)
load("fitsums/fitsumL3.RData"); model2d_3_plots(fitsum, 3)
load("fitsums/fitsumL4.RData"); model2d_3_plots(fitsum, 4)
load("fitsums/fitsumL6.RData"); model2d_3_plots(fitsum, 6)
load("fitsums/fitsumL8.RData"); model2d_3_plots(fitsum, 8)
load("fitsums/fitsumL10.RData"); model2d_3_plots(fitsum, 10)
#load("fitsums/fitsumL11.RData"); model2d_3_plots(fitsum, 11)
#load("fitsums/fitsumL12.RData"); model2d_3_plots(fitsum, 12)
#load("fitsums/fitsumL13.RData"); model2d_3_plots(fitsum, 13)
#load("fitsums/fitsumL14.RData"); model2d_3_plots(fitsum, 14)
#load("fitsums/fitsumL15.RData"); model2d_3_plots(fitsum, 15)
load("fitsums/fitsumL16.RData"); model2d_3_plots(fitsum, 16)
load("fitsums/fitsumL17.RData"); model2d_3_plots(fitsum, 17)
load("fitsums/fitsumL18.RData"); model2d_3_plots(fitsum, 18)
load("fitsums/fitsumL19.RData"); model2d_3_plots(fitsum, 19)
load("fitsums/fitsumL20.RData"); model2d_3_plots(fitsum, 20)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_coha.R
\name{search_coha}
\alias{search_coha}
\title{Search the Corpus of Historical American English (COHA)}
\usage{
search_coha(search_terms, section = "fict", max_type = 10,
max_per_term = 100, max_total_result = 1000)
}
\arguments{
\item{search_terms}{The search term or terms, as a vector of strings.}
\item{section}{The section or sections of the COHA to search in, from among \code{"all"} for all sections, \code{"fict"} for the fiction section (the default), \code{"mag"} for magazine, \code{"news"} for newspaper, \code{"nf"} for NF Books. Also, a specific decade can be specified by the first year in the decade, for example, \code{1920} or \code{1880}. Any combination of genres and/or decades can be specified in a vector, for example, \code{section = c("1850", "1950")} or \code{section = c("mag", "news")}.}
\item{max_type}{An integer specifying the maximum number of unique word types to return for each search string (results shown in the upper right portion of the COHA). For example, searching for nouns with the search string "[n*]" could potentially return tens of thousands of unique types, but the user may only be interested in the 100 most frequent ones.}
\item{max_per_term}{An integer specifying the maximum number of keyword-in-context (KWIC) results to return for each search string.}
\item{max_total_result}{An integer specifying the maximum number of total results to return. If only one search term is given in \code{search_terms}, this argument should be equal to or greater than the integer specified in \code{max_per_term}.}
}
\value{
A data frame.
}
\description{
Retrieve keyword-in-context results from the COHA.
}
\examples{
search_coha("erstwhile")
search_coha("erstwhile", section = "mag")
search_coha(c("erstwhile", "ere"), section = c("mag", "news"), max_per_term = 500)
}
| /man/search_coha.Rd | no_license | ekbrown/byucorpora | R | false | true | 1,908 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_coha.R
\name{search_coha}
\alias{search_coha}
\title{Search the Corpus of Historical American English (COHA)}
\usage{
search_coha(search_terms, section = "fict", max_type = 10,
max_per_term = 100, max_total_result = 1000)
}
\arguments{
\item{search_terms}{The search term or terms, as a vector of strings.}
\item{section}{The section or sections of the COHA to search in, from among \code{"all"} for all sections, \code{"fict"} for the fiction section (the default), \code{"mag"} for magazine, \code{"news"} for newspaper, \code{"nf"} for NF Books. Also, a specific decade can be specified by the first year in the decade, for example, \code{1920} or \code{1880}. Any combination of genres and/or decades can be specified in a vector, for example, \code{section = c("1850", "1950")} or \code{section = c("mag", "news")}.}
\item{max_type}{An integer specifying the maximum number of unique word types to return for each search string (results shown in the upper right portion of the COHA). For example, searching for nouns with the search string "[n*]" could potentially return tens of thousands of unique types, but the user may only be interested in the 100 most frequent ones.}
\item{max_per_term}{An integer specifying the maximum number of keyword-in-context (KWIC) results to return for each search string.}
\item{max_total_result}{An integer specifying the maximum number of total results to return. If only one search term is given in \code{search_terms}, this argument should be equal to or greater than the integer specified in \code{max_per_term}.}
}
\value{
A data frame.
}
\description{
Retrieve keyword-in-context results from the COHA.
}
\examples{
search_coha("erstwhile")
search_coha("erstwhile", section = "mag")
search_coha(c("erstwhile", "ere"), section = c("mag", "news"), max_per_term = 500)
}
|
# renv 0.9.1 bootstrap/autoloader, normally generated by renv::activate().
# Ensures the pinned version of renv is present in the project library and
# loads the project; if missing, bootstraps renv from CRAN or from a GitHub
# tarball. Auto-generated code: keep logic changes out of this block.
local({
  # the requested version of renv
  version <- "0.9.1"
  # avoid recursion
  if (!is.na(Sys.getenv("RENV_R_INITIALIZING", unset = NA)))
    return(invisible(TRUE))
  # signal that we're loading renv during R startup
  Sys.setenv("RENV_R_INITIALIZING" = "true")
  on.exit(Sys.unsetenv("RENV_R_INITIALIZING"), add = TRUE)
  # signal that we've consented to use renv
  options(renv.consent = TRUE)
  # load the 'utils' package eagerly -- this ensures that renv shims, which
  # mask 'utils' packages, will come first on the search path
  library(utils, lib.loc = .Library)
  # check to see if renv has already been loaded
  if ("renv" %in% loadedNamespaces()) {
    # if renv has already been loaded, and it's the requested version of renv,
    # nothing to do
    spec <- .getNamespaceInfo(.getNamespace("renv"), "spec")
    if (identical(spec[["version"]], version))
      return(invisible(TRUE))
    # otherwise, unload and attempt to load the correct version of renv
    unloadNamespace("renv")
  }
  # construct path to renv in library
  # (library path is versioned by R release, and by platform)
  libpath <- local({
    root <- Sys.getenv("RENV_PATHS_LIBRARY", unset = "renv/library")
    prefix <- paste("R", getRversion()[1, 1:2], sep = "-")
    # include SVN revision for development versions of R
    # (to avoid sharing platform-specific artefacts with released versions of R)
    devel <-
      identical(R.version[["status"]], "Under development (unstable)") ||
      identical(R.version[["nickname"]], "Unsuffered Consequences")
    if (devel)
      prefix <- paste(prefix, R.version[["svn rev"]], sep = "-r")
    file.path(root, prefix, R.version$platform)
  })
  # try to load renv from the project library
  if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
    # warn if the version of renv loaded does not match
    loadedversion <- utils::packageDescription("renv", fields = "Version")
    if (version != loadedversion) {
      # assume four-component versions are from GitHub; three-component
      # versions are from CRAN
      components <- strsplit(loadedversion, "[.-]")[[1]]
      remote <- if (length(components) == 4L)
        paste("rstudio/renv", loadedversion, sep = "@")
      else
        paste("renv", loadedversion, sep = "@")
      fmt <- paste(
        "renv %1$s was loaded from project library, but renv %2$s is recorded in lockfile.",
        "Use `renv::record(\"%3$s\")` to record this version in the lockfile.",
        "Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.",
        sep = "\n"
      )
      msg <- sprintf(fmt, loadedversion, version, remote)
      warning(msg, call. = FALSE)
    }
    # load the project
    return(renv::load())
  }
  # failed to find renv locally; we'll try to install from GitHub.
  # first, set up download options as appropriate (try to use GITHUB_PAT)
  # bootstrap strategy: try a matching version on CRAN first, then fall back
  # to the tagged GitHub tarball
  install_renv <- function() {
    message("Failed to find installation of renv -- attempting to bootstrap...")
    # ensure .Rprofile doesn't get executed
    rpu <- Sys.getenv("R_PROFILE_USER", unset = NA)
    Sys.setenv(R_PROFILE_USER = "<NA>")
    on.exit({
      if (is.na(rpu))
        Sys.unsetenv("R_PROFILE_USER")
      else
        Sys.setenv(R_PROFILE_USER = rpu)
    }, add = TRUE)
    # prepare download options
    pat <- Sys.getenv("GITHUB_PAT")
    if (nzchar(Sys.which("curl")) && nzchar(pat)) {
      fmt <- "--location --fail --header \"Authorization: token %s\""
      extra <- sprintf(fmt, pat)
      saved <- options("download.file.method", "download.file.extra")
      options(download.file.method = "curl", download.file.extra = extra)
      on.exit(do.call(base::options, saved), add = TRUE)
    } else if (nzchar(Sys.which("wget")) && nzchar(pat)) {
      fmt <- "--header=\"Authorization: token %s\""
      extra <- sprintf(fmt, pat)
      saved <- options("download.file.method", "download.file.extra")
      options(download.file.method = "wget", download.file.extra = extra)
      on.exit(do.call(base::options, saved), add = TRUE)
    }
    # fix up repos
    repos <- getOption("repos")
    on.exit(options(repos = repos), add = TRUE)
    repos[repos == "@CRAN@"] <- "https://cloud.r-project.org"
    options(repos = repos)
    # check for renv on CRAN matching this version
    db <- as.data.frame(available.packages(), stringsAsFactors = FALSE)
    if ("renv" %in% rownames(db)) {
      entry <- db["renv", ]
      if (identical(entry$Version, version)) {
        message("* Installing renv ", version, " ... ", appendLF = FALSE)
        dir.create(libpath, showWarnings = FALSE, recursive = TRUE)
        utils::install.packages("renv", lib = libpath, quiet = TRUE)
        message("Done!")
        return(TRUE)
      }
    }
    # try to download renv
    message("* Downloading renv ", version, " ... ", appendLF = FALSE)
    prefix <- "https://api.github.com"
    url <- file.path(prefix, "repos/rstudio/renv/tarball", version)
    destfile <- tempfile("renv-", fileext = ".tar.gz")
    on.exit(unlink(destfile), add = TRUE)
    utils::download.file(url, destfile = destfile, mode = "wb", quiet = TRUE)
    message("Done!")
    # attempt to install it into project library
    message("* Installing renv ", version, " ... ", appendLF = FALSE)
    dir.create(libpath, showWarnings = FALSE, recursive = TRUE)
    # invoke using system2 so we can capture and report output
    bin <- R.home("bin")
    exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R"
    r <- file.path(bin, exe)
    args <- c("--vanilla", "CMD", "INSTALL", "-l", shQuote(libpath), shQuote(destfile))
    output <- system2(r, args, stdout = TRUE, stderr = TRUE)
    message("Done!")
    # check for successful install
    status <- attr(output, "status")
    if (is.numeric(status) && !identical(status, 0L)) {
      text <- c("Error installing renv", "=====================", output)
      writeLines(text, con = stderr())
    }
  }
  # best-effort: install failures are reported above but must not stop startup
  try(install_renv())
  # try again to load
  if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
    message("Successfully installed and loaded renv ", version, ".")
    return(renv::load())
  }
  # failed to download or load renv; warn the user
  msg <- c(
    "Failed to find an renv installation: the project will not be loaded.",
    "Use `renv::activate()` to re-initialize the project."
  )
  warning(paste(msg, collapse = "\n"), call. = FALSE)
})
| /renv/activate.R | no_license | DmytroRybalko/AnalyticsEdge | R | false | false | 6,401 | r |
local({
# Bootstrap loader for renv: ensures the pinned version of renv is
# installed in the project library and loads the project, installing
# from CRAN or GitHub as a fallback. Runs from .Rprofile at R startup.
# the requested version of renv
version <- "0.9.1"
# avoid recursion
if (!is.na(Sys.getenv("RENV_R_INITIALIZING", unset = NA)))
return(invisible(TRUE))
# signal that we're loading renv during R startup
Sys.setenv("RENV_R_INITIALIZING" = "true")
on.exit(Sys.unsetenv("RENV_R_INITIALIZING"), add = TRUE)
# signal that we've consented to use renv
options(renv.consent = TRUE)
# load the 'utils' package eagerly -- this ensures that renv shims, which
# mask 'utils' packages, will come first on the search path
library(utils, lib.loc = .Library)
# check to see if renv has already been loaded
if ("renv" %in% loadedNamespaces()) {
# if renv has already been loaded, and it's the requested version of renv,
# nothing to do
spec <- .getNamespaceInfo(.getNamespace("renv"), "spec")
if (identical(spec[["version"]], version))
return(invisible(TRUE))
# otherwise, unload and attempt to load the correct version of renv
unloadNamespace("renv")
}
# construct path to renv in library
# (library path is keyed on R major.minor version and platform so that
# artefacts built for different R versions/platforms are never shared)
libpath <- local({
root <- Sys.getenv("RENV_PATHS_LIBRARY", unset = "renv/library")
prefix <- paste("R", getRversion()[1, 1:2], sep = "-")
# include SVN revision for development versions of R
# (to avoid sharing platform-specific artefacts with released versions of R)
devel <-
identical(R.version[["status"]], "Under development (unstable)") ||
identical(R.version[["nickname"]], "Unsuffered Consequences")
if (devel)
prefix <- paste(prefix, R.version[["svn rev"]], sep = "-r")
file.path(root, prefix, R.version$platform)
})
# try to load renv from the project library
if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
# warn if the version of renv loaded does not match
loadedversion <- utils::packageDescription("renv", fields = "Version")
if (version != loadedversion) {
# assume four-component versions are from GitHub; three-component
# versions are from CRAN
components <- strsplit(loadedversion, "[.-]")[[1]]
remote <- if (length(components) == 4L)
paste("rstudio/renv", loadedversion, sep = "@")
else
paste("renv", loadedversion, sep = "@")
fmt <- paste(
"renv %1$s was loaded from project library, but renv %2$s is recorded in lockfile.",
"Use `renv::record(\"%3$s\")` to record this version in the lockfile.",
"Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.",
sep = "\n"
)
msg <- sprintf(fmt, loadedversion, version, remote)
warning(msg, call. = FALSE)
}
# load the project
return(renv::load())
}
# failed to find renv locally; we'll try to install from GitHub.
# first, set up download options as appropriate (try to use GITHUB_PAT)
install_renv <- function() {
message("Failed to find installation of renv -- attempting to bootstrap...")
# ensure .Rprofile doesn't get executed
# (the sentinel "<NA>" points R at a non-existent profile file)
rpu <- Sys.getenv("R_PROFILE_USER", unset = NA)
Sys.setenv(R_PROFILE_USER = "<NA>")
on.exit({
if (is.na(rpu))
Sys.unsetenv("R_PROFILE_USER")
else
Sys.setenv(R_PROFILE_USER = rpu)
}, add = TRUE)
# prepare download options
# (pass the GitHub token as an Authorization header via curl or wget;
# saved options are restored by on.exit when this function returns)
pat <- Sys.getenv("GITHUB_PAT")
if (nzchar(Sys.which("curl")) && nzchar(pat)) {
fmt <- "--location --fail --header \"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "curl", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
} else if (nzchar(Sys.which("wget")) && nzchar(pat)) {
fmt <- "--header=\"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "wget", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
}
# fix up repos
repos <- getOption("repos")
on.exit(options(repos = repos), add = TRUE)
repos[repos == "@CRAN@"] <- "https://cloud.r-project.org"
options(repos = repos)
# check for renv on CRAN matching this version
db <- as.data.frame(available.packages(), stringsAsFactors = FALSE)
if ("renv" %in% rownames(db)) {
entry <- db["renv", ]
if (identical(entry$Version, version)) {
message("* Installing renv ", version, " ... ", appendLF = FALSE)
dir.create(libpath, showWarnings = FALSE, recursive = TRUE)
utils::install.packages("renv", lib = libpath, quiet = TRUE)
message("Done!")
return(TRUE)
}
}
# try to download renv
message("* Downloading renv ", version, " ... ", appendLF = FALSE)
prefix <- "https://api.github.com"
url <- file.path(prefix, "repos/rstudio/renv/tarball", version)
destfile <- tempfile("renv-", fileext = ".tar.gz")
on.exit(unlink(destfile), add = TRUE)
utils::download.file(url, destfile = destfile, mode = "wb", quiet = TRUE)
message("Done!")
# attempt to install it into project library
message("* Installing renv ", version, " ... ", appendLF = FALSE)
dir.create(libpath, showWarnings = FALSE, recursive = TRUE)
# invoke using system2 so we can capture and report output
bin <- R.home("bin")
exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R"
r <- file.path(bin, exe)
args <- c("--vanilla", "CMD", "INSTALL", "-l", shQuote(libpath), shQuote(destfile))
output <- system2(r, args, stdout = TRUE, stderr = TRUE)
message("Done!")
# check for successful install
# (system2 attaches a "status" attribute only on non-zero exit)
status <- attr(output, "status")
if (is.numeric(status) && !identical(status, 0L)) {
text <- c("Error installing renv", "=====================", output)
writeLines(text, con = stderr())
}
}
try(install_renv())
# try again to load
if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
message("Successfully installed and loaded renv ", version, ".")
return(renv::load())
}
# failed to download or load renv; warn the user
msg <- c(
"Failed to find an renv installation: the project will not be loaded.",
"Use `renv::activate()` to re-initialize the project."
)
warning(paste(msg, collapse = "\n"), call. = FALSE)
})
|
#plot3.R
#Loads data and creates plot3 from Project Assignment 1 of
#Coursera's "Exploratory Data Analysis"
#(Aug 2014 session).
#
#script assumes that code file load_data.R is in the
#working directory.
#
#png graphics file is written to working directory
# load_data.R is expected to define `powerdata` with columns
# datetime, Sub_metering_1/2/3 -- TODO confirm against load_data.R
source("load_data.R")
writeLines("creating plot3.png...")
# open a 480x480 PNG device; all plotting below goes to this file
png("plot3.png", width=480, height=480, units="px")
with(powerdata, {
# base line plot for sub-meter 1, then overlay sub-meters 2 and 3
plot(datetime,
Sub_metering_1,
type="l",
col="black",
xlab="",
ylab="Energy sub metering")
lines(datetime,
Sub_metering_2,
col="red")
lines(datetime,
Sub_metering_3,
col="blue")
# legend colors must stay in sync with the plot/lines calls above
legend("topright",
legend=c("Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"),
lty=1,
col=c("black","red","blue"))
})
# close the PNG device so the file is flushed to disk
dev.off()
writeLines("...plot complete") | /plot3.R | no_license | dkgonda/ExData_Plotting1 | R | false | false | 962 | r | #plot3.R
#Loads data and creates plot3 from Project Assignment 1 of
#Coursera's "Exploratory Data Analysis"
#(Aug 2014 session).
#
#script assumes that code file load_data.R is in the
#working directory.
#
#png graphics file is written to working directory
# load_data.R is expected to define `powerdata` with columns
# datetime, Sub_metering_1/2/3 -- TODO confirm against load_data.R
source("load_data.R")
writeLines("creating plot3.png...")
# open a 480x480 PNG device; all plotting below goes to this file
png("plot3.png", width=480, height=480, units="px")
with(powerdata, {
# base line plot for sub-meter 1, then overlay sub-meters 2 and 3
plot(datetime,
Sub_metering_1,
type="l",
col="black",
xlab="",
ylab="Energy sub metering")
lines(datetime,
Sub_metering_2,
col="red")
lines(datetime,
Sub_metering_3,
col="blue")
# legend colors must stay in sync with the plot/lines calls above
legend("topright",
legend=c("Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"),
lty=1,
col=c("black","red","blue"))
})
# close the PNG device so the file is flushed to disk
dev.off()
writeLines("...plot complete") |
# complete(): count the complete (no-NA) observation rows in each monitor file.
#
# Args:
#   directory: name of the folder (relative to the working directory)
#     containing monitor CSV files named 001.csv, 002.csv, ...
#   id: integer vector of monitor ids to process (default 1:332).
#
# Returns:
#   A data.frame with columns `id` and `nobs`, one row per requested id,
#   where `nobs` is the number of rows with no missing values in that file.
#
# References kept from the original:
# http://rstudio-pubs-static.s3.amazonaws.com/1938_14f57c0817674c85ac1df70f6ffcf8a3.html
# https://gist.github.com/timmyshen/6872633
# https://rpubs.com/SatoshiLiang/16516
complete <- function(directory, id = 1:332) {
  # Count complete rows for a single monitor id.
  count_complete <- function(i) {
    # file.path() is portable; sprintf("%03d") zero-pads like
    # formatC(i, width = 3, flag = "0") did in the original.
    monitor_file <- file.path(getwd(), directory, sprintf("%03d.csv", i))
    raw <- read.csv(monitor_file)
    sum(complete.cases(raw))
  }
  # vapply preallocates the result, avoiding the O(n^2) rbind-in-a-loop
  # growth pattern of the original implementation.
  data.frame(id = id, nobs = vapply(id, count_complete, integer(1)))
}
final.data <- data.frame()
for (i in id)
{
file.Path <- paste(getwd(), "/", directory, "/",formatC(i, width = 3, flag= "0" ), ".csv" ,sep="")
rawData <- read.csv(file.Path)
complete.Cases <- rawData[complete.cases(rawData),]
one.row <- c(i,nrow(complete.Cases))
final.data <- rbind(final.data, one.row)
}
colnames(final.data) <- c('id', 'nobs')
final.data
# http://rstudio-pubs-static.s3.amazonaws.com/1938_14f57c0817674c85ac1df70f6ffcf8a3.html
# https://gist.github.com/timmyshen/6872633
# https://rpubs.com/SatoshiLiang/16516
} |
## Vaccine Analysis Codes
# Exploratory summary of the vaccine-messaging survey experiment:
# loads the raw survey export, keeps consented + treated respondents,
# derives a single treatment label (treatmentX), and plots assignment
# counts and attrition by arm.
library(data.table)
library(foreign)
library(fastmatch)
library(readstata13)
library(ggplot2)
library(scales)
library(lubridate)
library(zoo)
# NOTE(review): rm(list=ls()) plus setwd() to an absolute personal path
# makes this script non-portable; prefer project-relative paths.
rm(list=ls())
setwd("/Users/ziao/Desktop/ALP301/Data")
# Read the survey export; empty strings are treated as missing.
# NOTE(review): `header = T` uses the reassignable alias T; TRUE is safer.
data = as.data.table(read.csv("Academic_Survey_Research_in_Africa_2021_05_04_15_55_42.csv",
na.strings=c("","NA"), header = T))
names(data)
## consent = YES, 393 responses
data = data[consent_response %in% c("yes","Yes", "YES"),]
## got assignment a treatment
data = data[!is.na(treatment_format)]
## check treatment probabilities
summary(data$treatment_format)
summary(data$treatment_nudges)
# Collapse format + nudge assignments into one label: non-image formats
# keep the format name; image arms take their nudge label instead.
data[treatment_format=="VID", treatmentX:="video"]
data[treatment_format=="MOG", treatmentX:="graphic"]
data[treatment_format=="TXT", treatmentX:="text"]
data[treatment_format=="IMG", treatmentX:=treatment_nudges]
data$treatmentX = as.factor(data$treatmentX)
summary(data$treatmentX)
## hist of treatment probabilities
# NOTE(review): `table` here shadows base::table within this script.
table = as.data.frame(table(data$treatmentX))
names(table) = c("group", "count")
pic = ggplot(table, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents by Treatment Assignment (N=276)") +
theme(plot.title = element_text(hjust = 0.5)) +
# Labels must match the alphabetical factor-level order of treatmentX.
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("treatment_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at attrition
# data2 = respondents who completed the post-treatment outcome; it is
# reused by the pre/post sections below.
data2 = data[!is.na(dv_send_post4),]
table2 = as.data.frame(table(data2$treatmentX))
names(table2) = c("group", "count")
attrition = merge(table, table2, by = "group")
attrition$remaining = attrition$count.y/ attrition$count.x
pic = ggplot(attrition, aes(x = group, y = remaining, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Share of Remaining Respondents \n") +
# NOTE(review): "avg = 0.52" in the title is hardcoded, not computed.
labs(title = "Share of Remaining Respondents by Treatment Assignments (avg = 0.52)") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("attrition_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at pre-post mean difference
# Switchers in willingness to SEND the message, pre vs post treatment.
# BUG FIX: data2 is a data.table, so `data3 = data2` only aliases it and
# the `:=` assignments below would also mutate data2 by reference;
# copy() keeps data2 intact for later sections (same analytical output).
data3 = copy(data2)
data3[, pre:=ifelse(dv_send_pre1=="Yes",1,0)]
data3[, post:=ifelse(dv_send_post4=="Yes",1,0)]
# diff = +1: switched toward sharing; -1: switched away from sharing.
data3[, diff:=post-pre]
summary(data3$diff)
table3 = as.data.frame(table(data3[diff==1,]$treatmentX))
names(table3) = c("group", "count")
pic = ggplot(table3, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Not Sharing to Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("pos_change_post_hist.pdf", pic, width = 7, height = 5, units = "in")
table4 = as.data.frame(table(data3[diff==-1,]$treatmentX))
names(table4) = c("group", "count")
pic = ggplot(table4, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="indianred1", colour="indianred4") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Sharing to Not Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("neg_change_post_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at pre-post mean difference
# Switchers in willingness to post on their TIMELINE, pre vs post treatment.
# BUG FIX: data2 is a data.table, so `data3 = data2` only aliases it and
# the `:=` assignments below would also mutate data2 by reference;
# copy() keeps data2 intact (same analytical output as before).
data3 = copy(data2)
data3[, pre:=ifelse(dv_timeline_pre1=="Yes",1,0)]
data3[, post:=ifelse(dv_timeline_post4=="Yes",1,0)]
# diff = +1: switched toward sharing; -1: switched away from sharing.
data3[, diff:=post-pre]
summary(data3$diff)
table3 = as.data.frame(table(data3[diff==1,]$treatmentX))
names(table3) = c("group", "count")
pic = ggplot(table3, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Not Sharing to Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("pos_change_timeline_hist.pdf", pic, width = 7, height = 5, units = "in")
table4 = as.data.frame(table(data3[diff==-1,]$treatmentX))
names(table4) = c("group", "count")
pic = ggplot(table4, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="indianred1", colour="indianred4") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Sharing to Not Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("neg_change_timeline_hist.pdf", pic, width = 7, height = 5, units = "in")
| /Vaccine_analysis_data_summary.R | no_license | alexjuziao/ALP301-spr21-project1-1 | R | false | false | 6,328 | r | ## Vaccine Analysis Codes
# Exploratory summary of the vaccine-messaging survey experiment:
# loads the raw survey export, keeps consented + treated respondents,
# derives a single treatment label (treatmentX), and plots assignment
# counts and attrition by arm.
library(data.table)
library(foreign)
library(fastmatch)
library(readstata13)
library(ggplot2)
library(scales)
library(lubridate)
library(zoo)
# NOTE(review): rm(list=ls()) plus setwd() to an absolute personal path
# makes this script non-portable; prefer project-relative paths.
rm(list=ls())
setwd("/Users/ziao/Desktop/ALP301/Data")
# Read the survey export; empty strings are treated as missing.
# NOTE(review): `header = T` uses the reassignable alias T; TRUE is safer.
data = as.data.table(read.csv("Academic_Survey_Research_in_Africa_2021_05_04_15_55_42.csv",
na.strings=c("","NA"), header = T))
names(data)
## consent = YES, 393 responses
data = data[consent_response %in% c("yes","Yes", "YES"),]
## got assignment a treatment
data = data[!is.na(treatment_format)]
## check treatment probabilities
summary(data$treatment_format)
summary(data$treatment_nudges)
# Collapse format + nudge assignments into one label: non-image formats
# keep the format name; image arms take their nudge label instead.
data[treatment_format=="VID", treatmentX:="video"]
data[treatment_format=="MOG", treatmentX:="graphic"]
data[treatment_format=="TXT", treatmentX:="text"]
data[treatment_format=="IMG", treatmentX:=treatment_nudges]
data$treatmentX = as.factor(data$treatmentX)
summary(data$treatmentX)
## hist of treatment probabilities
# NOTE(review): `table` here shadows base::table within this script.
table = as.data.frame(table(data$treatmentX))
names(table) = c("group", "count")
pic = ggplot(table, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents by Treatment Assignment (N=276)") +
theme(plot.title = element_text(hjust = 0.5)) +
# Labels must match the alphabetical factor-level order of treatmentX.
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("treatment_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at attrition
# data2 = respondents who completed the post-treatment outcome; it is
# reused by the pre/post sections below.
data2 = data[!is.na(dv_send_post4),]
table2 = as.data.frame(table(data2$treatmentX))
names(table2) = c("group", "count")
attrition = merge(table, table2, by = "group")
attrition$remaining = attrition$count.y/ attrition$count.x
pic = ggplot(attrition, aes(x = group, y = remaining, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Share of Remaining Respondents \n") +
# NOTE(review): "avg = 0.52" in the title is hardcoded, not computed.
labs(title = "Share of Remaining Respondents by Treatment Assignments (avg = 0.52)") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("attrition_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at pre-post mean difference
# Switchers in willingness to SEND the message, pre vs post treatment.
# BUG FIX: data2 is a data.table, so `data3 = data2` only aliases it and
# the `:=` assignments below would also mutate data2 by reference;
# copy() keeps data2 intact for later sections (same analytical output).
data3 = copy(data2)
data3[, pre:=ifelse(dv_send_pre1=="Yes",1,0)]
data3[, post:=ifelse(dv_send_post4=="Yes",1,0)]
# diff = +1: switched toward sharing; -1: switched away from sharing.
data3[, diff:=post-pre]
summary(data3$diff)
table3 = as.data.frame(table(data3[diff==1,]$treatmentX))
names(table3) = c("group", "count")
pic = ggplot(table3, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Not Sharing to Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("pos_change_post_hist.pdf", pic, width = 7, height = 5, units = "in")
table4 = as.data.frame(table(data3[diff==-1,]$treatmentX))
names(table4) = c("group", "count")
pic = ggplot(table4, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="indianred1", colour="indianred4") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Sharing to Not Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("neg_change_post_hist.pdf", pic, width = 7, height = 5, units = "in")
## look at pre-post mean difference
# Switchers in willingness to post on their TIMELINE, pre vs post treatment.
# BUG FIX: data2 is a data.table, so `data3 = data2` only aliases it and
# the `:=` assignments below would also mutate data2 by reference;
# copy() keeps data2 intact (same analytical output as before).
data3 = copy(data2)
data3[, pre:=ifelse(dv_timeline_pre1=="Yes",1,0)]
data3[, post:=ifelse(dv_timeline_post4=="Yes",1,0)]
# diff = +1: switched toward sharing; -1: switched away from sharing.
data3[, diff:=post-pre]
summary(data3$diff)
table3 = as.data.frame(table(data3[diff==1,]$treatmentX))
names(table3) = c("group", "count")
pic = ggplot(table3, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="lightskyblue", colour="navy") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Not Sharing to Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("pos_change_timeline_hist.pdf", pic, width = 7, height = 5, units = "in")
table4 = as.data.frame(table(data3[diff==-1,]$treatmentX))
names(table4) = c("group", "count")
pic = ggplot(table4, aes(x = group, y = count, width=.7)) +
geom_bar(stat="identity", position="dodge", fill="indianred1", colour="indianred4") + theme_light() +
xlab("\n Group") + ylab("Number of Respondents \n") +
labs(title = "Number of Respondents who Switched from Sharing to Not Sharing") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(label = c("concern", "control", "deliberation", "endorsement", "graphic", "real info",
"relatable", "safety others", "safety self", "text", "video")) +
theme(axis.text.x = element_text(angle = 60, vjust = 0.5, hjust=0.5))
ggsave("neg_change_timeline_hist.pdf", pic, width = 7, height = 5, units = "in")
|
# Making sure that making covariance functions into Rcpp functions
# still gives correct results.
# Scratch benchmark: fits GauPro_kernel_model with each kernel on a 1-D
# toy function and on the 6-D OTL circuit test function, recording wall
# times and spot-check predictions in comments for manual comparison.
# NOTE(review): `nug.est=T` / `covmat = T` use the reassignable alias T;
# TRUE is safer.
# Exponential kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Exponential$new(1), parallel=FALSE, verbose=10, nug.est=T))
# .89 sec
system.time(gp$cool1Dplot()) # .42 sec
# Trailing comments record expected predictions from the pre-Rcpp code.
gp$predict(.656) # -0.6040612
gp$predict(c(.11, .24, .455, .676, .888)) # 1.5120375, 0.8360396, 0.4850529, -0.6252635, -1.3454632
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Exponential, parallel=FALSE, verbose=10, nug.est=T))
# 19.68 / 20.28 s
system.time(gp$predict(x+.01)) # .43 sec
system.time(gp$predict(x+.01, covmat = T)) # .72 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.577286
# Matern 3/2 kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern32, parallel=FALSE, verbose=10, nug.est=T))
# 1.73 sec
system.time(gp$cool1Dplot()) # .55 sec
gp$predict(.656) # -0.6063402
gp$predict(c(.11, .24, .455, .676, .888)) # 1.4436862 0.8492838 0.4596046 -0.6550763 -1.2473287
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern32, parallel=FALSE, verbose=10, nug.est=T))
# 29.31 / 30.49 s
system.time(gp$predict(x+.01)) # .65 sec
system.time(gp$predict(x+.01, covmat = T)) # 1.15 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.646576
# Matern 5/2 kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern52, parallel=FALSE, verbose=10, nug.est=T))
# 1.59 sec
system.time(gp$cool1Dplot()) # .56 sec
gp$predict(.656) # -0.616631
gp$predict(c(.11, .24, .455, .676, .888)) # 1.4023642 0.8733849 0.4285692 -0.6816842-1.1858629
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern52, parallel=FALSE, verbose=10, nug.est=T))
# 24.51 / 25.66 s
system.time(gp$predict(x+.01)) # .68 sec
system.time(gp$predict(x+.01, covmat = T)) # 1.02 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.526564
# Gaussian kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# .45 sec
system.time(gp$cool1Dplot()) # 05 sec
gp$predict(.656) # -0.6367818
gp$predict(c(.11, .24, .455, .676, .888)) # 1.3779479 0.9186582 0.4100991 -0.7215350 -1.1539650
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# 5.55/5.77 s
system.time(gp$predict(x+.01)) # 0 sec
system.time(gp$predict(x+.01, covmat = T)) # .02 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.548369
# Test Rcpp kernel_gauss_dC
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# Step into the C_dC_dparams method once to inspect the Rcpp path.
debugonce(gp$kernel$C_dC_dparams)
gp$update()
| /scratch/scratch_kernels_rcpp.R | no_license | CollinErickson/GauPro | R | false | false | 4,158 | r | # Making sure that making covariance functions into Rcpp functions
# still gives correct results.
# Scratch benchmark: fits GauPro_kernel_model with each kernel on a 1-D
# toy function and on the 6-D OTL circuit test function, recording wall
# times and spot-check predictions in comments for manual comparison.
# NOTE(review): `nug.est=T` / `covmat = T` use the reassignable alias T;
# TRUE is safer.
# Exponential kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Exponential$new(1), parallel=FALSE, verbose=10, nug.est=T))
# .89 sec
system.time(gp$cool1Dplot()) # .42 sec
# Trailing comments record expected predictions from the pre-Rcpp code.
gp$predict(.656) # -0.6040612
gp$predict(c(.11, .24, .455, .676, .888)) # 1.5120375, 0.8360396, 0.4850529, -0.6252635, -1.3454632
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Exponential, parallel=FALSE, verbose=10, nug.est=T))
# 19.68 / 20.28 s
system.time(gp$predict(x+.01)) # .43 sec
system.time(gp$predict(x+.01, covmat = T)) # .72 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.577286
# Matern 3/2 kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern32, parallel=FALSE, verbose=10, nug.est=T))
# 1.73 sec
system.time(gp$cool1Dplot()) # .55 sec
gp$predict(.656) # -0.6063402
gp$predict(c(.11, .24, .455, .676, .888)) # 1.4436862 0.8492838 0.4596046 -0.6550763 -1.2473287
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern32, parallel=FALSE, verbose=10, nug.est=T))
# 29.31 / 30.49 s
system.time(gp$predict(x+.01)) # .65 sec
system.time(gp$predict(x+.01, covmat = T)) # 1.15 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.646576
# Matern 5/2 kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern52, parallel=FALSE, verbose=10, nug.est=T))
# 1.59 sec
system.time(gp$cool1Dplot()) # .56 sec
gp$predict(.656) # -0.616631
gp$predict(c(.11, .24, .455, .676, .888)) # 1.4023642 0.8733849 0.4285692 -0.6816842-1.1858629
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Matern52, parallel=FALSE, verbose=10, nug.est=T))
# 24.51 / 25.66 s
system.time(gp$predict(x+.01)) # .68 sec
system.time(gp$predict(x+.01, covmat = T)) # 1.02 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.526564
# Gaussian kernel
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# .45 sec
system.time(gp$cool1Dplot()) # 05 sec
gp$predict(.656) # -0.6367818
gp$predict(c(.11, .24, .455, .676, .888)) # 1.3779479 0.9186582 0.4100991 -0.7215350 -1.1539650
gp$predict(matrix(c(.11, .24, .455, .676, .888), ncol=1))
set.seed(0)
n <- 200
x <- matrix(runif(6*n), ncol=6)
y <- TestFunctions::OTL_Circuit(x)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# 5.55/5.77 s
system.time(gp$predict(x+.01)) # 0 sec
system.time(gp$predict(x+.01, covmat = T)) # .02 sec
gp$predict(matrix(c(.1,.2,.3,.4,.5,.6), ncol=6)) # 5.548369
# Test Rcpp kernel_gauss_dC
set.seed(0)
n <- 20
x <- matrix(seq(0,1,length.out = n), ncol=1)
f <- Vectorize(function(x) {sin(2*pi*x) + .5*sin(4*pi*x) +rnorm(1,0,.3)})
y <- f(x) #sin(2*pi*x) #+ rnorm(n,0,1e-1)
system.time(gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian, parallel=FALSE, verbose=10, nug.est=T))
# Step into the C_dC_dparams method once to inspect the Rcpp path.
debugonce(gp$kernel$C_dC_dparams)
gp$update()
|
###################################
#Continuous Uniform Distribution
# Summarise a continuous Uniform(a, b) distribution.
# Optionally draws the density (pdf) and/or distribution function (cdf),
# then returns the mean, variance and standard deviation as a named list.
# Invalid arguments yield an explanatory string (matching the other
# *.summary helpers in this file).
uniform.summary <- function(a, b, plotpdf = TRUE, plotcdf = TRUE) {
  if (a >= b) {
    return("a must be smaller than b")
  }
  if (a == -Inf | b == Inf | a == Inf | b == -Inf) {
    return("a and b must be finite")
  }
  width <- b - a
  mu <- (a + b) / 2
  sigma2 <- width^2 / 12
  # When both panels are requested, stack them vertically.
  if (plotpdf == TRUE & plotcdf == TRUE) {
    par(mfrow = c(2, 1))
    par(mar = c(4, 4, .5, .1))
  }
  if (plotpdf == TRUE) {
    s <- seq(a, b, length = 100)
    plot(s, dunif(s, a, b), xlab = "x", ylab = "f(x)", type = "l",
         ylim = c(0, 1.1 / width), xlim = c(a - width / 10, b + width / 10))
    # Draw the zero-density tails and dotted jumps at each endpoint.
    lines(seq(a - width / 10, a, length = 100), rep(0, 100))
    lines(seq(b, b + width / 10, length = 100), rep(0, 100))
    segments(a, 0, a, 1 / width, lty = 3)
    segments(b, 0, b, 1 / width, lty = 3)
  }
  if (plotcdf == TRUE) {
    s <- seq(a - width / 10, b + width / 10, length = 100)
    plot(s, punif(s, a, b), xlab = "x", ylab = "F(x)", type = "l", ylim = c(0, 1))
  }
  par(mfrow = c(1, 1))
  list(mean = mu, variance = sigma2, standard.deviation = sqrt(sigma2))
}
# P(lb <= X <= ub) for X ~ Uniform(a, b); validates the parameters and
# the probability bounds before evaluating the cdf difference.
uniform.prob <- function(a, b, lb, ub) {
  if (a >= b) {
    return("a must be smaller than b")
  }
  if (a == -Inf | b == Inf | a == Inf | b == -Inf) {
    return("a and b must be finite")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  punif(ub, a, b) - punif(lb, a, b)
}
# q-th quantile of Uniform(a, b); q must lie strictly in (0, 1).
uniform.quantile <- function(a, b, q) {
  if (a >= b) {
    return("a must be smaller than b")
  }
  if (a == -Inf | b == Inf | a == Inf | b == -Inf) {
    return("a and b must be finite")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  qunif(q, a, b)
}
###################################
#Normal Distribution
# Summarise a Normal(mu, sigma) distribution (sigma = standard deviation).
# Optionally draws the pdf and/or cdf over mu +/- 4*sigma, then returns
# the mean, variance and standard deviation as a named list.
# Invalid arguments yield an explanatory string (matching the other
# *.summary helpers in this file).
normal.summary <- function(mu, sigma, plotpdf = TRUE, plotcdf = TRUE) {
  if (abs(mu) == Inf | abs(sigma) == Inf) {
    return("mu and sigma must be finite")
  }
  if (sigma <= 0) {
    return("sigma must be positive")
  }
  # When both panels are requested, stack them vertically.
  if (plotpdf == TRUE & plotcdf == TRUE) {
    par(mfrow = c(2, 1))
    par(mar = c(4, 4, .5, .1))
  }
  # Both curves share a grid covering essentially all of the mass.
  s <- seq(mu - 4 * sigma, mu + 4 * sigma, length = 200)
  if (plotpdf == TRUE) {
    plot(s, dnorm(s, mu, sigma), xlab = "x", ylab = "f(x)", type = "l")
  }
  if (plotcdf == TRUE) {
    plot(s, pnorm(s, mu, sigma), xlab = "x", ylab = "F(x)", type = "l", ylim = c(0, 1))
  }
  par(mfrow = c(1, 1))
  list(mean = mu, variance = sigma^2, standard.deviation = sigma)
}
# P(lb <= X <= ub) for X ~ Normal(mu, sigma).
normal.prob <- function(mu, sigma, lb, ub) {
  if (abs(mu) == Inf | abs(sigma) == Inf) {
    return("mu and sigma must be finite")
  }
  if (sigma <= 0) {
    return("sigma must be positive")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  pnorm(ub, mu, sigma) - pnorm(lb, mu, sigma)
}
# q-th quantile of Normal(mu, sigma); q must lie strictly in (0, 1).
normal.quantile <- function(mu, sigma, q) {
  if (abs(mu) == Inf | abs(sigma) == Inf) {
    return("mu and sigma must be finite")
  }
  if (sigma <= 0) {
    return("sigma must be positive")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  qnorm(q, mu, sigma)
}
###################################
#Exponential Distribution
# Summarise an Exponential(lambda) distribution (rate parameterisation).
# Optionally draws the pdf and/or cdf up to the 99.9% quantile, then
# returns the mean, variance and standard deviation as a named list.
# An invalid rate yields an explanatory string (matching the other
# *.summary helpers in this file).
exponential.summary <- function(lambda, plotpdf = TRUE, plotcdf = TRUE) {
  if (abs(lambda) == Inf | lambda <= 0) {
    return("lambda must be a finite positive number")
  }
  # When both panels are requested, stack them vertically.
  if (plotpdf == TRUE & plotcdf == TRUE) {
    par(mfrow = c(2, 1))
    par(mar = c(4, 4, .5, .1))
  }
  # Plot up to the 99.9% quantile so the tail is visible but finite.
  s <- seq(0, qexp(0.999, rate = lambda), length = 100)
  if (plotpdf == TRUE) {
    plot(s, lambda * exp(-lambda * s), xlab = "x", ylab = "f(x)", type = "l")
  }
  if (plotcdf == TRUE) {
    plot(s, 1 - exp(-lambda * s), xlab = "x", ylab = "F(x)", type = "l", ylim = c(0, 1))
  }
  par(mfrow = c(1, 1))
  # Mean and standard deviation are both 1/lambda; variance is 1/lambda^2.
  list(mean = 1 / lambda, variance = 1 / lambda^2, standard.deviation = 1 / lambda)
}
# P(lb <= X <= ub) for X ~ Exponential(lambda). The density is zero
# below the origin, so bounds are effectively clipped at 0.
exponential.prob <- function(lambda, lb, ub) {
  if (abs(lambda) == Inf | lambda <= 0) {
    return("lambda must be a finite positive number")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  if (ub <= 0) {
    0
  } else if (lb > 0) {
    exp(-lambda * lb) - exp(-lambda * ub)
  } else {
    # lb <= 0 < ub: only the mass on (0, ub] counts.
    1 - exp(-lambda * ub)
  }
}
# q-th quantile of Exponential(lambda); q must lie strictly in (0, 1).
exponential.quantile <- function(lambda, q) {
  if (abs(lambda) == Inf | lambda <= 0) {
    return("lambda must be a finite positive number")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  # Closed-form inverse of F(x) = 1 - exp(-lambda * x).
  -log(1 - q) / lambda
}
###################################
#Gamma Distribution
# Summarise a Gamma(shape = r, rate = lambda) distribution: optionally draw
# the pdf and/or cdf over [0, 99.9th percentile] and return the mean
# (r/lambda), variance (r/lambda^2) and standard deviation.
# Invalid arguments return an explanatory string (file-wide convention).
gamma.summary=function(r,lambda,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(abs(r)==Inf | r<=0){return("r must be a finite positive number")}
mu=r/lambda
sigma2=r/lambda^2
sigma=sqrt(sigma2)
if(plotpdf==TRUE & plotcdf==FALSE)
{
# BUG FIX: lambda is a rate, so the plotting grid must use scale = 1/lambda
# (the original used scale = lambda here, inconsistent with the dgamma call
# below and with the both-plots branch).
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,dgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="f(x)",type="l")
}
if(plotpdf==FALSE & plotcdf==TRUE)
{
# BUG FIX: scale = 1/lambda here too (was scale = lambda).
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,pgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,dgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="f(x)",type="l")
plot(s,pgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# P(lb <= X <= ub) for X ~ Gamma(shape = r, rate = lambda).
gamma.prob = function(r, lambda, lb, ub) {
  if (abs(lambda) == Inf | lambda <= 0) {
    return("lambda must be a finite positive number")
  }
  if (abs(r) == Inf | r <= 0) {
    return("r must be a finite positive number")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  # rate = lambda is equivalent to scale = 1/lambda
  pgamma(ub, shape = r, rate = lambda) - pgamma(lb, shape = r, rate = lambda)
}
# q-th quantile of Gamma(shape = r, rate = lambda); q strictly in (0, 1).
gamma.quantile = function(r, lambda, q) {
  if (abs(lambda) == Inf | lambda <= 0) {
    return("lambda must be a finite positive number")
  }
  if (abs(r) == Inf | r <= 0) {
    return("r must be a finite positive number")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  # rate = lambda is equivalent to scale = 1/lambda
  qgamma(q, shape = r, rate = lambda)
}
###################################
#Weibull Distribution
# Summarise a Weibull(shape = beta, scale = delta) distribution: optionally
# draw the pdf and/or cdf over [0, 99.9th percentile] and return the mean
# delta*Gamma(1 + 1/beta), the variance delta^2*(Gamma(1 + 2/beta) -
# Gamma(1 + 1/beta)^2), and the standard deviation.
weibull.summary=function(beta,delta,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(delta)==Inf | delta<=0){return("delta must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
mu=delta*gamma(1+1/beta)
sigma2=delta^2*(gamma(1+2/beta)-(gamma(1+1/beta))^2)
sigma=sqrt(sigma2)
# pdf only
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,dweibull(s,shape=beta,scale=delta),xlab="x",ylab="f(x)",type="l")
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,pweibull(s,shape=beta,scale=delta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,dweibull(s,shape=beta,scale=delta),xlab="x",ylab="f(x)",type="l")
plot(s,pweibull(s,shape=beta,scale=delta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# P(lb <= X <= ub) for X ~ Weibull(shape = beta, scale = delta).
weibull.prob = function(beta, delta, lb, ub) {
  if (abs(delta) == Inf | delta <= 0) {
    return("delta must be a finite positive number")
  }
  if (abs(beta) == Inf | beta <= 0) {
    return("beta must be a finite positive number")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  # Difference of cdf values over the requested interval.
  cdf <- function(x) pweibull(x, shape = beta, scale = delta)
  cdf(ub) - cdf(lb)
}
# q-th quantile of Weibull(shape = beta, scale = delta); q strictly in (0, 1).
weibull.quantile = function(beta, delta, q) {
  if (abs(delta) == Inf | delta <= 0) {
    return("delta must be a finite positive number")
  }
  if (abs(beta) == Inf | beta <= 0) {
    return("beta must be a finite positive number")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  qweibull(q, shape = beta, scale = delta)
}
###################################
#Lognormal Distribution
# Summarise a Lognormal(meanlog = theta, sdlog = omega) distribution:
# optionally draw the pdf and/or cdf over [0, 99th percentile] and return
# the mean exp(theta + omega^2/2), the variance
# exp(2*theta + omega^2)*(exp(omega^2) - 1), and the standard deviation.
lognormal.summary=function(theta,omega,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(omega)==Inf | omega<=0){return("omega must be a finite positive number")}
if(abs(theta)==Inf){return("theta must be a finite number")}
mu=exp(theta+omega^2/2)
sigma2=exp(2*theta+omega^2)*(exp(omega^2)-1)
sigma=sqrt(sigma2)
# pdf only (note: 99th percentile cutoff and 1000 grid points here, unlike
# the 99.9% / 100-point grids used for the other distributions in this file)
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,dlnorm(s,theta,omega),xlab="x",ylab="f(x)",type="l")
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,plnorm(s,theta,omega),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,dlnorm(s,theta,omega),xlab="x",ylab="f(x)",type="l")
plot(s,plnorm(s,theta,omega),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# P(lb <= X <= ub) for X ~ Lognormal(meanlog = theta, sdlog = omega).
lognormal.prob = function(theta, omega, lb, ub) {
  if (abs(omega) == Inf | omega <= 0) {
    return("omega must be a finite positive number")
  }
  if (abs(theta) == Inf) {
    return("theta must be a finite number")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  plnorm(ub, meanlog = theta, sdlog = omega) - plnorm(lb, meanlog = theta, sdlog = omega)
}
# q-th quantile of Lognormal(meanlog = theta, sdlog = omega); q in (0, 1).
lognormal.quantile = function(theta, omega, q) {
  if (abs(omega) == Inf | omega <= 0) {
    return("omega must be a finite positive number")
  }
  if (abs(theta) == Inf) {
    return("theta must be a finite number")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  qlnorm(q, meanlog = theta, sdlog = omega)
}
###################################
#Beta Distribution
# Summarise a Beta(alpha, beta) distribution: optionally draw the pdf and/or
# cdf over [0, 1] (with short zero "tails" outside the support in the pdf
# panel) and return the mean alpha/(alpha+beta), the variance
# alpha*beta / ((alpha+beta)^2 * (alpha+beta+1)), and the standard deviation.
beta.summary=function(alpha,beta,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(alpha)==Inf | alpha<=0){return("alpha must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
mu=alpha/(alpha+beta)
sigma2=alpha*beta/(alpha+beta)^2/(alpha+beta+1)
sigma=sqrt(sigma2)
# pdf only: density on [0, 1], flat zero lines outside the support, and
# dotted segments marking the jumps at the support boundaries
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,1,length=1000)
y=dbeta(s,alpha,beta)
plot(s,y,xlab="x",ylab="f(x)",type="l",xlim=c(-.15,1.15),ylim=c(0,max(y)+0.02))
lines(seq(-.15,0,length=100),rep(0,100))
lines(seq(1,1.15,length=100),rep(0,100))
segments(0,0,0,y[1],lty=3)
segments(1,0,1,y[length(y)],lty=3)
}
# cdf only, drawn slightly beyond [0, 1] so the flat tails are visible
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(-.15,1.15,length=1500)
plot(s,pbeta(s,alpha,beta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,1,length=1000)
y=dbeta(s,alpha,beta)
plot(s,y,xlab="x",ylab="f(x)",type="l",xlim=c(-.15,1.15),ylim=c(0,max(y)+0.02))
lines(seq(-.15,0,length=100),rep(0,100))
lines(seq(1,1.15,length=100),rep(0,100))
segments(0,0,0,y[1],lty=3)
segments(1,0,1,y[length(y)],lty=3)
s=seq(-.15,1.15,length=1500)
plot(s,pbeta(s,alpha,beta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# P(lb <= X <= ub) for X ~ Beta(alpha, beta), via a cdf difference.
beta.prob = function(alpha, beta, lb, ub) {
  if (abs(alpha) == Inf | alpha <= 0) {
    return("alpha must be a finite positive number")
  }
  if (abs(beta) == Inf | beta <= 0) {
    return("beta must be a finite positive number")
  }
  if (ub < lb) {
    return("lb must be smaller than ub!")
  }
  pbeta(ub, shape1 = alpha, shape2 = beta) - pbeta(lb, shape1 = alpha, shape2 = beta)
}
# q-th quantile of Beta(alpha, beta); q must lie strictly in (0, 1).
beta.quantile = function(alpha, beta, q) {
  if (abs(alpha) == Inf | alpha <= 0) {
    return("alpha must be a finite positive number")
  }
  if (abs(beta) == Inf | beta <= 0) {
    return("beta must be a finite positive number")
  }
  if (q <= 0 | q >= 1) {
    return("q must be between 0 and 1")
  }
  qbeta(q, shape1 = alpha, shape2 = beta)
}
| /R/Continuous.R | no_license | bgrose/StatEngine | R | false | false | 12,062 | r | ###################################
#Continuous Uniform Distribution
# uniform.summary: optionally plot the pdf/cdf of Uniform(a, b) and return
# mean (a+b)/2, variance (b-a)^2/12 and standard deviation. Invalid
# arguments return an explanatory string (file-wide convention).
uniform.summary=function(a,b,plotpdf=TRUE,plotcdf=TRUE)
{
if(a>=b){return("a must be smaller than b")}
if(a==-Inf |b==Inf|a==Inf|b==-Inf){return("a and b must be finite")}
mu=(a+b)/2
sigma2=(b-a)^2/12
sigma=sqrt(sigma2)
# pdf only: density on [a, b] with short zero "tails" either side and dotted
# segments marking the jumps at the support boundaries
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(a,b,length=100)
plot(s,dunif(s,a,b),xlab="x",ylab="f(x)",type="l",ylim=c(0,1.1/(b-a)),xlim=c(a-(b-a)/10,b+(b-a)/10))
lines(seq(a-(b-a)/10,a,length=100),rep(0,100))
lines(seq(b,b+(b-a)/10,length=100),rep(0,100))
segments(a,0,a,1/(b-a),lty=3)
segments(b,0,b,1/(b-a),lty=3)
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(a-(b-a)/10,b+(b-a)/10,length=100)
plot(s,punif(s,a,b),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(a,b,length=100)
plot(s,dunif(s,a,b),xlab="x",ylab="f(x)",type="l",ylim=c(0,1.1/(b-a)),xlim=c(a-(b-a)/10,b+(b-a)/10))
lines(seq(a-(b-a)/10,a,length=100),rep(0,100))
lines(seq(b,b+(b-a)/10,length=100),rep(0,100))
segments(a,0,a,1/(b-a),lty=3)
segments(b,0,b,1/(b-a),lty=3)
s=seq(a-(b-a)/10,b+(b-a)/10,length=100)
plot(s,punif(s,a,b),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# uniform.prob: P(lb <= X <= ub) for X ~ Uniform(a, b)
uniform.prob=function(a,b,lb,ub)
{
if(a>=b){return("a must be smaller than b")}
if(a==-Inf |b==Inf|a==Inf|b==-Inf){return("a and b must be finite")}
if(ub<lb){return("lb must be smaller than ub!")}
return(punif(ub,a,b)-punif(lb,a,b))
}
# uniform.quantile: the q-th quantile of Uniform(a, b), q strictly in (0, 1)
uniform.quantile=function(a,b,q)
{
if(a>=b){return("a must be smaller than b")}
if(a==-Inf |b==Inf|a==Inf|b==-Inf){return("a and b must be finite")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qunif(q,a,b))
}
###################################
#Normal Distribution
# normal.summary: optionally plot the pdf/cdf of Normal(mu, sigma) over
# mu +/- 4*sigma and return mean, variance (sigma^2) and standard deviation.
normal.summary=function(mu,sigma,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(mu)==Inf | abs(sigma)==Inf){return("mu and sigma must be finite")}
if(sigma<=0){return("sigma must be positive")}
# mu=mu and sigma=sigma are no-op reassignments, kept for symmetry with the
# other *.summary functions in this file
mu=mu
sigma2=sigma^2
sigma=sigma
# pdf only
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(mu-4*sigma,mu+4*sigma,length=200)
plot(s,dnorm(s,mu,sigma),xlab="x",ylab="f(x)",type="l")
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(mu-4*sigma,mu+4*sigma,length=200)
plot(s,pnorm(s,mu,sigma),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(mu-4*sigma,mu+4*sigma,length=200)
plot(s,dnorm(s,mu,sigma),xlab="x",ylab="f(x)",type="l")
plot(s,pnorm(s,mu,sigma),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# normal.prob: P(lb <= X <= ub) for X ~ Normal(mu, sigma)
normal.prob=function(mu,sigma,lb,ub)
{
if(abs(mu)==Inf | abs(sigma)==Inf){return("mu and sigma must be finite")}
if(sigma<=0){return("sigma must be positive")}
if(ub<lb){return("lb must be smaller than ub!")}
return(pnorm(ub,mu,sigma)-pnorm(lb,mu,sigma))
}
# normal.quantile: the q-th quantile of Normal(mu, sigma), q strictly in (0, 1)
normal.quantile=function(mu,sigma,q)
{
if(abs(mu)==Inf | abs(sigma)==Inf){return("mu and sigma must be finite")}
if(sigma<=0){return("sigma must be positive")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qnorm(q,mu,sigma))
}
###################################
#Exponential Distribution
# exponential.summary: optionally plot the pdf/cdf of Exponential(lambda)
# over [0, 99.9th percentile] and return mean (1/lambda), variance
# (1/lambda^2) and standard deviation.
exponential.summary=function(lambda,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
mu=1/lambda
sigma2=1/lambda^2
sigma=1/lambda
# pdf only: f(x) = lambda * exp(-lambda * x)
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,qexp(0.999,rate=lambda),length=100)
plot(s,lambda*exp(-lambda*s),xlab="x",ylab="f(x)",type="l")
}
# cdf only: F(x) = 1 - exp(-lambda * x)
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(0,qexp(0.999,rate=lambda),length=100)
plot(s,1-exp(-lambda*s),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qexp(0.999,rate=lambda),length=100)
plot(s,lambda*exp(-lambda*s),xlab="x",ylab="f(x)",type="l")
plot(s,1-exp(-lambda*s),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# exponential.prob: P(lb <= X <= ub), clamping the interval to the support
# [0, Inf) before applying F(x) = 1 - exp(-lambda * x)
exponential.prob=function(lambda,lb,ub)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(ub<lb){return("lb must be smaller than ub!")}
if(lb>0){return(exp(-lambda*lb)-exp(-lambda*ub))}
if(ub<=0){return(0)}
if(lb<=0 & ub>0){return(1-exp(-lambda*ub))}
}
# exponential.quantile: invert F, giving x = -log(1 - q) / lambda
exponential.quantile=function(lambda,q)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(-log(1-q)/lambda)
}
###################################
#Gamma Distribution
# (header fixed: this section was mislabelled "Exponential Distribution")
# gamma.summary: optionally plot the pdf/cdf of Gamma(shape = r,
# rate = lambda) over [0, 99.9th percentile] and return mean (r/lambda),
# variance (r/lambda^2) and standard deviation.
gamma.summary=function(r,lambda,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(abs(r)==Inf | r<=0){return("r must be a finite positive number")}
mu=r/lambda
sigma2=r/lambda^2
sigma=sqrt(sigma2)
if(plotpdf==TRUE & plotcdf==FALSE)
{
# BUG FIX: lambda is a rate, so the plotting grid must use scale = 1/lambda
# (was scale = lambda, inconsistent with the dgamma call below).
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,dgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="f(x)",type="l")
}
if(plotpdf==FALSE & plotcdf==TRUE)
{
# BUG FIX: scale = 1/lambda here too (was scale = lambda).
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,pgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qgamma(0.999,shape=r,scale=1/lambda),length=100)
plot(s,dgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="f(x)",type="l")
plot(s,pgamma(s,shape=r,scale=1/lambda),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# gamma.prob: P(lb <= X <= ub) for X ~ Gamma(shape = r, rate = lambda)
gamma.prob=function(r,lambda,lb,ub)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(abs(r)==Inf | r<=0){return("r must be a finite positive number")}
if(ub<lb){return("lb must be smaller than ub!")}
return(pgamma(ub,shape=r,scale=1/lambda)-pgamma(lb,shape=r,scale=1/lambda))
}
# gamma.quantile: the q-th quantile of Gamma(shape = r, rate = lambda)
gamma.quantile=function(r,lambda,q)
{
if(abs(lambda)==Inf | lambda<=0){return("lambda must be a finite positive number")}
if(abs(r)==Inf | r<=0){return("r must be a finite positive number")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qgamma(q,shape=r,scale=1/lambda))
}
###################################
#Weibull Distribution
# weibull.summary: optionally plot the pdf/cdf of Weibull(shape = beta,
# scale = delta) over [0, 99.9th percentile]; returns mean
# delta*Gamma(1 + 1/beta), variance delta^2*(Gamma(1+2/beta) -
# Gamma(1+1/beta)^2), and standard deviation.
weibull.summary=function(beta,delta,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(delta)==Inf | delta<=0){return("delta must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
mu=delta*gamma(1+1/beta)
sigma2=delta^2*(gamma(1+2/beta)-(gamma(1+1/beta))^2)
sigma=sqrt(sigma2)
# pdf only
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,dweibull(s,shape=beta,scale=delta),xlab="x",ylab="f(x)",type="l")
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,pweibull(s,shape=beta,scale=delta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qweibull(0.999,shape=beta,scale=delta),length=100)
plot(s,dweibull(s,shape=beta,scale=delta),xlab="x",ylab="f(x)",type="l")
plot(s,pweibull(s,shape=beta,scale=delta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# weibull.prob: P(lb <= X <= ub) for X ~ Weibull(shape = beta, scale = delta)
weibull.prob=function(beta,delta,lb,ub)
{
if(abs(delta)==Inf | delta<=0){return("delta must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
if(ub<lb){return("lb must be smaller than ub!")}
return(pweibull(ub,shape=beta,scale=delta)-pweibull(lb,shape=beta,scale=delta))
}
# weibull.quantile: the q-th quantile, q strictly in (0, 1)
weibull.quantile=function(beta,delta,q)
{
if(abs(delta)==Inf | delta<=0){return("delta must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qweibull(q,shape=beta,scale=delta))
}
###################################
#Lognormal Distribution
# lognormal.summary: optionally plot the pdf/cdf of Lognormal(meanlog =
# theta, sdlog = omega) over [0, 99th percentile]; returns mean
# exp(theta + omega^2/2), variance exp(2*theta + omega^2)*(exp(omega^2) - 1),
# and standard deviation.
lognormal.summary=function(theta,omega,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(omega)==Inf | omega<=0){return("omega must be a finite positive number")}
if(abs(theta)==Inf){return("theta must be a finite number")}
mu=exp(theta+omega^2/2)
sigma2=exp(2*theta+omega^2)*(exp(omega^2)-1)
sigma=sqrt(sigma2)
# pdf only
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,dlnorm(s,theta,omega),xlab="x",ylab="f(x)",type="l")
}
# cdf only
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,plnorm(s,theta,omega),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,qlnorm(0.99,theta,omega),length=1000)
plot(s,dlnorm(s,theta,omega),xlab="x",ylab="f(x)",type="l")
plot(s,plnorm(s,theta,omega),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# lognormal.prob: P(lb <= X <= ub) via a cdf difference
lognormal.prob=function(theta,omega,lb,ub)
{
if(abs(omega)==Inf | omega<=0){return("omega must be a finite positive number")}
if(abs(theta)==Inf){return("theta must be a finite number")}
if(ub<lb){return("lb must be smaller than ub!")}
return(plnorm(ub,theta,omega)-plnorm(lb,theta,omega))
}
# lognormal.quantile: the q-th quantile, q strictly in (0, 1)
lognormal.quantile=function(theta,omega,q)
{
if(abs(omega)==Inf | omega<=0){return("omega must be a finite positive number")}
if(abs(theta)==Inf){return("theta must be a finite number")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qlnorm(q,theta,omega))
}
###################################
#Beta Distribution
# beta.summary: optionally plot the pdf/cdf of Beta(alpha, beta) over [0, 1]
# (with zero "tails" drawn outside the support); returns mean
# alpha/(alpha+beta), variance alpha*beta/((alpha+beta)^2*(alpha+beta+1)),
# and standard deviation.
beta.summary=function(alpha,beta,plotpdf=TRUE,plotcdf=TRUE)
{
if(abs(alpha)==Inf | alpha<=0){return("alpha must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
mu=alpha/(alpha+beta)
sigma2=alpha*beta/(alpha+beta)^2/(alpha+beta+1)
sigma=sqrt(sigma2)
# pdf only: density on [0, 1], flat zero lines outside the support, dotted
# segments marking the jumps at the boundaries
if(plotpdf==TRUE & plotcdf==FALSE)
{
s=seq(0,1,length=1000)
y=dbeta(s,alpha,beta)
plot(s,y,xlab="x",ylab="f(x)",type="l",xlim=c(-.15,1.15),ylim=c(0,max(y)+0.02))
lines(seq(-.15,0,length=100),rep(0,100))
lines(seq(1,1.15,length=100),rep(0,100))
segments(0,0,0,y[1],lty=3)
segments(1,0,1,y[length(y)],lty=3)
}
# cdf only, drawn slightly beyond [0, 1] so the flat tails are visible
if(plotpdf==FALSE & plotcdf==TRUE)
{
s=seq(-.15,1.15,length=1500)
plot(s,pbeta(s,alpha,beta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# both: pdf stacked above cdf in a 2x1 layout
if(plotpdf==TRUE & plotcdf==TRUE)
{
par(mfrow=c(2,1))
par(mar=c(4,4,.5,.1))
s=seq(0,1,length=1000)
y=dbeta(s,alpha,beta)
plot(s,y,xlab="x",ylab="f(x)",type="l",xlim=c(-.15,1.15),ylim=c(0,max(y)+0.02))
lines(seq(-.15,0,length=100),rep(0,100))
lines(seq(1,1.15,length=100),rep(0,100))
segments(0,0,0,y[1],lty=3)
segments(1,0,1,y[length(y)],lty=3)
s=seq(-.15,1.15,length=1500)
plot(s,pbeta(s,alpha,beta),xlab="x",ylab="F(x)",type="l",ylim=c(0,1))
}
# restore the default single-panel layout before returning
par(mfrow=c(1,1))
return(list(mean=mu,variance=sigma2,standard.deviation=sigma))
}
# beta.prob: P(lb <= X <= ub) via a cdf difference
beta.prob=function(alpha,beta,lb,ub)
{
if(abs(alpha)==Inf | alpha<=0){return("alpha must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
if(ub<lb){return("lb must be smaller than ub!")}
return(pbeta(ub,alpha,beta)-pbeta(lb,alpha,beta))
}
# beta.quantile: the q-th quantile, q strictly in (0, 1)
beta.quantile=function(alpha,beta,q)
{
if(abs(alpha)==Inf | alpha<=0){return("alpha must be a finite positive number")}
if(abs(beta)==Inf | beta<=0){return("beta must be a finite positive number")}
if(q<=0|q>=1){return("q must be between 0 and 1")}
return(qbeta(q,alpha,beta))
}
|
library(shiny)
library(rqog)
library(dplyr)
library(tidyr)
library(shinycssloaders)
library(bslib)
library(metathis)
library(shinyWidgets)
library(glue)
# Candidate data years, 2016 through the current year.
# NOTE(review): not referenced elsewhere in this file -- possibly kept for a
# future UI control; confirm before removing.
valid_years <- 2016:as.integer(substr(Sys.Date(), 1, 4))
## Build the named choice vector for the dataset picker: values are the rqog
## metadata object names ("Item"), display labels are the cleaned-up titles.
dsets <- data(package = "rqog")$result[, c("Title", "Item")] %>% as_tibble()
val_dsets <- dsets$Item
names(val_dsets) <- gsub("Metadata for | Quality of Government institute", "", dsets$Title)
# BUG FIX: the original line used `<` (comparison) instead of `<-`, so the
# sort result was silently discarded. Reorder the named vector by its labels
# (newest first) while keeping the values (data set names) intact.
val_dsets <- val_dsets[order(names(val_dsets), decreasing = TRUE)]
# UI: a single-page layout with social-media metadata, a custom navbar, a
# dataset/variable picker column, a generated-code column, and metadata /
# data-preview tables.
# FIX: the page content is English, so declare lang = "en" (was "fi").
ui <- fluidPage(lang = "en",
title = "rqog browser",
tags$head(tags$link(rel="shortcut icon", href="favicon.ico")),
meta() %>%
# FIX: the description said "geofi-selain" (a different rOpenGov app);
# corrected to describe this app.
meta_description(description = "rqog browser") %>%
meta_social(
title = "rqog browser",
description = "rqog browser: browse quality of government data in browser",
url = "",
image = "rqog_browser.png",
image_alt = "An image for social media cards",
twitter_creator = "@muuankarski",
twitter_card_type = "summary_large_image",
twitter_site = "@muuankarski"
),
theme = bslib::bs_theme(bootswatch = "cosmo",
# bg = "#0b3d91", fg = "white", primary = "#FCC780",
base_font = font_google("PT Sans"),
code_font = font_google("Space Mono")),
# Accessibility: skip-to-content link for keyboard/screen-reader users.
tags$html(HTML('<a class="sr-only sr-only-focusable" href="#maincontent">Skip to main</a>')),
tags$style(HTML("
.navbar-xyz {
background-color: rgb(255, 255, 255, .9);
border-bottom: 1px solid rgb(55, 55, 55, .4);
}
#map {
margin: auto;
}")),
# Sticky navbar with the rqog logo and external links.
# FIX: img alt text and the toggler aria-label were leftovers from a Finnish
# app ("Kompassi" / "Avaa valikko"); replaced with English equivalents.
tags$html(HTML('
<nav class="navbar navbar-light sticky-top navbar-xyz">
<a class="navbar-brand" role="brand" href = "https://ropengov.github.io/rqog/"><img src = "https://ropengov.github.io/rqog/reference/figures/logo.png" style = "height: 35px; padding-right: 0px;" alt = "rqog logo"></a>
<div class = "lead">rqog browser</div>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Open menu">
<span class="navbar-toggler-icon"></span>
</button>
<div role = "navigation" class="collapse navbar-collapse justify-content-between" id="navbarResponsive">
<ul class="navbar-nav ml-auto">
<li class="nav-item">
<a class="nav-link" href="https://ropengov.github.io/rqog/">rqog-package</a>
</li>
<li class="nav-item">
<a class="nav-link" href="http://ropengov.org/">ropengov.org</a>
</li>
</ul>
</div>
</nav>')),
tags$html(HTML('<main id="maincontent">')),
tags$h2("", id = "alku"),
tags$div(class = "container",
# Left column: app description and credits.
fluidRow(column(3, class = "well",
tags$p(class = "lead", "Metadata, Data availability and code snippets for Quality of Government Institute Data"),
tags$p("This app is shipped with ",
tags$a(href = "https://ropengov.github.io/rqog",
tags$code("rqog")),
"R-package. App allows you to quickly check ",tags$a(href = "https://www.gu.se/en/quality-government/qog-data",
tags$code("The Quality of Government Institute")),
" metadata and completeness of variables and timeseries."),
tags$p("Analytical tools are available at ", tags$a(href = "https://www.gu.se/en/quality-government/qog-data/visualization-tools", "QoG-website"),"."),
tags$hr(),
tags$p("(C) Markus Kainu 2011-2023"),
tags$a(href = "https://github.com/rOpenGov/rqog/blob/master/inst/extras/rqog_app/app.R",
tags$code("Source code at Github"))
),
# Middle column: dataset + variable pickers and the preview trigger.
column(4,
selectInput("value_dataset",
"Pick dataset:",
choices = val_dsets,
selected = "2022 Standard Data - cross-sectional"),
pickerInput('value_variable',
label = "Pick variable:",
choices = NULL, multiple = TRUE),
tags$p("Data preview requires internet connection and takes some time."),
tags$p("Data preview is meant for checking the completeness of data only. For obtaining data, open R and use the R-code on top right!", HTML("&#8599;")),
actionButton(inputId = "button",
label = "Preview data",
class="btn btn-outline-primary"
)
),
# Right column: the generated, copy-pasteable R snippet.
column(5,
tags$h4("R-code for obtaining the data"),
verbatimTextOutput("output_code")
)),
tags$hr(),
fluidRow(column(5,
tags$h4("Metadata"),
tableOutput("meta_tbl")
),
column(7,
tags$h4("Data preview")
,uiOutput("ui_availability_tbl")
)
),
tags$div(style = "padding-bottom: 150px;")
)
)
# Server logic: populates the variable picker from the chosen data set's
# metadata, renders a metadata table, previews the data on demand, and builds
# a copy-pasteable R snippet for fetching the same data with rqog.
server <- function(input, output, session) {
# Refresh the variable choices whenever the data set selection changes; the
# metadata object is looked up by name with get().
observeEvent(input$value_dataset, {
metad <- get(input$value_dataset)
df_vars <- distinct(metad, code,name)
val_vars <- df_vars$code
names(val_vars) <- df_vars$name
updatePickerInput(session, inputId = 'value_variable',
choices = val_vars,
selected = val_vars[10],
options = pickerOptions(liveSearch = TRUE,
actionsBox = TRUE)
)
})
# Metadata rows for the currently picked variables.
output$meta_tbl <- renderTable({
metad <- get(input$value_dataset)
metad[metad$code %in% input$value_variable,]
})
# Download and subset the data only when the "Preview data" button is pressed.
funk <- eventReactive({
input$button
}, {
datavalue <- input$value_dataset
datavalue_nchar <- nchar(datavalue)
# The last four characters of the metadata object name encode the year.
val_year <- as.integer(substr(datavalue, datavalue_nchar-3, datavalue_nchar))
# Derive the rqog data set name from the metadata object name.
if (grepl("basic", datavalue)){
data_name <- "basic"
} else if (grepl("std", datavalue)){
data_name <- "standard"
} else if (grepl("oecd", datavalue)){
data_name <- "oecd"
}
# Time-series vs cross-sectional variant.
if (grepl("ts", datavalue)){
data_type <- "time-series"
} else {
data_type <- "cross-sectional"
}
dtemp <- read_qog(which_data = data_name, data_type = data_type, year = val_year)
# Keep only the identifier columns plus the user's picked variables.
if (data_type == "cross-sectional"){
dtemp[,c("ccode","cname","version",input$value_variable)]
} else {
dtemp2 <- dtemp[,c("ccode","cname","year","version",input$value_variable)]
arrange(dtemp2, cname, year)
}
}, ignoreNULL = TRUE)
output$availability_tbl <- renderTable({
funk()
})
# Wrap the preview table in a scrollable container with a loading spinner.
output$ui_availability_tbl <- renderUI({
tagList(
div(style='height:520px; overflow-y: auto; overflow-x: auto;',
shinycssloaders::withSpinner(tableOutput("availability_tbl"))
)
)
})
# Build the reproducible R snippet shown in the UI for the current selection.
create_code <- reactive({
req(input$value_dataset)
req(input$value_variable)
datavalue <- input$value_dataset
datavalue_nchar <- nchar(datavalue)
val_year <- as.integer(substr(datavalue, datavalue_nchar-3, datavalue_nchar))
# Same name/type derivation as in funk() above.
if (grepl("basic", datavalue)){
data_name <- "basic"
} else if (grepl("std", datavalue)){
data_name <- "standard"
} else if (grepl("oecd", datavalue)){
data_name <- "oecd"
}
if (grepl("ts", datavalue)){
data_type <- "time-series"
base_vars <- '"ccode","year","cname","version"'
} else {
data_type <- "cross-sectional"
base_vars <- '"ccode","cname","version"'
}
user_vars <- paste0(input$value_variable, collapse = '","')
# BUG FIX: the generated snippet contained an unbalanced quote and a
# misspelled GitHub organisation ('""ropegov/rqog"'); corrected to
# rOpenGov/rqog so the install hint actually works when pasted.
code <- glue('
# remotes::install_github("rOpenGov/rqog")
library(rqog)
df_qog <- read_qog(which_data = "{data_name}",
data_type = "{data_type}",
year = {val_year})
df_qog_subset <- df_qog[,c({base_vars},
"{user_vars}")]
head(df_qog_subset)')
return(code)
})
output$output_code <- renderText({
create_code()
})
}
# Run the application (blocks until the Shiny session ends)
shinyApp(ui = ui, server = server)
| /inst/extras/rqog_app/app.R | permissive | rOpenGov/rqog | R | false | false | 10,298 | r | library(shiny)
library(rqog)
library(dplyr)
library(tidyr)
library(shinycssloaders)
library(bslib)
library(metathis)
library(shinyWidgets)
library(glue)
# Candidate data years, 2016 through the current year.
# NOTE(review): not referenced elsewhere in this file -- possibly kept for a
# future UI control; confirm before removing.
valid_years <- 2016:as.integer(substr(Sys.Date(), 1, 4))
## Build the named choice vector for the dataset picker: values are the rqog
## metadata object names ("Item"), display labels are the cleaned-up titles.
dsets <- data(package = "rqog")$result[, c("Title", "Item")] %>% as_tibble()
val_dsets <- dsets$Item
names(val_dsets) <- gsub("Metadata for | Quality of Government institute", "", dsets$Title)
# BUG FIX: the original line used `<` (comparison) instead of `<-`, so the
# sort result was silently discarded. Reorder the named vector by its labels
# (newest first) while keeping the values (data set names) intact.
val_dsets <- val_dsets[order(names(val_dsets), decreasing = TRUE)]
# UI: a single-page layout with social-media metadata, a custom navbar, a
# dataset/variable picker column, a generated-code column, and metadata /
# data-preview tables.
# NOTE(review): lang = "fi" but the visible text is English -- confirm intent.
ui <- fluidPage(lang = "fi",
title = "rqog browser",
tags$head(tags$link(rel="shortcut icon", href="favicon.ico")),
meta() %>%
# NOTE(review): description "geofi-selain" names a different rOpenGov app --
# likely a copy-paste leftover; confirm and update.
meta_description(description = "geofi-selain") %>%
meta_social(
title = "rqog browser",
description = "rqog browser: browse quality of government data in browser",
url = "",
image = "rqog_browser.png",
image_alt = "An image for social media cards",
twitter_creator = "@muuankarski",
twitter_card_type = "summary_large_image",
twitter_site = "@muuankarski"
),
theme = bslib::bs_theme(bootswatch = "cosmo",
# bg = "#0b3d91", fg = "white", primary = "#FCC780",
base_font = font_google("PT Sans"),
code_font = font_google("Space Mono")),
# Accessibility: skip-to-content link for keyboard/screen-reader users.
tags$html(HTML('<a class="sr-only sr-only-focusable" href="#maincontent">Skip to main</a>')),
tags$style(HTML("
.navbar-xyz {
background-color: rgb(255, 255, 255, .9);
border-bottom: 1px solid rgb(55, 55, 55, .4);
}
#map {
margin: auto;
}")),
# Sticky navbar with the rqog logo and external links.
# NOTE(review): alt = "Kompassi" and aria-label = "Avaa valikko" are Finnish
# leftovers from another app -- confirm and localise.
tags$html(HTML('
<nav class="navbar navbar-light sticky-top navbar-xyz">
<a class="navbar-brand" role="brand" href = "https://ropengov.github.io/rqog/"><img src = "https://ropengov.github.io/rqog/reference/figures/logo.png" style = "height: 35px; padding-right: 0px;" alt = "Kompassi"></a>
<div class = "lead">rqog browser</div>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Avaa valikko">
<span class="navbar-toggler-icon"></span>
</button>
<div role = "navigation" class="collapse navbar-collapse justify-content-between" id="navbarResponsive">
<ul class="navbar-nav ml-auto">
<li class="nav-item">
<a class="nav-link" href="https://ropengov.github.io/rqog/">rqog-package</a>
</li>
<li class="nav-item">
<a class="nav-link" href="http://ropengov.org/">ropengov.org</a>
</li>
</ul>
</div>
</nav>')),
tags$html(HTML('<main id="maincontent">')),
tags$h2("", id = "alku"),
tags$div(class = "container",
# Left column: app description and credits.
fluidRow(column(3, class = "well",
tags$p(class = "lead", "Metadata, Data availability and code snippets for Quality of Government Institute Data"),
tags$p("This app is shipped with ",
tags$a(href = "https://ropengov.github.io/rqog",
tags$code("rqog")),
"R-package. App allows you to quickly check ",tags$a(href = "https://www.gu.se/en/quality-government/qog-data",
tags$code("The Quality of Government Institute")),
" metadata and completeness of variables and timeseries."),
tags$p("Analytical tools are available at ", tags$a(href = "https://www.gu.se/en/quality-government/qog-data/visualization-tools", "QoG-website"),"."),
tags$hr(),
tags$p("(C) Markus Kainu 2011-2023"),
tags$a(href = "https://github.com/rOpenGov/rqog/blob/master/inst/extras/rqog_app/app.R",
tags$code("Source code at Github"))
),
# Middle column: dataset + variable pickers and the preview trigger.
column(4,
selectInput("value_dataset",
"Pick dataset:",
choices = val_dsets,
selected = "2022 Standard Data - cross-sectional"),
pickerInput('value_variable',
label = "Pick variable:",
choices = NULL, multiple = TRUE),
tags$p("Data preview requires internet connection and takes some time."),
tags$p("Data preview is meant for checking the completeness of data only. For obtaining data, open R and use the R-code on top right!", HTML("&#8599;")),
actionButton(inputId = "button",
label = "Preview data",
class="btn btn-outline-primary"
)
),
# Right column: the generated, copy-pasteable R snippet.
column(5,
tags$h4("R-code for obtaining the data"),
# tags$code("library(rqog)
# library(shiny)"),
# ,downloadButton("download_code", "Lataa R-koodi"),
verbatimTextOutput("output_code")
)),
tags$hr(),
fluidRow(column(5,
tags$h4("Metadata"),
tableOutput("meta_tbl")
),
column(7,
tags$h4("Data preview")
,uiOutput("ui_availability_tbl")
)
),
tags$div(style = "padding-bottom: 150px;")
)
)
# Define server logic required to draw a histogram
server <- function(input, output, session) {
observeEvent(input$value_dataset, {
metad <- get(input$value_dataset)
df_vars <- distinct(metad, code,name)
val_vars <- df_vars$code
names(val_vars) <- df_vars$name
# updateSelectizeInput(session, inputId = 'value_variable',
# choices = val_vars,
# selected = val_vars[10],
# server = TRUE)
updatePickerInput(session, inputId = 'value_variable',
choices = val_vars,
selected = val_vars[10],
options = pickerOptions(liveSearch = TRUE,
actionsBox = TRUE)
# server = TRUE
)
})
output$meta_tbl <- renderTable({
metad <- get(input$value_dataset)
metad[metad$code %in% input$value_variable,]
})
funk <- eventReactive({
input$button
}, {
datavalue <- input$value_dataset
# datavalue <- "meta_basic_ts_2021"
datavalue_nchar <- nchar(datavalue)
val_year <- as.integer(substr(datavalue, datavalue_nchar-3, datavalue_nchar))
# define data name
if (grepl("basic", datavalue)){
data_name <- "basic"
} else if (grepl("std", datavalue)){
data_name <- "standard"
} else if (grepl("oecd", datavalue)){
data_name <- "oecd"
}
# define data type
if (grepl("ts", datavalue)){
data_type <- "time-series"
} else {
data_type <- "cross-sectional"
}
dtemp <- read_qog(which_data = data_name, data_type = data_type, year = val_year)
if (data_type == "cross-sectional"){
dtemp[,c("ccode","cname","version",input$value_variable)]
# dtemp[,c("ccode","cname","version","atop_number","bci_bci")]
} else {
dtemp2 <- dtemp[,c("ccode","cname","year","version",input$value_variable)]
# pivot_longer(dtemp2,
# names_to = "variable",
# values_to = "value",
# cols = 5:ncol(dtemp2)) %>%
# na.omit() %>%
# pivot_wider(names_from = year, values_from = value) %>%
arrange(dtemp2, cname, year)
}
}, ignoreNULL = TRUE)
output$availability_tbl <- renderTable({
funk()
})
output$ui_availability_tbl <- renderUI({
tagList(
div(style='height:520px; overflow-y: auto; overflow-x: auto;',
shinycssloaders::withSpinner(tableOutput("availability_tbl"))
)
)
})
# Build a copy-pasteable rqog code snippet that reproduces the user's
# current dataset/variable selection (rendered by output$output_code).
# Returns a single glue string.
create_code <- reactive({
req(input$value_dataset)
req(input$value_variable)
datavalue <- input$value_dataset
# datavalue <- "meta_basic_ts_2021"
# Dataset ids end in a 4-digit year; extract it for read_qog(year=).
datavalue_nchar <- nchar(datavalue)
val_year <- as.integer(substr(datavalue, datavalue_nchar-3, datavalue_nchar))
# define data name (rqog `which_data` argument)
if (grepl("basic", datavalue)){
data_name <- "basic"
} else if (grepl("std", datavalue)){
data_name <- "standard"
} else if (grepl("oecd", datavalue)){
data_name <- "oecd"
} else {
# Fail loudly instead of leaving `data_name` undefined.
stop("Unrecognised dataset id: ", datavalue)
}
# define data type: time-series datasets carry an extra `year` id column
if (grepl("ts", datavalue)){
data_type <- "time-series"
base_vars <- '"ccode","year","cname","version"'
} else {
data_type <- "cross-sectional"
base_vars <- '"ccode","cname","version"'
}
user_vars <- paste0(input$value_variable, collapse = '","')
# BUG FIX: the generated install line previously read
# install_github(""ropegov/rqog") — doubled quote and misspelled org.
# The package lives at ropengov/rqog.
code <- glue('
# remotes::install_github("ropengov/rqog")
library(rqog)
df_qog <- read_qog(which_data = "{data_name}",
data_type = "{data_type}",
year = {val_year})
df_qog_subset <- df_qog[,c({base_vars},
"{user_vars}")]
head(df_qog_subset)')
return(code)
})
# Show the generated, copy-pasteable rqog snippet as plain text.
output$output_code <- renderText({
create_code()
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Add any project specific configuration here.
# NOTE(review): presumably ProjectTemplate's add.config(); registers a
# custom `threads` setting for downstream project scripts — confirm
# against where the project reads its config.
add.config(
threads=6
)
| /lib/globals.R | no_license | joshbiology/pan-meyers-et-al | R | false | false | 80 | r | # Add any project specific configuration here.
add.config(
threads=6
)
|
################################################################################
### BIO 410/510 ###
### TRANSFORM: Rearranging data ###
################################################################################
## Another way to think about ggplot naming (from https://beanumber.github.io/sds192/lab-ggplot2.html)
# In ggplot2, aesthetic means “something you can see”. Each aesthetic is a mapping between a visual cue and a variable. Examples include:
#
# position (i.e., on the x and y axes)
# color (“outside” color)
# fill (“inside” color)
# shape (of points)
# line type
# size
#
# Each type of geom accepts only a subset of all aesthetics—refer to the geom help pages to see what mappings each geom accepts. Aesthetic mappings are set with the aes() function.
#### TODAY ####
## OBJECTIVES:
## To learn how manipulate data into a form useable for analysis and graphs.
## To do this in a way that each step is traceable and reproducible.
## To this end we'll be using the dplyr package.
## dplyr is in the tidyverse:
library(tidyverse)
########################
##1) Reading in the data
########################
## We will use a dataset of water temperature in Calispell Creek and its tributories from eastern Washington State.
## These type of data are ripe for for scripted analysis because their formats remain constant
## but graphs frequently need to be updated to reflect new data.
## Remember to set your working directory to where the file is!!!
rawdat <- read.csv("CalispellCreekandTributaryTemperatures.csv", stringsAsFactors = FALSE)
## QUESTION TO PONDER (EXTRA): What does stringsAsFactors mean? Why would we want to make it false?
## Let's assign more useable column names
names(rawdat) <- c("date", "time", "calispell_temp", "smalle_temp", "winchester_temp")
#################################
## 2) dplyr tool number 0: tbl_df
#################################
## The first step of working with data in dplyr is to load the data in what the package authors call
## a 'tibble'
## Use this code to create a new tibble called wtemp.
## Tibbles are similar to data frames but with some useful features: https://cran.r-project.org/web/packages/tibble/vignettes/tibble.html
wtemp <- as_tibble(rawdat)
## One of the best features is the printing
## Let’s see what is meant by this
wtemp
## REVIEW QUESTION AND PLAY (EXTRA): What class is wtemp? How many rows does wtemp have? How many columns?
## To reinforce how nice this is, print rawdat instead:
rawdat
## Ophf! To never see that again, let's remove rawdat from the workspace
rm(rawdat)
## Another way to get a tibble when you upload is to use the readr package, also in the tidyverse
rawdat_alt <- read_csv("CalispellCreekandTributaryTemperatures.csv")
# EXTRA QUESTION TO PONDER: why did we not need stringsAsFactors for this?
#################################
## 3) dplyr tool number 1: select
#################################
## Let's imagine that we are only interested in the temperature at the Calispell site
## select helps us to reduce the dataframe to just the columns of interest
select(wtemp, calispell_temp, date, time)
## QUESTION: Are the columns in the same order as wtemp?
## NOTE: We didn't have to type wtemp$date etc as we would outside of the tidyverse
## the select() function knows we are referring to wtemp.
## Recall that in R, the : operator is a compact way to create a sequence of numbers. For example:
5:20
## Normally this notation is just for numbers, but select() allows you to specify a sequence of columns this way.
## This can save a bunch of typing!
## TASK: Select date, time and calispell_temp using this notation
## Print the entire tibble again, to remember what it looks like.
## We can also specify the columns that we want to discard. Let's remove smalle_temp, winchester_temp that way:
select(wtemp, -smalle_temp, -winchester_temp)
## EXTRA TASK: Get that result a third way, by removing all columns from smalle_temp:winchester_temp.
## Be careful! select(wtemp, -smalle_temp:winchester_temp) doesn't do it...
#################################
## 3) dplyr tool number 2: filter
#################################
#Now that you know how to select a subset of columns using select(),
#a natural next question is “How do I select a subset of rows?”
#That’s where the filter() function comes in.
## I might be worried about high water temperatures.
## Let's filter the dataframe to only include data with temperature equal to or greater than 15 C
filter(wtemp, calispell_temp >= 15)
## QUESTION: How many rows match this condition?
## We can also filter based on multiple conditions.
## For example, did the water get hot on the 4th of July, 2013? I want both conditions to be true:
filter(wtemp, calispell_temp >= 15, date == "7/4/13")
##And I can filter based on "or" - if any condition is true.
## For example, was water temp >=15 at any site?
filter(wtemp, calispell_temp >= 15 | smalle_temp >= 15 | winchester_temp >= 15)
##QUESTION: How many rows match this condition?
## Finally, we might want to only get the row which do not have missing data
## We can detect missing values with the is.na() function
## Try it out:
is.na(c(3,5, NA, 6))
## Now put an exclamation point (!) before is.na() to change all of the TRUEs to FALSEs and FALSEs to TRUEs
## This tells us what is NOT NA:
!is.na(c(3,5, NA, 6))
## NOTE: To see all possible unique values in a column, use the unique function:
unique(wtemp$calispell_temp)
## TASK: Time to put this all together. Please filter all of the rows of wtemp
## for which the value of calispell_temp is not NA.
## How many rows match this condition?
## EXTRA TASK: Please filter all the values of calispell_temp where the temp is greater or equal to 15, or is na
##################################
## 4) dplyr tool number 3: arrange
##################################
## Sometimes we want to order the rows of a dataset according to the values of a particular variable
## For example, let's order the dataframe by calispell_temp
arrange(wtemp, calispell_temp)
## QUESTION: What is the lowest temperature observed in Calispell Creek?
## But wait! We're more worried about high temperatures.
## To do the same, but in descending order, you have two options.
arrange(wtemp, -calispell_temp)
arrange(wtemp, desc(calispell_temp))
## And you can arrange by multiple variables.
## TASK: arrange the tibble by date (ascending) and smalle_temp (descending)
## EXTRA TASK: How could you use arrange() to sort all missing values to the start? (Hint: use is.na()).
##################################
## 5) dplyr tool number 4: mutate
##################################
## It’s common to create a new variable based on the value of one or more variables already in a dataset.
## The mutate() function does exactly this.
## I like that the data are all in C. But what if we want to talk to an "I'm not a scientist" politician about water temperature?
## We might want to convert it to F.
mutate(wtemp, calispell_temp_F = calispell_temp*9/5 + 32)
## To make our data more usable, we also might want to summarize data across time, or by month and year.
## The lubridate package helps a lot with this! Here is just a taste, but if you need to work with dates for your project check out the package.
## There is also a great swirl tutorial on how to use it.
## Let's load lubridate:
library(lubridate)
## TASK: Look at the lubridate help page. What do the functions with 'y' 'm' and 'd' (in various orders) do?
?lubridate
## Try it out:
mdy("1/13/09")
## Once dates are saved as date-time objects, we can extract information from them. Try it out.
## First, let's save the character string as a date-time object:
mydate <- mdy("1/13/09")
## Then extract the month and day:
month(mydate)
day(mydate)
##QUESTION: How would you extract the year from mydate?
## Let's use the mutate and mdy functions to create a variable called date2 that stores the date as a date-time object.
mutate(wtemp, date2 = mdy(date))
## Finally, we can use mutate to create several columns. For example, let's create date2, then create a column for month and year
mutate(wtemp, date2 = mdy(date), month = month(date2), year = year(date2))
## Let's go ahead and save those changes in an object called wtemp2 object:
wtemp2 <- mutate(wtemp, date2 = mdy(date), month = month(date2), year = year(date2))
## EXTRA TASKS (definitely do these!): There are a variety of useful creation functions. Using the documentation in 5.5, please:
## 1) Create a column that is the ranked values of calispell_temp
## 2) Create a column that is the mean value of calispell_temp (hint: you might need to add na.rm = T)
## 3) Create a column that is the amount that calispell_temp deviates from its mean
## 4) Create a column that is the log of smalle_temp
## 5) Create a column that is the difference in temperature between smalle and winchester
## TASK: Name two other creation functions and give a scenario in which you would use them
####################################
## 6) dplyr tool number 5: summarize
####################################
## Often we want to look at summarized as opposed to raw data.
## At a basic level, summarize will condense all rows of a variable into one, summarized value.
## For example, let's look at the mean water temperature at Calispell
summarize(wtemp2, avg_temp_calispell = mean(calispell_temp, na.rm = TRUE))
## QUESTION: What did na.rm = TRUE do?
## TASK: Can you use summarize to get the max value for the calispell_temp variable?
## QUESTION: Do you think this level of aggregation is very interesting?
###################################
## 6) dplyr tool number 6: group_by
###################################
## That last one was supposed to be a leading question. I don't think mean temperature is that insightful.
## I'm more interested in how temperature changes with month or year.
## If we add the group_by function, summarize will give us the requested value FOR EACH GROUP.
## First, let's create a new tibble that is equal to wtemp2 but includes two grouping variables: month and year
## BUG FIX: group wtemp2, not wtemp — the month/year columns were created
## on wtemp2 (via mutate above); wtemp has no `month`/`year`, so
## group_by(wtemp, month, year) errors.
wtemp_by_monthyear <- group_by(wtemp2, month, year)
## QUESTION: Print wtemp2 and wtemp_by_monthyear. Can you see how they differ?
## Use summarize again, but this time on wtemp_by_monthyear.
summarize(wtemp_by_monthyear, avg_temp_calispell= mean(calispell_temp, na.rm = TRUE))
## Whoa there are a lot of missing values...
## For this (and always) its good to do a count on the number of data points you are using
## TASK: Combine filter and summarize to get a count of the number of actual data points for calispell temp
| /06_dplyr/rearrange_code-abbrev.R | no_license | laurenmh/bio-data-course-2018 | R | false | false | 10,788 | r | ################################################################################
### BIO 410/510 ###
### TRANSFORM: Rearranging data ###
################################################################################
## Another way to think about ggplot naming (from https://beanumber.github.io/sds192/lab-ggplot2.html)
# In ggplot2, aesthetic means “something you can see”. Each aesthetic is a mapping between a visual cue and a variable. Examples include:
#
# position (i.e., on the x and y axes)
# color (“outside” color)
# fill (“inside” color)
# shape (of points)
# line type
# size
#
# Each type of geom accepts only a subset of all aesthetics—refer to the geom help pages to see what mappings each geom accepts. Aesthetic mappings are set with the aes() function.
#### TODAY ####
## OBJECTIVES:
## To learn how manipulate data into a form useable for analysis and graphs.
## To do this in a way that each step is traceable and reproducible.
## To this end we'll be using the dplyr package.
## dplyr is in the tidyverse:
library(tidyverse)
########################
##1) Reading in the data
########################
## We will use a dataset of water temperature in Calispell Creek and its tributories from eastern Washington State.
## These type of data are ripe for for scripted analysis because their formats remain constant
## but graphs frequently need to be updated to reflect new data.
## Remember to set your working directory to where the file is!!!
rawdat <- read.csv("CalispellCreekandTributaryTemperatures.csv", stringsAsFactors = FALSE)
## QUESTION TO PONDER (EXTRA): What does stringsAsFactors mean? Why would we want to make it false?
## Let's assign more useable column names
names(rawdat) <- c("date", "time", "calispell_temp", "smalle_temp", "winchester_temp")
#################################
## 2) dplyr tool number 0: tbl_df
#################################
## The first step of working with data in dplyr is to load the data in what the package authors call
## a 'tibble'
## Use this code to create a new tibble called wtemp.
## Tibbles are similar to data frames but with some useful features: https://cran.r-project.org/web/packages/tibble/vignettes/tibble.html
wtemp <- as_tibble(rawdat)
## One of the best features is the printing
## Let’s see what is meant by this
wtemp
## REVIEW QUESTION AND PLAY (EXTRA): What class is wtemp? How many rows does wtemp have? How many columns?
## To reinforce how nice this is, print rawdat instead:
rawdat
## Ophf! To never see that again, let's remove rawdat from the workspace
rm(rawdat)
## Another way to get a tibble when you upload is to use the readr package, also in the tidyverse
rawdat_alt <- read_csv("CalispellCreekandTributaryTemperatures.csv")
# EXTRA QUESTION TO PONDER: why did we not need stringsAsFactors for this?
#################################
## 3) dplyr tool number 1: select
#################################
## Let's imagine that we are only intested in the temperature at the Calispell site
## select helps us to reduce the dataframe to just columns of interesting
select(wtemp, calispell_temp, date, time)
## QUESTION: Are the columns in the same order as wtemp?
## NOTE: We didn't have to type wtemp$date etc as we would outside of the tidyverse
## the select() function knows we are referring to wtemp.
## Recall that in R, the : operator is a compact way to create a sequence of numbers. For example:
5:20
## Normally this notation is just for numbers, but select() allows you to specify a sequence of columns this way.
## This can save a bunch of typing!
## TASK: Select date, time and calispell_temp using this notation
## Print the entire tibble again, to remember what it looks like.
## We can also specify the columns that we want to discard. Let's remove smalle_temp, winchester_temp that way:
select(wtemp, -smalle_temp, -winchester_temp)
## EXTRA TASK: Get that result a third way, by removing all columns from smalle_temp:winchester_temp.
## Be careful! select(wtemp, -smalle_temp:winchester_temp) doesn't do it...
#################################
## 3) dplyr tool number 2: filter
#################################
#Now that you know how to select a subset of columns using select(),
#a natural next question is “How do I select a subset of rows?”
#That’s where the filter() function comes in.
## I might be worried about high water temperatures.
## Let's filter the the dataframe table to only include data with temperature equal or greater than 15 C
filter(wtemp, calispell_temp >= 15)
## QUESTION: How many rows match this condition?
## We can also filter based on multiple conditions.
## For example, did the water get hot on the 4th of July, 2013? I want both conditions to be true:
filter(wtemp, calispell_temp >= 15, date == "7/4/13")
##And I can filter based on "or" - if any condition is true.
## For example, was water temp >=15 at any site?
filter(wtemp, calispell_temp >= 15 | smalle_temp >= 15 | winchester_temp >= 15)
##QUESTION: How many rows match this condition?
## Finally, we might want to only get the row which do not have missing data
## We can detect missing values with the is.na() function
## Try it out:
is.na(c(3,5, NA, 6))
## Now put an exclamation point (!) before is.na() to change all of the TRUEs to FALSEs and FALSEs to TRUEs
## This tells us what is NOT NA:
!is.na(c(3,5, NA, 6))
## NOTE: To see all possible unique values in a column, use the unique function:
unique(wtemp$calispell_temp)
## TASK: Time to put this all together. Please filter all of the rows of wtemp
## for which the value of calispell_temp is not NA.
## How many rows match this condition?
## EXTRA TASK: Please filter all the values of calispell_temp where the temp is greater or equal to 15, or is na
##################################
## 4) dplyr tool number 3: arrange
##################################
## Sometimes we want to order the rows of a dataset according to the values of a particular variable
## For example, let's order the dataframe by calispell_temp
arrange(wtemp, calispell_temp)
## QUESTION: What is the lowest temperature observed in Calispell Creek?
## But wait! We're more worried about high temperatures.
## To do the same, but in descending order, you have two options.
arrange(wtemp, -calispell_temp)
arrange(wtemp, desc(calispell_temp))
## And you can arrange by multiple variables.
## TASK: arrange the tibble by date (ascending) and smalle_temp (descending)
## EXTRA TASK: How could you use arrange() to sort all missing values to the start? (Hint: use is.na()).
##################################
## 5) dplyr tool number 4: mutate
##################################
## It’s common to create a new variable based on the value of one or more variables already in a dataset.
## The mutate() function does exactly this.
## I like that the data are all in C. But what if we want to talk to an "I'm not a scientist" politician about water temperature?
## We might want to convert it to F.
mutate(wtemp, calispell_temp_F = calispell_temp*9/5 + 32)
## To make our data more usable, we also might want to summarize data across time, or by month and year.
## The lubridate package helps a lot with this! Here is just a taste, but if you need to work with dates for your project check out the package.
## There is also a great swirl tutorial on how to use it.
## Let's load lubridate:
library(lubridate)
## TASK: Look at the lubridate help page. What do the functions with 'y' 'm' and 'd' (in various orders) do?
?lubridate
## Try it out:
mdy("1/13/09")
## Once dates are saved as date-time objects, we can extract information from them. Try it out.
## First, let's save the character string as a date-time object:
mydate <- mdy("1/13/09")
## Then extract the month and day:
month(mydate)
day(mydate)
##QUESTION: How would you extract the year from mydate?
## Let's use the mutate and mdy functions to create a variable called date2 that stores the date as a date-time object.
mutate(wtemp, date2 = mdy(date))
## Finally, we can use mutate to create several columns. For example, let's create date2, then create a column for month and year
mutate(wtemp, date2 = mdy(date), month = month(date2), year = year(date2))
## Let's go ahead and save those changes in an object called wtemp2 object:
wtemp2 <- mutate(wtemp, date2 = mdy(date), month = month(date2), year = year(date2))
## EXTRA TASKS (definitely do these!): There are a variety of useful creation functions. Using the documentation in 5.5, please:
## 1) Create a column that is the ranked values of calispell_temp
## 2) Create a column that is the mean value of calispell_temp (hint: you might need to add na.rm = T)
## 3) Create a column that is the amount that calispell_temp deviates from its mean
## 4) Create a column that is the log of smalle_temp
## 5) Create a column that is the difference in temperature between smalle and winchester
## TASK: Name two other creation functions and give a scenario in which you would use them
####################################
## 6) dplyr tool number 5: summarize
####################################
## Often we want to look at summarized as opposed to raw data.
## At a basic level, summarize will condense all rows of a variable into one, summarized value.
## For example, let's look at the mean water temperature at Calispell
summarize(wtemp2, avg_temp_calispell = mean(calispell_temp, na.rm = TRUE))
## QUESTION: What did na.rm = TRUE do?
## TASK: Can you use summarize to get the max value for the calispell_temp variable?
## QUESTION: Do you think this level of aggregation is very interesting?
###################################
## 6) dplyr tool number 6: group_by
###################################
## That last one was supposed to be a leading question. I don't think mean temperature is that insightful.
## I'm more interested in how temperature changes with month or year.
## If we add the group_by function, summarize will give us the requested value FOR EACH GROUP.
## First, let's create a new tibble that is equal to wtemp2 but includes two grouping variables: month and year
## BUG FIX: group wtemp2, not wtemp — the month/year columns were created
## on wtemp2 (via mutate above); wtemp has no `month`/`year`, so
## group_by(wtemp, month, year) errors.
wtemp_by_monthyear <- group_by(wtemp2, month, year)
## QUESTION: Print wtemp2 and wtemp_by_monthyear. Can you see how they differ?
## Use summarize again, but this time on wtemp_by_monthyear.
summarize(wtemp_by_monthyear, avg_temp_calispell= mean(calispell_temp, na.rm = TRUE))
## Whoa there are a lot of missing values...
## For this (and always) its good to do a count on the number of data points you are using
## TASK: Combine filter and summarize to get a count of the number of actual data points for calispell temp
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# READ DATA
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
setwd(DD)
# select random samples from the whole training set
#-----------------------------------------------------------------------------------------
# Reproducible site/species subsampling: on first run, draw nested random
# site samples (150 within 300 within all 600 sites) and drop species that
# never occur in the 300-site sample; cache everything to disk.
# Subsequent runs reload the cached .RData objects.
if (!file.exists(paste("siteSamps_", Sets[d], ".mat", sep = "")) ||
    !file.exists(paste("spSel_", Sets[d], ".csv", sep = ""))) {
  # Fixed seed so the nested samples are identical across runs/machines;
  # re-seeding before the second draw makes randSamp150 reproducible on
  # its own as well.
  set.seed(7); randSamp300 <- sample(1:600, 300, replace = FALSE)
  set.seed(7); randSamp150 <- sample(randSamp300, 150, replace = FALSE)
  siteSamps <- list(randSamp150, randSamp300, 1:600)
  #siteSamps<-list(1:150,1:300,1:600)
  names(siteSamps) <- c("sz150", "sz300", "full600")
  # subsetting species present at least once in the large data set (300 sites)
  absentSp <- list()
  for (i in 1:3) {
    y_tmp <- read.csv(paste("Yt_", i, "_", set_no, ".csv", sep = ""), header = FALSE)
    y_tmp <- apply(y_tmp, 2, as.numeric)[randSamp300, ]
    absentSp[[i]] <- which(colSums(y_tmp) == 0)
  }
  absentSp <- as.numeric(unlist(absentSp))
  spSel <- 1:ncol(y_tmp)
  # Drop all-absent species. length() is the robust emptiness test;
  # the original sum(absentSp)!=0 only worked because column indices
  # are always positive.
  if (length(absentSp) > 0) {
    spSel <- spSel[-absentSp]
  }
  # save the samples (.mat for Matlab-side scripts, .csv/.RData for R)
  write.mat(siteSamps, filename = paste("siteSamps_", Sets[d], ".mat", sep = ""))
  write.table(spSel, file = paste("spSel_", Sets[d], ".csv", sep = ""), sep = ",", row.names = F, col.names = F)
  save(siteSamps, file = paste("siteSamps_", Sets[d], ".RData", sep = ""))
  save(spSel, file = paste("spSel_", Sets[d], ".RData", sep = ""))
} else {
  # NOTE(review): this branch assumes the .RData caches exist whenever
  # the .mat/.csv files do — they are written together above, but a
  # partial delete would make these load() calls fail.
  load(file = paste("siteSamps_", Sets[d], ".RData", sep = ""))
  load(file = paste("spSel_", Sets[d], ".RData", sep = ""))
}
samp <- siteSamps[[sz]]
# training
#-----------------------------------------------------------------------------------------
y_train <- list()
y_train_common <- list()
x_train <- list()
s_train <- list()
for (i in 1:3) {
y_train[[i]] <- read.csv(paste("Yt_",i,"_",set_no,".csv",sep=""),header=FALSE)
#y_train_full[[i]] <- apply(y_train[[i]], 2, as.numeric)
y_train[[i]] <- apply(y_train[[i]], 2, as.numeric)[samp,spSel]
common_sp <- which((colSums(y_train[[i]])/nrow(y_train[[i]])) >= 0.1)
y_train_common[[i]] <- y_train[[i]][,common_sp]
write.table(common_sp, file=paste("common_sp_d",i,"_",Sets[d],".csv",sep=""),sep=",",row.names=F,col.names=F)
x_train[[i]] <- as.matrix(read.csv(paste("Xt_",i,"_",set_no,".csv",sep=""),header=FALSE))
#x_train_full[[i]] <- apply(x_train[[i]], 2, as.numeric)
x_train[[i]] <- apply(x_train[[i]], 2, as.numeric)[samp,]
s_train[[i]] <- read.csv(paste("St_",i,"_",set_no,".csv",sep=""),header=FALSE)
#s_train_full[[i]] <- apply(s_train[[i]], 2, as.numeric)
s_train[[i]] <- apply(s_train[[i]], 2, as.numeric)[samp,]
colnames(s_train[[i]])<-paste('Rand',1:ncol(s_train[[i]]),sep='')
#colnames(s_train_full[[i]])<-paste('Rand',1:ncol(s_train_full[[i]]),sep='')
ncovar<-ncol(x_train[[i]])
for (k in 1:ncovar) {
x_train[[i]]<-cbind(x_train[[i]],x_train[[i]][,k]^2)
#x_train_full[[i]]<-cbind(x_train_full[[i]],x_train_full[[i]][,k]^2)
}
x_train[[i]]<-apply(x_train[[i]],2,scale)
x_train[[i]]<-cbind(1,x_train[[i]])
colnames(x_train[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
#x_train_full[[i]]<-apply(x_train_full[[i]],2,scale)
#x_train_full[[i]]<-cbind(1,x_train_full[[i]])
#colnames(x_train_full[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
}
# validation
#-----------------------------------------------------------------------------------------
y_valid<-list()
y_valid_common<-list()
x_valid<-list()
s_valid<-list()
for (i in 1:3) {
y_valid[[i]] <- read.csv(paste("Yv","_",i,"_",set_no,".csv",sep=""),header=FALSE)
y_valid[[i]] <- apply(y_valid[[i]], 2, as.numeric)[,spSel]
common_sp <- which((colSums(y_train[[i]])/nrow(y_train[[i]])) >= 0.1)
y_valid_common[[i]] <- y_valid[[i]][,common_sp]
x_valid[[i]] <- as.matrix(read.csv(paste("Xv","_",i,"_",set_no,".csv",sep=""),header=FALSE))
x_valid[[i]] <- apply(x_valid[[i]], 2, as.numeric)
s_valid[[i]] <- read.csv(paste("Sv","_",i,"_",set_no,".csv",sep=""),header=FALSE)
s_valid[[i]] <- apply(s_valid[[i]], 2, as.numeric)
colnames(s_valid[[i]])<-paste('Rand',1:ncol(s_valid[[i]]),sep='')
ncovar<-ncol(x_valid[[i]])
for (k in 1:ncovar) {
x_valid[[i]]<-cbind(x_valid[[i]],x_valid[[i]][,k]^2)
}
x_valid[[i]]<-apply(x_valid[[i]],2,scale)
x_valid[[i]]<-cbind(1,x_valid[[i]])
colnames(x_valid[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
}
# lists
#-----------------------------------------------------------------------------------------
DD_t <- list()
DD_v <- list()
DD_t_common <- list()
DD_v_common <- list()
for (i in 1:3) {
nsp <- ncol(y_train[[i]])
dd_t <- list()
for (j in 1:nsp) {
dd_t[[j]] <- data.frame(cbind(y_train[[i]][,j],x_train[[i]],s_train[[i]]))
colnames(dd_t[[j]]) <- c('sp',colnames(x_train[[i]]),colnames(s_train[[i]]))
}
dd_v <- list()
for (j in 1:nsp) {
dd_v[[j]] <- data.frame(cbind(y_valid[[i]][,j],x_valid[[i]],s_valid[[i]]))
colnames(dd_v[[j]]) <- c('sp',colnames(x_valid[[i]]),colnames(s_valid[[i]]))
}
DD_t[[i]]<-dd_t
DD_v[[i]]<-dd_v
nsp_common <- ncol(y_train_common[[i]])
dd_t_common <- list()
for (j in 1:nsp_common) {
dd_t_common[[j]] <- data.frame(cbind(y_train_common[[i]][,j],x_train[[i]],s_train[[i]]))
colnames(dd_t_common[[j]]) <- c('sp',colnames(x_train[[i]]),colnames(s_train[[i]]))
}
dd_v_common <- list()
for (j in 1:nsp_common) {
dd_v_common[[j]] <- data.frame(cbind(y_valid_common[[i]][,j],x_valid[[i]],s_valid[[i]]))
colnames(dd_v_common[[j]]) <- c('sp',colnames(x_valid[[i]]),colnames(s_valid[[i]]))
}
DD_t_common[[i]]<-dd_t_common
DD_v_common[[i]]<-dd_v_common
}
#-----------------------------------------------------------------------------------------
if (commSP) {
y_train <- y_train_common
y_valid <- y_valid_common
DD_t <- DD_t_common
DD_v <- DD_v_common
}
setwd(WD)
| /SCRIPTS/read.data.r | no_license | davan690/SDM-comparison | R | false | false | 6,072 | r | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# READ DATA
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
setwd(DD)
# select random samples from the whole training set
#-----------------------------------------------------------------------------------------
if ( !file.exists(paste("siteSamps_",Sets[d],".mat",sep="")) | !file.exists(paste("spSel_",Sets[d],".csv",sep="")) ) {
set.seed(7); randSamp300<-sample(1:600,300,replace=F)
set.seed(7); randSamp150<-sample(randSamp300,150,replace=F)
siteSamps<-list(randSamp150,randSamp300,1:600)
#siteSamps<-list(1:150,1:300,1:600)
names(siteSamps)<-c("sz150","sz300","full600")
# subsetting species present at least once in the large data set (300 sites)
absentSp<-list()
for (i in 1:3) {
y_tmp <- read.csv(paste("Yt_",i,"_",set_no,".csv",sep=""),header=FALSE)
y_tmp <- apply(y_tmp, 2, as.numeric)[randSamp300,]
absentSp[[i]] <- which(colSums(y_tmp)==0)
}
absentSp<-as.numeric(unlist(absentSp))
spSel<-1:ncol(y_tmp)
if (sum(absentSp)!=0) {
spSel<-spSel[-absentSp]
}
# save the samples
write.mat(siteSamps, filename=paste("siteSamps_",Sets[d],".mat",sep=""))
write.table(spSel, file=paste("spSel_",Sets[d],".csv",sep=""),sep=",",row.names=F,col.names=F)
save(siteSamps, file=paste("siteSamps_",Sets[d],".RData",sep=""))
save(spSel, file=paste("spSel_",Sets[d],".RData",sep=""))
} else {
load(file=paste("siteSamps_",Sets[d],".RData",sep=""))
load(file=paste("spSel_",Sets[d],".RData",sep=""))
}
# `samp`: row indices (site subsample for size index `sz`) drawn earlier and
# stored in siteSamps; all training matrices below are subset to these rows.
# NOTE: `set_no`, `Sets`, `d` and `spSel` are defined earlier in the script.
samp <- siteSamps[[sz]]
# training
#-----------------------------------------------------------------------------------------
# For each of the 3 datasets: load species (Y), covariate (X) and random-effect
# (S) training tables, subset to the sampled sites / selected species, add
# squared covariate terms, z-score, and prepend an intercept column.
y_train <- list()
y_train_common <- list()
x_train <- list()
s_train <- list()
for (i in 1:3) {
y_train[[i]] <- read.csv(paste("Yt_",i,"_",set_no,".csv",sep=""),header=FALSE)
#y_train_full[[i]] <- apply(y_train[[i]], 2, as.numeric)
y_train[[i]] <- apply(y_train[[i]], 2, as.numeric)[samp,spSel]
# "common" species: present in at least 10% of the sampled training sites
common_sp <- which((colSums(y_train[[i]])/nrow(y_train[[i]])) >= 0.1)
y_train_common[[i]] <- y_train[[i]][,common_sp]
write.table(common_sp, file=paste("common_sp_d",i,"_",Sets[d],".csv",sep=""),sep=",",row.names=F,col.names=F)
x_train[[i]] <- as.matrix(read.csv(paste("Xt_",i,"_",set_no,".csv",sep=""),header=FALSE))
#x_train_full[[i]] <- apply(x_train[[i]], 2, as.numeric)
x_train[[i]] <- apply(x_train[[i]], 2, as.numeric)[samp,]
s_train[[i]] <- read.csv(paste("St_",i,"_",set_no,".csv",sep=""),header=FALSE)
#s_train_full[[i]] <- apply(s_train[[i]], 2, as.numeric)
s_train[[i]] <- apply(s_train[[i]], 2, as.numeric)[samp,]
colnames(s_train[[i]])<-paste('Rand',1:ncol(s_train[[i]]),sep='')
#colnames(s_train_full[[i]])<-paste('Rand',1:ncol(s_train_full[[i]]),sep='')
# append a squared term for every covariate column (quadratic responses)
ncovar<-ncol(x_train[[i]])
for (k in 1:ncovar) {
x_train[[i]]<-cbind(x_train[[i]],x_train[[i]][,k]^2)
#x_train_full[[i]]<-cbind(x_train_full[[i]],x_train_full[[i]][,k]^2)
}
# z-score every column, THEN add the intercept ('IC') column so it stays 1
x_train[[i]]<-apply(x_train[[i]],2,scale)
x_train[[i]]<-cbind(1,x_train[[i]])
colnames(x_train[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
#x_train_full[[i]]<-apply(x_train_full[[i]],2,scale)
#x_train_full[[i]]<-cbind(1,x_train_full[[i]])
#colnames(x_train_full[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
}
# validation
#-----------------------------------------------------------------------------------------
# Load the matching validation tables. All validation sites are kept (no
# `samp` subset), but species columns use the same `spSel` selection.
y_valid<-list()
y_valid_common<-list()
x_valid<-list()
s_valid<-list()
for (i in 1:3) {
y_valid[[i]] <- read.csv(paste("Yv","_",i,"_",set_no,".csv",sep=""),header=FALSE)
y_valid[[i]] <- apply(y_valid[[i]], 2, as.numeric)[,spSel]
# Recomputed from the TRAINING data so the validation "common species"
# columns match the training selection (prevalence >= 10%).
common_sp <- which((colSums(y_train[[i]])/nrow(y_train[[i]])) >= 0.1)
y_valid_common[[i]] <- y_valid[[i]][,common_sp]
x_valid[[i]] <- as.matrix(read.csv(paste("Xv","_",i,"_",set_no,".csv",sep=""),header=FALSE))
x_valid[[i]] <- apply(x_valid[[i]], 2, as.numeric)
s_valid[[i]] <- read.csv(paste("Sv","_",i,"_",set_no,".csv",sep=""),header=FALSE)
s_valid[[i]] <- apply(s_valid[[i]], 2, as.numeric)
colnames(s_valid[[i]])<-paste('Rand',1:ncol(s_valid[[i]]),sep='')
# Same quadratic expansion + scaling + intercept as for the training data.
# NOTE(review): scaling here uses the VALIDATION means/sds, not the training
# ones -- confirm this is the intended standardisation.
ncovar<-ncol(x_valid[[i]])
for (k in 1:ncovar) {
x_valid[[i]]<-cbind(x_valid[[i]],x_valid[[i]][,k]^2)
}
x_valid[[i]]<-apply(x_valid[[i]],2,scale)
x_valid[[i]]<-cbind(1,x_valid[[i]])
colnames(x_valid[[i]])<-c('IC',paste('V',1:ncovar,sep=''),paste('V',1:ncovar,'_2',sep=''))
}
# lists
#-----------------------------------------------------------------------------------------
# Pack everything into per-dataset lists of per-species data frames:
# DD_t[[i]][[j]] = training data for species j of dataset i, with the species
# response in column 'sp' followed by the covariate and random-effect columns.
DD_t <- list()
DD_v <- list()
DD_t_common <- list()
DD_v_common <- list()
for (i in 1:3) {
nsp <- ncol(y_train[[i]])
dd_t <- list()
for (j in 1:nsp) {
dd_t[[j]] <- data.frame(cbind(y_train[[i]][,j],x_train[[i]],s_train[[i]]))
colnames(dd_t[[j]]) <- c('sp',colnames(x_train[[i]]),colnames(s_train[[i]]))
}
dd_v <- list()
for (j in 1:nsp) {
dd_v[[j]] <- data.frame(cbind(y_valid[[i]][,j],x_valid[[i]],s_valid[[i]]))
colnames(dd_v[[j]]) <- c('sp',colnames(x_valid[[i]]),colnames(s_valid[[i]]))
}
DD_t[[i]]<-dd_t
DD_v[[i]]<-dd_v
# same structure restricted to the "common" (>= 10% prevalence) species
nsp_common <- ncol(y_train_common[[i]])
dd_t_common <- list()
for (j in 1:nsp_common) {
dd_t_common[[j]] <- data.frame(cbind(y_train_common[[i]][,j],x_train[[i]],s_train[[i]]))
colnames(dd_t_common[[j]]) <- c('sp',colnames(x_train[[i]]),colnames(s_train[[i]]))
}
dd_v_common <- list()
for (j in 1:nsp_common) {
dd_v_common[[j]] <- data.frame(cbind(y_valid_common[[i]][,j],x_valid[[i]],s_valid[[i]]))
colnames(dd_v_common[[j]]) <- c('sp',colnames(x_valid[[i]]),colnames(s_valid[[i]]))
}
DD_t_common[[i]]<-dd_t_common
DD_v_common[[i]]<-dd_v_common
}
#-----------------------------------------------------------------------------------------
# If requested, replace all downstream objects with their "common species"
# restrictions so the rest of the analysis only sees the common species.
if (commSP) {
y_train <- y_train_common
y_valid <- y_valid_common
DD_t <- DD_t_common
DD_v <- DD_v_common
}
# return to the original working directory (WD is set earlier in the script)
setwd(WD)
|
# The script ./mhg_code/'!MoralizingGods.R' is a cleaned version of code from:
#
# https://github.com/pesavage/moralizing-gods
#
# The script outputs the text file MoralisingGodsStatus.txt at the point where
# the relevant Nature analysis occurs.
#
# To check the code in ./mhg_code, the data used to generate Extended Data
# Fig. 1 can be downloaded from here:
#
# https://www.nature.com/articles/s41586-019-1043-4#MOESM6
#
# The file 41586_2019_1043_MOESM6_ESM_sheet1.csv contains sheet1 of the
# associated .xlsx file in .csv format.
#
# This script iterates over the 12 NGAs used in the Nature paper for which data
# are available in 41586_2019_1043_MOESM6_ESM.xlsx, to ensure that the code in
# ./mhg_code gives identical results. This is done for both MoralisingGods
# and DoctrinalMode.
# NOTE(review): rm(list = ls()) only clears the calling environment; it is
# harmless under Rscript but generally discouraged inside scripts.
rm(list = ls()) # Clear the workspace
natureData <- read.csv('41586_2019_1043_MOESM6_ESM_sheet1.csv', stringsAsFactors = FALSE)
githubData <- read.csv('./mhg_code/data_used_for_nature_analysis.csv', stringsAsFactors = FALSE)
# Subset to only the columns needed and give the columns the same names
natureData <- natureData[, c('NGA', 'Date..negative.years.BCE..positive...years.CE.', 'MoralisingGods', 'DoctrinalMode')]
names(natureData) <- c('NGA', 'Time', 'MoralisingGods', 'DoctrinalMode')
githubData <- githubData[, c('NGA', 'Time', 'MoralisingGods', 'DoctrinalMode')]
NGAs <- unique(natureData$NGA)
# Compare the two sources NGA by NGA and report every mismatch found.
for (i in seq_along(NGAs)) {
  # Subset for convenience
  nga <- NGAs[i]
  nat <- natureData[natureData$NGA == nga, ]
  git <- githubData[githubData$NGA == nga, ]
  # Set NA to -1 so the element-wise == comparisons below never yield NA
  nat[is.na(nat)] <- -1
  git[is.na(git)] <- -1
  # BUG FIX: all.equal() returns a character vector (not FALSE) on mismatch,
  # so !all.equal(...) errored whenever the dimensions actually differed.
  # identical() on the dim vectors gives a plain TRUE/FALSE.
  if (!identical(dim(nat), dim(git))) {
    print(nga)
    print('Failed for dimension')
  } else {
    # Dimensions are OK; compare the three columns element-wise
    if (!all(nat$Time == git$Time)) {
      print(nga)
      print('Failed for Time data')
    }
    if (!all(nat$MoralisingGods == git$MoralisingGods)) {
      print(nga)
      print('Failed for MoralisingGods data')
    }
    if (!all(nat$DoctrinalMode == git$DoctrinalMode)) {
      print(nga)
      print('Failed for DoctrinalMode data')
    }
  }
}
# Moralising God date by first occurrence
for (i in seq_along(NGAs)) {
  nga <- NGAs[i]
  git <- githubData[githubData$NGA == nga, ]
  # Set NA to -1 so that all works nicely (the unused `nat` subset was removed)
  git[is.na(git)] <- -1
  print('--')
  print(nga)
  # BUG FIX: min(which(...)) warns and returns Inf when an NGA never has
  # MoralisingGods == 1; match() returns NA instead, printing NA cleanly.
  print(git$Time[match(1, git$MoralisingGods)])
}
| /mhg_code/check_moralising_gods_status.R | permissive | jaewshin/Holocene | R | false | false | 2,454 | r | # The script ./mhg_code/'!MoralizingGods.R' is a cleaned version of code from:
#
# https://github.com/pesavage/moralizing-gods
#
# The script outputs the text file MoralisingGodsStatus.txt at the point where
# the relevant Nature analysis occurs.
#
# To check the code in ./mhg_code, the data used to generate Extended Data
# Fig. 1 can be downloaded from here:
#
# https://www.nature.com/articles/s41586-019-1043-4#MOESM6
#
# The file 41586_2019_1043_MOESM6_ESM_sheet1.csv contains sheet1 of the
# associated .xlsx file in .csv format.
#
# This script iterates over the 12 NGAs used in the Nature paper for which data
# are available in 41586_2019_1043_MOESM6_ESM.xlsx, to ensure that the code in
# ./mhg_code gives identical results. This is done for both MoralisingGods
# and DoctrinalMode.
# NOTE(review): rm(list = ls()) only clears the calling environment; it is
# harmless under Rscript but generally discouraged inside scripts.
rm(list = ls()) # Clear the workspace
natureData <- read.csv('41586_2019_1043_MOESM6_ESM_sheet1.csv', stringsAsFactors = FALSE)
githubData <- read.csv('./mhg_code/data_used_for_nature_analysis.csv', stringsAsFactors = FALSE)
# Subset to only the columns needed and give the columns the same names
natureData <- natureData[, c('NGA', 'Date..negative.years.BCE..positive...years.CE.', 'MoralisingGods', 'DoctrinalMode')]
names(natureData) <- c('NGA', 'Time', 'MoralisingGods', 'DoctrinalMode')
githubData <- githubData[, c('NGA', 'Time', 'MoralisingGods', 'DoctrinalMode')]
NGAs <- unique(natureData$NGA)
# Compare the two sources NGA by NGA and report every mismatch found.
for (i in seq_along(NGAs)) {
  # Subset for convenience
  nga <- NGAs[i]
  nat <- natureData[natureData$NGA == nga, ]
  git <- githubData[githubData$NGA == nga, ]
  # Set NA to -1 so the element-wise == comparisons below never yield NA
  nat[is.na(nat)] <- -1
  git[is.na(git)] <- -1
  # BUG FIX: all.equal() returns a character vector (not FALSE) on mismatch,
  # so !all.equal(...) errored whenever the dimensions actually differed.
  # identical() on the dim vectors gives a plain TRUE/FALSE.
  if (!identical(dim(nat), dim(git))) {
    print(nga)
    print('Failed for dimension')
  } else {
    # Dimensions are OK; compare the three columns element-wise
    if (!all(nat$Time == git$Time)) {
      print(nga)
      print('Failed for Time data')
    }
    if (!all(nat$MoralisingGods == git$MoralisingGods)) {
      print(nga)
      print('Failed for MoralisingGods data')
    }
    if (!all(nat$DoctrinalMode == git$DoctrinalMode)) {
      print(nga)
      print('Failed for DoctrinalMode data')
    }
  }
}
# Moralising God date by first occurrence
for (i in seq_along(NGAs)) {
  nga <- NGAs[i]
  git <- githubData[githubData$NGA == nga, ]
  # Set NA to -1 so that all works nicely (the unused `nat` subset was removed)
  git[is.na(git)] <- -1
  print('--')
  print(nga)
  # BUG FIX: min(which(...)) warns and returns Inf when an NGA never has
  # MoralisingGods == 1; match() returns NA instead, printing NA cleanly.
  print(git$Time[match(1, git$MoralisingGods)])
}
|
library(kknn)
# Pima Indians diabetes data; V9 (the outcome) must be a factor so kknn
# performs classification rather than regression.
all_data <- read.table('data/pima-indians-diabetes.data', sep = ',')
all_data$V9 <- as.factor(all_data$V9)
# Tune a kknn model (leave-one-out, k up to 50) over a grid of kernels for a
# given Minkowski distance; print the best misclassification rate achieved
# and return the best (k, kernel) pair.
train_knn <- function(distance) {
  model <- train.kknn(V9 ~ ., data = all_data, kmax = 50,
                      kernel = c("biweight", "triangular", "triweight", "cos", "inv", "gaussian",
                                 "optimal", "rectangular", "rank", "epanechnikov"),
                      distance = distance)
  b_p <- model[["best.parameters"]]
  print(model[["MISCLASS"]][b_p$k, b_p$kernel])
  return(b_p)
}
# lapply instead of sapply: sapply's simplification turned the returned
# best-parameter records into an awkward matrix of lists; a plain list is
# easier to inspect (best_params is not used further below).
best_params <- lapply(1:5, train_knn)
# Refit with a reduced kernel set at distance 1 so plot() shows the curves
model <- train.kknn(V9 ~ ., data = all_data, kmax = 50,
                    kernel = c("inv", "rectangular", "triweight", "cos",
                               "gaussian", "optimal"),
                    distance = 1)
plot(model)
# Hold out 20% of the (shuffled) rows for a final test-set error estimate
set.seed(12345)
all_data <- all_data[order(runif(nrow(all_data))), ]
nt <- as.integer(nrow(all_data) * 0.8)
train_data <- all_data[1:nt, ]
test_data <- all_data[(nt + 1):nrow(all_data), ]
model <- kknn(V9 ~ ., train_data, test_data, k = 27, kernel = 'inv', distance = 1)
predicted <- fitted(model)
print(1 - sum(diag(table(predicted, test_data$V9))) / nrow(test_data)) | /final/knn_cl.R | no_license | iliaKyzmin/Machine-Learning | R | false | false | 1,187 | r | library(kknn)
# Pima Indians diabetes data; V9 (outcome) must be a factor for classification.
all_data <- read.table('data/pima-indians-diabetes.data', sep = ',')
all_data$V9 <- as.factor(all_data$V9)
# Tune a kknn model over a kernel grid for a given Minkowski distance;
# prints the best misclassification rate and returns the best (k, kernel).
train_knn <- function(distance) {
model <- train.kknn(V9 ~ ., data = all_data, kmax = 50,
kernel = c("biweight", "triangular", "triweight", "cos", "inv", "gaussian",
"optimal", "rectangular", "rank", "epanechnikov"),
distance = distance)
b_p = model[["best.parameters"]]
print(model[["MISCLASS"]][b_p$k, b_p$kernel])
return(b_p)
}
# NOTE(review): sapply simplifies the returned parameter records into a
# matrix of lists; lapply would keep a cleaner list (best_params is unused).
best_params <- sapply(1:5, train_knn)
# Refit with the reduced kernel set at distance 1 for plotting
model <- train.kknn(V9 ~ ., data = all_data, kmax = 50,
kernel = c("inv", "rectangular", "triweight", "cos",
"gaussian", "optimal"),
distance = 1)
plot(model)
# Hold out 20% of the (shuffled) rows for a test-set error estimate
set.seed(12345)
all_data <- all_data[order(runif(nrow(all_data))), ]
nt <- as.integer(nrow(all_data) * 0.8)
train_data <- all_data[1:nt, ]
test_data <- all_data[(nt + 1):nrow(all_data), ]
model <- kknn(V9 ~ ., train_data, test_data, k = 27, kernel = 'inv', distance = 1)
predicted <- fitted(model)
print(1 - sum(diag(table(predicted, test_data$V9))) / nrow(test_data)) |
# This is my first r script | /first r script.R | no_license | ppleeuw/introtoBDA | R | false | false | 27 | r | # This is my first r script |
### Jinliang Yang
### use impute_parent in CJ data
###########
# Split the genotype table into CSV chunks of at most `ksize` kids each.
#
# Args:
#   geno:    data.frame with a "snpid" column plus one column per kid ID.
#   ped:     pedigree data.frame; first 3 columns are IDs, `proid` holds
#            the kid IDs used as genotype column names.
#   ksize:   maximum number of kids per output file.
#   outfile: output path prefix; files are written as <outfile>_subgeno<i>.csv.
# Side effect: writes ceiling(nrow(ped)/ksize) CSV files.
write_subgeno <- function(geno, ped, ksize=10, outfile="out"){
    ped[, 1:3] <- apply(ped[, 1:3], 2, as.character)
    # BUG FIX: the chunk count must be based on `ksize`, not a hard-coded 10,
    # otherwise any ksize != 10 yields wrong (or out-of-range) chunks.
    tot <- ceiling(nrow(ped)/ksize)
    for (i in seq_len(tot)) {
        message(sprintf("###>>> start to write the [ %s/%s ] subset of geno", i, tot))
        # last chunk takes whatever kids remain (may be fewer than ksize)
        if(i != tot){
            kid <- ped$proid[((i-1)*ksize+1):(ksize*i)]
        }else{
            kid <- ped$proid[((i-1)*ksize+1):nrow(ped)]
        }
        subgeno <- geno[, c("snpid", kid)]
        outfile1 <- paste0(outfile, "_subgeno", i, ".csv")
        write.table(subgeno, outfile1, sep=",", row.names=FALSE, quote=FALSE)
    }
    message("###>>> DONE <<< ###")
}
#### read in masked data
# NOTE(review): `lib` partially matches library()'s `lib.loc` argument;
# spelling it out as lib.loc would be clearer.
library(data.table, lib="~/bin/Rlib/")
library(imputeR)
### read genotype, snpinfo and pedigree data
ped <- read.csv("data/Parentage_for_imputeR.csv")
names(ped) <- c("proid", "parent1", "parent2")
geno <- fread("largedata/teo_updated/teo_raw_biallelic_recoded_20160303_AGPv2.txt")
geno <- as.data.frame(geno)
# NOTE(review): "PC_I58_ID2" appears twice in p5 -- possibly a typo for a
# fifth distinct parent ID; verify before relying on a 5-parent selection.
p5 <- c("PC_M05_ID1", "PC_I58_ID2", "PC_N09_ID1", "PC_I58_ID2", "PC_L08_ID1")
goodloci <- read.table("data/good_loci.txt")
# keep only SNPs on the curated good-loci list
subgeno <- subset(geno, snpid %in% goodloci$V1)
### updated geno matrix: overwrite the imputed parents' columns with imp4
imp4 <- read.csv("largedata/ip/imp4.csv")
# sanity check: imp4 rows must line up with subgeno SNP order
if(sum(subgeno$snpid != row.names(imp4)) >0) stop("!!! ERROR")
ncol(subgeno[, names(imp4)])
subgeno[, names(imp4)] <- imp4
ped[, 1:3] <- apply(ped[, 1:3], 2, as.character)
# selfed kids of the five target parents only
myped <- subset(ped, parent1 == parent2 & parent1 %in% p5)
###############
write_subgeno(geno=subgeno, ped=myped, ksize=10, outfile="largedata/ik/kid")
| /profiling/8.Luis_data/8.D.0_ik_update_geno.R | no_license | yangjl/phasing | R | false | false | 1,682 | r | ### Jinliang Yang
### use impute_parent in CJ data
###########
# Write the genotype table out in per-kid CSV chunks of at most `ksize` kids.
# geno must contain a "snpid" column plus one column per kid ID listed in
# ped$proid; files are written as <outfile>_subgeno<i>.csv.
write_subgeno <- function(geno, ped, ksize=10, outfile="out"){
    ped[, 1:3] <- apply(ped[, 1:3], 2, as.character)
    # BUG FIX: chunk count was hard-coded as nrow(ped)/10, ignoring `ksize`;
    # any ksize != 10 produced wrong or out-of-range chunks.
    tot <- ceiling(nrow(ped)/ksize)
    for (i in seq_len(tot)) {
        message(sprintf("###>>> start to write the [ %s/%s ] subset of geno", i, tot))
        # the final chunk absorbs the remainder (possibly fewer than ksize kids)
        if(i != tot){
            kid <- ped$proid[((i-1)*ksize+1):(ksize*i)]
        }else{
            kid <- ped$proid[((i-1)*ksize+1):nrow(ped)]
        }
        subgeno <- geno[, c("snpid", kid)]
        outfile1 <- paste0(outfile, "_subgeno", i, ".csv")
        write.table(subgeno, outfile1, sep=",", row.names=FALSE, quote=FALSE)
    }
    message("###>>> DONE <<< ###")
}
#### read in masked data
# NOTE(review): `lib` partially matches library()'s `lib.loc` argument.
library(data.table, lib="~/bin/Rlib/")
library(imputeR)
### read genotype, snpinfo and pedigree data
ped <- read.csv("data/Parentage_for_imputeR.csv")
names(ped) <- c("proid", "parent1", "parent2")
geno <- fread("largedata/teo_updated/teo_raw_biallelic_recoded_20160303_AGPv2.txt")
geno <- as.data.frame(geno)
# NOTE(review): "PC_I58_ID2" is listed twice; possibly a typo -- verify.
p5 <- c("PC_M05_ID1", "PC_I58_ID2", "PC_N09_ID1", "PC_I58_ID2", "PC_L08_ID1")
goodloci <- read.table("data/good_loci.txt")
subgeno <- subset(geno, snpid %in% goodloci$V1)
### updated geno matrix: overwrite imputed parents' columns with imp4
imp4 <- read.csv("largedata/ip/imp4.csv")
# sanity check: imp4 rows must line up with subgeno SNP order
if(sum(subgeno$snpid != row.names(imp4)) >0) stop("!!! ERROR")
ncol(subgeno[, names(imp4)])
subgeno[, names(imp4)] <- imp4
ped[, 1:3] <- apply(ped[, 1:3], 2, as.character)
# selfed kids of the five target parents only
myped <- subset(ped, parent1 == parent2 & parent1 %in% p5)
###############
write_subgeno(geno=subgeno, ped=myped, ksize=10, outfile="largedata/ik/kid")
|
#####################################
##### Get followers with rtweet #####
#####################################
library(rtweet)

# Get bassnectar account info; the follower count drives both paging loops
lorin_info <- lookup_users("bassnectar")

######################### Fetch User IDs #######################################
# retrieve the initial page of follower ids (up to 75000 ids per call)
basshead_IDs <- get_followers("bassnectar")
# set page cursor for the next iteration
page <- next_cursor(basshead_IDs)
# BUG FIX: the original `Sys.sleep(60)*15` slept for only 60 seconds and then
# multiplied the (invisible NULL) result by 15; sleep the full 15 minutes.
Sys.sleep(60 * 15)
# Initialize loop variables: one page per 75000 followers
id_iterations <- (lorin_info$followers_count %/% 75000) + 1
iterations <- 1
# Loop to retrieve the remaining follower-id pages
while (id_iterations > iterations) {
  # Store new data in a temporary data frame
  basshead_IDs_temp <- get_followers("bassnectar", page = page)
  # move cursor for the next iteration
  page <- next_cursor(basshead_IDs_temp)
  # Add new data to the existing ID data frame
  basshead_IDs <- rbind(basshead_IDs, basshead_IDs_temp)
  rm(basshead_IDs_temp)
  iterations <- iterations + 1
  # Pause until the rate limit resets.
  # NOTE(review): `twitter_token` is never defined in this script -- it is
  # presumably created elsewhere (e.g. create_token()); confirm before running.
  # Row 38 is assumed to be the followers/ids endpoint; `reset` appears to be
  # in minutes, hence the *60 -- verify against the rtweet version in use.
  currentRL <- rate_limit(twitter_token)
  Sys.sleep(60 * (currentRL$reset[38]) + 1)
}
# Write the data to CSV
write.csv(basshead_IDs, "./follower_IDs.csv")

######################### Get User Data ########################################
# Build an empty data frame with the same columns as a lookup_users() result
num_columns <- ncol(lorin_info)
bassheads <- data.frame(matrix(nrow = 0, ncol = num_columns))
colnames(bassheads) <- colnames(lorin_info)
# lookup_users() accepts up to 18000 ids per rate-limit window
info_interations <- (lorin_info$followers_count %/% 18000) + 1
info_index <- 1
for (i in 1:info_interations) {
  # BUG FIX: clamp the upper row index on the final chunk; the original
  # `i*18000` overshot nrow(basshead_IDs) and produced NA rows.
  last_row <- min(i * 18000, nrow(basshead_IDs))
  bassheads_temp <- lookup_users(basshead_IDs[info_index:last_row, ])
  info_index <- info_index + 18000
  # add data to the basshead data frame (newest chunk first, as before)
  bassheads <- rbind(bassheads_temp, bassheads)
  rm(bassheads_temp)
  # Get the current rate limit and sleep until reset if it is exhausted
  currentRL <- rate_limit(twitter_token)
  if (currentRL$remaining[36] < 180) {
    # Pause R until the rate-limit reset
    Sys.sleep(60 * (as.integer(currentRL$reset[36])) + 1)
  }
}
# Write basshead info to CSV
write.csv(bassheads, "./basshead_df_raw.csv")
| /Get Follower info - rtweet.R | no_license | Dmunslow/BassnectarProject | R | false | false | 2,343 | r | #####################################
##### Get followers with rtweet #####
#####################################
library(rtweet)

# Get bassnectar account info; the follower count drives both paging loops
lorin_info <- lookup_users("bassnectar")

######################### Fetch User IDs #######################################
# retrieve the initial page of follower ids (up to 75000 ids per call)
basshead_IDs <- get_followers("bassnectar")
# set page cursor for the next iteration
page <- next_cursor(basshead_IDs)
# BUG FIX: `Sys.sleep(60)*15` slept 60 seconds and multiplied the (invisible
# NULL) result by 15; sleep the full 15 minutes instead.
Sys.sleep(60 * 15)
# Initialize loop variables: one page per 75000 followers
id_iterations <- (lorin_info$followers_count %/% 75000) + 1
iterations <- 1
# Loop to retrieve the remaining follower-id pages
while (id_iterations > iterations) {
  basshead_IDs_temp <- get_followers("bassnectar", page = page)
  page <- next_cursor(basshead_IDs_temp)
  basshead_IDs <- rbind(basshead_IDs, basshead_IDs_temp)
  rm(basshead_IDs_temp)
  iterations <- iterations + 1
  # NOTE(review): `twitter_token` is never defined in this script; it is
  # presumably created elsewhere (e.g. create_token()). Row 38 is assumed to
  # be the followers/ids endpoint with `reset` in minutes -- verify.
  currentRL <- rate_limit(twitter_token)
  Sys.sleep(60 * (currentRL$reset[38]) + 1)
}
# Write the data to CSV
write.csv(basshead_IDs, "./follower_IDs.csv")

######################### Get User Data ########################################
# Empty data frame with the same columns as a lookup_users() result
num_columns <- ncol(lorin_info)
bassheads <- data.frame(matrix(nrow = 0, ncol = num_columns))
colnames(bassheads) <- colnames(lorin_info)
# lookup_users() accepts up to 18000 ids per rate-limit window
info_interations <- (lorin_info$followers_count %/% 18000) + 1
info_index <- 1
for (i in 1:info_interations) {
  # BUG FIX: clamp the upper row index on the final chunk; `i*18000`
  # overshot nrow(basshead_IDs) and produced NA rows.
  last_row <- min(i * 18000, nrow(basshead_IDs))
  bassheads_temp <- lookup_users(basshead_IDs[info_index:last_row, ])
  info_index <- info_index + 18000
  # newest chunk first, matching the original row order
  bassheads <- rbind(bassheads_temp, bassheads)
  rm(bassheads_temp)
  # Sleep until reset if the rate limit is exhausted
  currentRL <- rate_limit(twitter_token)
  if (currentRL$remaining[36] < 180) {
    Sys.sleep(60 * (as.integer(currentRL$reset[36])) + 1)
  }
}
# Write basshead info to CSV
write.csv(bassheads, "./basshead_df_raw.csv")
|
# Build the hkdistrict_summary dataset and register it with usethis.
library(dplyr)
## Initial set-up - DO NOT RERUN
# NOTE(review): the commented-out pipeline selects Region_ZH twice; the
# second was presumably meant to be Region_EN -- verify before rerunning.
# group_by_all() is superseded in modern dplyr (use pick()/across()).
# hkdc %>%
# mutate(Code = substr(ConstituencyCode, start = 1, stop = 1)) %>%
# select(Code, District_EN, District_ZH, Region_ZH, Region_ZH) %>%
# group_by_all() %>%
# summarise(n = n()) %>%
# writexl::write_xlsx(here::here(".dev", "data", "hkdistrictsummary.xlsx"))
# Load the prepared summary sheet from the package's .dev data folder
hkdistrict_summary <- readxl::read_xlsx(here::here(".dev", "data", "hkdistrictsummary.xlsx"))
## Use data: save as package data (data/hkdistrict_summary.rda)
usethis::use_data(hkdistrict_summary, overwrite = TRUE)
| /.dev/script/hkdistrictsummary - preparation.R | permissive | Hong-Kong-Districts-Info/hkdatasets | R | false | false | 490 | r | library(dplyr)
## Initial set-up - DO NOT RERUN
# NOTE(review): Region_ZH is selected twice below; the second was presumably
# meant to be Region_EN -- verify before rerunning.
# hkdc %>%
# mutate(Code = substr(ConstituencyCode, start = 1, stop = 1)) %>%
# select(Code, District_EN, District_ZH, Region_ZH, Region_ZH) %>%
# group_by_all() %>%
# summarise(n = n()) %>%
# writexl::write_xlsx(here::here(".dev", "data", "hkdistrictsummary.xlsx"))
# Load the prepared summary sheet and save it as package data
hkdistrict_summary <- readxl::read_xlsx(here::here(".dev", "data", "hkdistrictsummary.xlsx"))
## Use data
usethis::use_data(hkdistrict_summary, overwrite = TRUE)
|
# ABD_Maps: plot weekly eBird Status & Trends relative abundance for a duck
# species, masked to IL/MO/IA, in a Mollweide projection.
library(ebirdst)
library(rnaturalearth)
library(ggplot2)
library(viridisLite)
library(dplyr)
library(tidyverse)
library(raster)
library(sf)
library(readr)
# Select the species to plot here. This example uses Green-winged Teal "gnwtea".
## For Mallard use "mallar3" and for Pintail use "norpin"
sp_path <- ebirdst_download(species = "gnwtea", force= TRUE)
# load trimmed median abundances (one raster layer per S&T week)
abunds <- load_raster("abundance", path = sp_path)
## Uncomment below for upper and lower percentiles (upper = 90th, lower = 10th)
#lower <- load_raster("abundance_lower", path= sp_path)
#upper <- load_raster("abundance_upper", path = sp_path)
date_vector <- parse_raster_dates(abunds)
# to convert the data to a simpler geographic format and access tabularly
# reproject into geographic (decimal degrees);
# NOTE(review): the "+init=epsg:4326" string form is deprecated in newer
# PROJ/rgdal -- confirm it still works with the installed raster version.
abund_stack_ll <- projectRaster(abunds[[4]], crs = "+init=epsg:4326",
method = "ngb")
# Convert the raster layer into a points matrix (lon, lat, value)
p <- rasterToPoints(abund_stack_ll)
colnames(p) <- c("longitude", "latitude", "abundance_umean")
head(p)
# use parse_raster_dates() to get actual date objects for each layer
############################## REMEMBER ######################################
## We are using the S&T weeks here, not the Duck Week from the other runs. ##
## You will have to convert between the two. ##
## Load "DuckWeek.csv" in the Data folder to see ##
## the Duck Week for each S&T week ##
##############################################################################
# NOTE(review): duplicates the date_vector computation above; harmless but
# redundant.
date_vector <- parse_raster_dates(abunds)
print(date_vector)
### Load Duck Week table for conversion (S&T week -> Duck Week).
DuckWeek <- read_csv("Data/DuckWeek.csv")
# define mollweide projection centred on -90 longitude
mollweide <- "+proj=moll +lon_0=-90 +x_0=0 +y_0=0 +ellps=WGS84"
## Create the background (North America) and focal states geometry
us <- ne_countries(continent = "North America", returnclass = "sf") %>%
st_union() %>%
st_transform(crs = mollweide)
states <- ne_states(iso_a2 = "US", returnclass = "sf") %>%
filter(postal %in% c("IL", "MO", "IA")) %>%
st_transform(crs = mollweide) %>%
st_geometry()
## Select the S&T week to plot. Week 16 is stated to be the week of
## 04/19/2018 (Duck Week 33) -- confirm against print(date_vector) above.
## Change the "16" in the first line below to the S&T week you want to plot.
abd <- projectRaster(abunds[[16]], crs = mollweide, method = "ngb")
# clip the abundance raster to the three focal states
abd_mask <- mask(crop(abd, as_Spatial(states)), as_Spatial(states))
bins <- calc_bins(abd_mask)
pal <- abundance_palette(length(bins$bins) - 1, season = "weekly")
# Base-graphics layered map: grey background, abundance fill, white borders
par(mar = c(0, 0, 0, 0))
plot(states, col = NA, border = NA)
plot(us, col = "grey90", border = NA, add = TRUE)
plot(states, col = "grey80", border = NA, add = TRUE)
plot(abd_mask,
breaks = bins$bins,
col = pal,
axes = FALSE, box = FALSE, legend = FALSE,
maxpixels = ncell(abd),
add = TRUE)
plot(states, col = NA, border = "white", lwd = 0.5, add = TRUE)
# create a thinner set of labels (keep first, middle and last bin only)
bin_labels <- format(round(bins$bins, 2), nsmall = 2)
bin_labels[!(bin_labels %in% c(bin_labels[1],
bin_labels[round((length(bin_labels) / 2)) + 1],
bin_labels[length(bin_labels)]))] <- ""
# plot legend (power transform matches calc_bins' binning)
plot(abd_mask^bins$power, legend.only = TRUE,
col = abundance_palette(length(bins$bins) - 1, season = "weekly"),
breaks = bins$bins^bins$power, lab.breaks = bin_labels,
legend.shrink = 0.97, legend.width = 2,
axis.args = list(cex.axis = 0.9, lwd.ticks = 0, col = NA, line = -0.8))
title("AGWT Relative Abundance Week of 04/19/2018",
line = -1, cex.main = 1)
| /ABD_Maps.R | no_license | OrinRobinson/Ducks_and_eBird | R | false | false | 3,700 | r | library(ebirdst)
library(rnaturalearth)
library(ggplot2)
library(viridisLite)
library(dplyr)
library(tidyverse)
library(raster)
library(sf)
library(readr)
# Select the species to plot here. This example uses Green-winged Teal "gnwtea".
## For Mallard use "mallar3" and for Pintail use "norpin"
sp_path <- ebirdst_download(species = "gnwtea", force= TRUE)
# load trimmed median abundances (one raster layer per S&T week)
abunds <- load_raster("abundance", path = sp_path)
## Uncomment below for upper and lower percentiles (upper = 90th, lower = 10th)
#lower <- load_raster("abundance_lower", path= sp_path)
#upper <- load_raster("abundance_upper", path = sp_path)
date_vector <- parse_raster_dates(abunds)
# reproject into geographic (decimal degrees) and tabulate one layer
abund_stack_ll <- projectRaster(abunds[[4]], crs = "+init=epsg:4326",
method = "ngb")
# Convert the raster layer into a points matrix (lon, lat, value)
p <- rasterToPoints(abund_stack_ll)
colnames(p) <- c("longitude", "latitude", "abundance_umean")
head(p)
# use parse_raster_dates() to get actual date objects for each layer
############################## REMEMBER ######################################
## We are using the S&T weeks here, not the Duck Week from the other runs. ##
## You will have to convert between the two. ##
## Load "DuckWeek.csv" in the Data folder to see ##
## the Duck Week for each S&T week ##
##############################################################################
# NOTE(review): duplicates the date_vector computation above.
date_vector <- parse_raster_dates(abunds)
print(date_vector)
### Load Duck Week table for conversion (S&T week -> Duck Week).
DuckWeek <- read_csv("Data/DuckWeek.csv")
# define mollweide projection centred on -90 longitude
mollweide <- "+proj=moll +lon_0=-90 +x_0=0 +y_0=0 +ellps=WGS84"
## Background (North America) and focal states geometry
us <- ne_countries(continent = "North America", returnclass = "sf") %>%
st_union() %>%
st_transform(crs = mollweide)
states <- ne_states(iso_a2 = "US", returnclass = "sf") %>%
filter(postal %in% c("IL", "MO", "IA")) %>%
st_transform(crs = mollweide) %>%
st_geometry()
## Select the S&T week to plot. Week 16 is stated to be 04/19/2018 (Duck
## Week 33) -- confirm against print(date_vector) above. Change the "16"
## below to the S&T week you want to plot.
abd <- projectRaster(abunds[[16]], crs = mollweide, method = "ngb")
abd_mask <- mask(crop(abd, as_Spatial(states)), as_Spatial(states))
bins <- calc_bins(abd_mask)
pal <- abundance_palette(length(bins$bins) - 1, season = "weekly")
# Layered base-graphics map: background, abundance fill, white state borders
par(mar = c(0, 0, 0, 0))
plot(states, col = NA, border = NA)
plot(us, col = "grey90", border = NA, add = TRUE)
plot(states, col = "grey80", border = NA, add = TRUE)
plot(abd_mask,
breaks = bins$bins,
col = pal,
axes = FALSE, box = FALSE, legend = FALSE,
maxpixels = ncell(abd),
add = TRUE)
plot(states, col = NA, border = "white", lwd = 0.5, add = TRUE)
# create a thinner set of labels (keep first, middle and last bin only)
bin_labels <- format(round(bins$bins, 2), nsmall = 2)
bin_labels[!(bin_labels %in% c(bin_labels[1],
bin_labels[round((length(bin_labels) / 2)) + 1],
bin_labels[length(bin_labels)]))] <- ""
# plot legend (power transform matches calc_bins' binning)
plot(abd_mask^bins$power, legend.only = TRUE,
col = abundance_palette(length(bins$bins) - 1, season = "weekly"),
breaks = bins$bins^bins$power, lab.breaks = bin_labels,
legend.shrink = 0.97, legend.width = 2,
axis.args = list(cex.axis = 0.9, lwd.ticks = 0, col = NA, line = -0.8))
title("AGWT Relative Abundance Week of 04/19/2018",
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strvalidator-package.r
\docType{data}
\name{set2}
\alias{set2}
\title{SGMPlus example data}
\format{A data frame with 32 rows and 5 variables}
\usage{
data(set2)
}
\description{
A slimmed dataset containing SGM Plus genotyping result for 2 replicates
of 'sampleA'.
}
\keyword{datasets}
| /man/set2.Rd | no_license | sctyner/strvalidator | R | false | true | 364 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strvalidator-package.r
\docType{data}
\name{set2}
\alias{set2}
\title{SGMPlus example data}
\format{A data frame with 32 rows and 5 variables}
\usage{
data(set2)
}
\description{
A slimmed dataset containing SGM Plus genotyping result for 2 replicates
of 'sampleA'.
}
\keyword{datasets}
|
#!/usr/bin/env Rscript

# CLI wrapper: generate a stacked bar chart of missing-variant counts from
# one or more bcftools tab-delimited inputs, in parallel.
#
# Example:
# Rscript scripts/generateMissingVariantCountStackBarChartParallel.R \
# --cores 3 \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr01.txt \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr02.txt \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr03.txt \
# --output /scratch/yenc/projects/VisualVariants/output/missing_variant_count_stack_bar_chart.png \
# --all

rm(list = ls())

library(foreach)
library(iterators)
library(parallel)
library(doParallel)
library(VisualVariants)

# ---- Command-line arguments ----
parser <- argparse::ArgumentParser()
parser$add_argument("--cores", type="integer", default=1, help="Number of processing cores")
parser$add_argument("--inputs", type="character", action="append", help="Input bcftools tab delimited files", required=TRUE)
parser$add_argument("--output", type="character", help="Output file path", required=TRUE)
parser$add_argument("--all", action="store_true", default=FALSE, help="Output all files")
args <- parser$parse_args()

cores <- args$cores
inputs <- args$inputs
output <- args$output
all <- args$all  # NOTE: shadows base::all for value lookups (calls still resolve)

# Abort (exit code 1) if any input file is missing
for (i in seq_along(inputs)) {
  if (!file.exists(inputs[i])) {
    quit(status=1)
  }
}

# Create the output directory if needed; abort if it cannot be created
if (!dir.exists(dirname(output))) {
  dir.create(dirname(output), showWarnings=FALSE, recursive=TRUE)
  if (!dir.exists(dirname(output))) {
    quit(status=1)
  }
}

return_value <- generateMissingVariantCountStackBarChartParallel(
  bcftools_tab_delimited_file_path=inputs,
  cores=cores
)

# Optionally dump the underlying data frames next to the chart
if (isTRUE(all)) {
  utils::write.csv(
    x=return_value$MissingVariantDataFrame,
    file=file.path(gsub("(\\.png)$|(\\.jpg)$|(\\.jpeg)$", ".MissingVariantDataFrame.csv", output, ignore.case=TRUE)),
    row.names=FALSE,
    na=""
  )
  # BUG FIX: anchor the extension patterns with `$` (as in the call above);
  # the unanchored version replaced ".png"/".jpg" anywhere in the path.
  utils::write.csv(
    x=return_value$MissingVariantCountDataFrame,
    file=file.path(gsub("(\\.png)$|(\\.jpg)$|(\\.jpeg)$", ".MissingVariantCountDataFrame.csv", output, ignore.case=TRUE)),
    row.names=FALSE,
    na=""
  )
}

ggplot2::ggsave(
  filename = basename(output),
  plot = return_value$MissingVariantCountStackBarChart,
  path = dirname(output),
  width = 32,
  height = 18
)
| /scripts/generateMissingVariantCountStackBarChartParallel.R | permissive | yenon118/VisualVariants | R | false | false | 2,110 | r | #!/usr/bin/env Rscript
rm(list = ls())

# CLI wrapper: generate a stacked bar chart of missing-variant counts from
# one or more bcftools tab-delimited inputs, in parallel.
#
# Example:
# Rscript scripts/generateMissingVariantCountStackBarChartParallel.R \
# --cores 3 \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr01.txt \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr02.txt \
# --inputs /scratch/yenc/projects/VisualVariants/data/Nebraska.Chr03.txt \
# --output /scratch/yenc/projects/VisualVariants/output/missing_variant_count_stack_bar_chart.png \
# --all

library(foreach)
library(iterators)
library(parallel)
library(doParallel)
library(VisualVariants)

# ---- Command-line arguments ----
parser <- argparse::ArgumentParser()
parser$add_argument("--cores", type="integer", default=1, help="Number of processing cores")
parser$add_argument("--inputs", type="character", action="append", help="Input bcftools tab delimited files", required=TRUE)
parser$add_argument("--output", type="character", help="Output file path", required=TRUE)
parser$add_argument("--all", action="store_true", default=FALSE, help="Output all files")
args <- parser$parse_args()

cores <- args$cores
inputs <- args$inputs
output <- args$output
all <- args$all  # NOTE: shadows base::all for value lookups (calls still resolve)

# Abort (exit code 1) if any input file is missing
for (i in seq_along(inputs)) {
  if (!file.exists(inputs[i])) {
    quit(status=1)
  }
}

# Create the output directory if needed; abort if it cannot be created
if (!dir.exists(dirname(output))) {
  dir.create(dirname(output), showWarnings=FALSE, recursive=TRUE)
  if (!dir.exists(dirname(output))) {
    quit(status=1)
  }
}

return_value <- generateMissingVariantCountStackBarChartParallel(
  bcftools_tab_delimited_file_path=inputs,
  cores=cores
)

# Optionally dump the underlying data frames next to the chart
if (isTRUE(all)) {
  utils::write.csv(
    x=return_value$MissingVariantDataFrame,
    file=file.path(gsub("(\\.png)$|(\\.jpg)$|(\\.jpeg)$", ".MissingVariantDataFrame.csv", output, ignore.case=TRUE)),
    row.names=FALSE,
    na=""
  )
  # BUG FIX: anchor the extension patterns with `$` (as in the call above);
  # the unanchored version replaced ".png"/".jpg" anywhere in the path.
  utils::write.csv(
    x=return_value$MissingVariantCountDataFrame,
    file=file.path(gsub("(\\.png)$|(\\.jpg)$|(\\.jpeg)$", ".MissingVariantCountDataFrame.csv", output, ignore.case=TRUE)),
    row.names=FALSE,
    na=""
  )
}

ggplot2::ggsave(
  filename = basename(output),
  plot = return_value$MissingVariantCountStackBarChart,
  path = dirname(output),
  width = 32,
  height = 18
)
|
# plot4.R -- 2x2 panel of household power-consumption plots for the two days
# 2007-02-01 and 2007-02-02 (UCI household power consumption data).

# BUG FIX: the table was read into `llData` but every subsequent line used
# `allData`, so the script failed with "object 'allData' not found".
allData <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE)
dataOfInterest <- allData[allData$Date == "1/2/2007" | allData$Date == "2/2/2007", ]
# Columns arrive as factors (the file marks missing values with "?"), so
# convert via levels()[...] to recover the numeric values.
dataOfInterest$Global_active_power <- as.numeric(levels(dataOfInterest$Global_active_power))[dataOfInterest$Global_active_power]
# Combine Date + Time into a single timestamp for the x axes
dataOfInterest$Time <- strptime(paste(as.Date(as.character(dataOfInterest$Date), format = "%d/%m/%Y"), dataOfInterest$Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")
dataOfInterest$Date <- as.Date(as.character(dataOfInterest$Date), format = "%d/%m/%Y")
dataOfInterest$Sub_metering_1 <- as.numeric(levels(dataOfInterest$Sub_metering_1))[dataOfInterest$Sub_metering_1]
dataOfInterest$Sub_metering_2 <- as.numeric(levels(dataOfInterest$Sub_metering_2))[dataOfInterest$Sub_metering_2]
# NOTE(review): Sub_metering_3 is plotted below but never converted like the
# other columns -- presumably it is read as numeric already; verify.
dataOfInterest$Voltage <- as.numeric(levels(dataOfInterest$Voltage))[dataOfInterest$Voltage]
dataOfInterest$Global_reactive_power <- as.numeric(levels(dataOfInterest$Global_reactive_power))[dataOfInterest$Global_reactive_power]

png(filename = "./plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Panel 1: global active power over time
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Global_active_power), max(dataOfInterest$Global_active_power)), type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(dataOfInterest$Time, dataOfInterest$Global_active_power)
# Panel 2: voltage over time
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Voltage), max(dataOfInterest$Voltage)), type = "n", xlab = "datetime", ylab = "Voltage")
lines(dataOfInterest$Time, dataOfInterest$Voltage)
# Panel 3: the three sub-metering series with a legend
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1),
     c(min(min(dataOfInterest$Sub_metering_1), min(dataOfInterest$Sub_metering_2), min(dataOfInterest$Sub_metering_3)),
       max(max(dataOfInterest$Sub_metering_1), max(dataOfInterest$Sub_metering_2), max(dataOfInterest$Sub_metering_3))),
     type = "n", xlab = "", ylab = "Energy sub metering")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_1, col = "black")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_2, col = "red")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_3, col = "blue")
legend(x = "topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1, lty = c(1, 1), bty = "n")
# Panel 4: global reactive power over time
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Global_reactive_power), max(dataOfInterest$Global_reactive_power)), type = "n", xlab = "datetime", ylab = "Global_reactive_power")
lines(dataOfInterest$Time, dataOfInterest$Global_reactive_power)
dev.off()
| /plot4.R | no_license | aepag/ExData_Plotting1 | R | false | false | 2,617 | r | llData <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE)
# BUG FIX: the original read the table into `llData` (a typo) while every
# subsequent line used `allData`; read it under the name actually used.
allData <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE)
dataOfInterest <- allData[allData$Date == "1/2/2007" | allData$Date == "2/2/2007", ]
# Columns arrive as factors (missing values are "?"), so convert via
# levels()[...] to recover the numeric values.
dataOfInterest$Global_active_power <- as.numeric(levels(dataOfInterest$Global_active_power))[dataOfInterest$Global_active_power]
# Combine Date + Time into a single timestamp for the x axes
dataOfInterest$Time <- strptime(paste(as.Date(as.character(dataOfInterest$Date), format = "%d/%m/%Y"), dataOfInterest$Time, sep = " "), format = "%Y-%m-%d %H:%M:%S")
dataOfInterest$Date <- as.Date(as.character(dataOfInterest$Date), format = "%d/%m/%Y")
dataOfInterest$Sub_metering_1 <- as.numeric(levels(dataOfInterest$Sub_metering_1))[dataOfInterest$Sub_metering_1]
dataOfInterest$Sub_metering_2 <- as.numeric(levels(dataOfInterest$Sub_metering_2))[dataOfInterest$Sub_metering_2]
# NOTE(review): Sub_metering_3 is plotted but never converted -- verify it is
# read as numeric already.
dataOfInterest$Voltage <- as.numeric(levels(dataOfInterest$Voltage))[dataOfInterest$Voltage]
dataOfInterest$Global_reactive_power <- as.numeric(levels(dataOfInterest$Global_reactive_power))[dataOfInterest$Global_reactive_power]

png(filename = "./plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Panel 1: global active power
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Global_active_power), max(dataOfInterest$Global_active_power)), type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(dataOfInterest$Time, dataOfInterest$Global_active_power)
# Panel 2: voltage
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Voltage), max(dataOfInterest$Voltage)), type = "n", xlab = "datetime", ylab = "Voltage")
lines(dataOfInterest$Time, dataOfInterest$Voltage)
# Panel 3: the three sub-metering series with a legend
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1),
     c(min(min(dataOfInterest$Sub_metering_1), min(dataOfInterest$Sub_metering_2), min(dataOfInterest$Sub_metering_3)),
       max(max(dataOfInterest$Sub_metering_1), max(dataOfInterest$Sub_metering_2), max(dataOfInterest$Sub_metering_3))),
     type = "n", xlab = "", ylab = "Energy sub metering")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_1, col = "black")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_2, col = "red")
lines(dataOfInterest$Time, dataOfInterest$Sub_metering_3, col = "blue")
legend(x = "topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1, lty = c(1, 1), bty = "n")
# Panel 4: global reactive power
plot(c(min(dataOfInterest$Time), max(dataOfInterest$Time) + 1), c(min(dataOfInterest$Global_reactive_power), max(dataOfInterest$Global_reactive_power)), type = "n", xlab = "datetime", ylab = "Global_reactive_power")
lines(dataOfInterest$Time, dataOfInterest$Global_reactive_power)
dev.off()
|
library(data.table);library(magrittr);library(DT);library(jstable);library(dplyr);library(stats)
#setwd("~/ShinyApps/jihyunbaek/lithium")
# Load the pre-built study dataset. Usage below shows `lithium` is a list
# holding at least: MEDI (medication orders), `clinical data`, and
# `renal function & TDM` -- confirm against the RDS contents.
lithium <- readRDS("lithium.RDS")
# CKD-EPI----------------------------------------
# Estimated GFR by the CKD-EPI 2009 creatinine equation.
#
# scr : serum creatinine (mg/dL, per the equation's coefficients)
# age : age in years at the time of the test
# sex : "F" for female; any other value is treated as male
#
# Returns eGFR in mL/min/1.73 m^2.
#
# Fix/generalization: the original used scalar `if (sex == "F")` and scalar
# `min()`/`max()`, so it was only correct for length-1 inputs (callers work
# around this with `by = seq_len(nrow(df))`). Rewritten with ifelse()/pmin()/
# pmax(): identical results for scalars, and now also correct elementwise on
# whole vectors.
CKDEPI <- function(scr, age, sex) {
  female <- sex == "F"
  k      <- ifelse(female, 0.7, 0.9)       # sex-specific kappa
  alpha  <- ifelse(female, -0.329, -0.411) # sex-specific exponent
  const  <- ifelse(female, 1.018, 1)       # female multiplier
  ratio  <- scr / k
  141 * pmin(ratio, 1)^alpha * pmax(ratio, 1)^(-1.209) * 0.993^age * const
}
# Per-patient prescription-day summary from the medication table.
# Korean source columns (처방일/처방명/일수/횟수) are renamed to
# date/drug/day/times -- presumably prescription date, drug name, days
# supplied, and doses per day; verify against the data dictionary.
df <- lithium$MEDI[, c("NO","처방일","처방명","일수","횟수")]
names(df) <- c("NO","date","drug","day","times")
# Binary drug classification: anything that is not lithium carbonate 300mg
# is treated as valproate.
df[, drug := ifelse(drug == "Lithium carbonate 300mg", "Lithium", "Valproate")]
df <- unique(df)
# Per patient/date/drug: longest prescription (maxday) and the longest
# non-once-daily one (maxnotqd; NA when every order is QD).
df <- df[order(-day), .(maxday = max(day, na.rm = T), maxnotqd = day[which(times != 1)[1]]), by=c("NO","date","drug")]
# qd = number of once-daily days out of maxday for that prescription.
df <- df[, .(maxday, qd = ifelse(is.na(maxnotqd), maxday , maxday - maxnotqd)), by=c("NO","date","drug")]
# Collapse to one row per patient/drug: total prescribed days and QD ratio.
df <- df[, .(totDay = sum(maxday, na.rm = T), qd = sum(qd, na.rm = T)/sum(maxday, na.rm = T)),by = c("NO", "drug")]
# Wide format: totDay_Lithium / totDay_Valproate / qd_Lithium / qd_Valproate.
df.long <- dcast(df, NO ~ drug, value.var = c("totDay", "qd"))
# Align the class of NO on both sides before the join -----------------------
lithium$`clinical data`$NO <- lithium$`clinical data`$NO %>% as.numeric() %>% as.character()
df.long$NO <- df.long$NO %>% as.character()
lithium$`clinical data` <- merge(lithium$`clinical data`, df.long, by = "NO")
## Dx group: classify by free-text primary diagnosis (주상병명).
lithium$`clinical data`[, group_bipolar_schizoaffective_other := factor(ifelse(grepl("Bipolar|bipolar", lithium$`clinical data`$주상병명), "Bipolar disorder",
ifelse(grepl("Schizoaffective|schizoaffective", lithium$`clinical data`$주상병명), "Schizoaffective disorder", "vOthers")))]
# Data inclusion----------------------------------------
# Build the working cohort `a`. HTN/DM here come from the clinical sheet
# (고혈압 여부 / 당뇨 여부); they are replaced later by ICD-based flags.
a <- lithium$`clinical data`[,
.(NO,성별,생년월일,totDay_Lithium,totDay_Valproate,qd_Lithium,qd_Valproate,
HTN = factor(as.integer(!is.na(`고혈압 여부`))), DM = factor(as.integer(!is.na(`당뇨 여부`))), group_bipolar_schizoaffective_other)]
# N_profile accumulates one row per inclusion step:
# (step label, n excluded, n remaining, n in drug==0, n in drug==1).
N_profile<-cbind("전체",NA,nrow(a),NA,NA)
## Patient IDs with TDM results for BOTH Lithium and Valproic Acid
## (table(NO) == 2 after deduplicating test names per patient).
NO.tdmboth <- lithium$`renal function & TDM`[, NO := as.character(NO)][`세부검사명` %in% c("Lithium", "Valproic Acid")][, c("NO", "세부검사명")][, unique(`세부검사명`), by = "NO"][, names(which(table(NO) == 2))]
# Keep monotherapy only (exactly one of the two totDay columns is NA);
# drop patient "2250" and the both-TDM list.
a <- a[xor(is.na(totDay_Lithium),is.na(totDay_Valproate)),,][!(NO %in% c("2250", NO.tdmboth))]
N_profile<-rbind(N_profile,cbind("Li+Valp combination",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),NA,NA))
# drug: 1 = Lithium, 0 = Valproate (lithium patients have totDay_Lithium set).
a[, drug := factor(ifelse(is.na(totDay_Lithium), 0, 1))]
# Restrict to patients with privacy consent in the ICD extract.
ICD_data <- readRDS("ICD_data.RDS")
setnames(ICD_data,c("개인정보동의여부","정렬순서"),c("Privacy Consent","NO"))
ICD_data$NO<-ICD_data$NO %>% as.character()
ICD_data<-ICD_data[`Privacy Consent`=="Y",,]
a<-merge(a,ICD_data[,.(NO),],by="NO")
N_profile<-rbind(N_profile,cbind("개인정보사용미동의",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),a[drug==0,.N,],a[drug==1,.N,]))
# Require more than 180 total prescribed days of either drug.
a<-a[(totDay_Lithium>180 | totDay_Valproate>180),,]
N_profile<-rbind(N_profile,cbind("총처방일수 180일 초과",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),a[drug==0,.N,],a[drug==1,.N,]))
## Dates, age, and ICD-based HTN/DM flags ----------------------------------
df <- lithium$MEDI[, NO := as.character(NO)][,.SD,]
setnames(df,c("처방일","함량단위투여량","일수"),c("date","dose","day"))
# Attach first/last prescription dates and the day-weighted average dose.
data.main <- a %>%
merge(df[,.(firstPrescriptionDay=min(date, na.rm = T)), by = "NO"], by = "NO",all.x = T) %>%
merge(df[,.(lastPrescriptionDay=max(date, na.rm = T)), by = "NO"], by = "NO",all.x = T) %>%
merge(df[, .(avgDose_1day = sum(dose * day)/sum(day)), by = "NO"], by = "NO",all.x = T)
# Age (full years) at first prescription, from birth date (생년월일).
data.main[, Age := floor(as.numeric(as.Date(firstPrescriptionDay) - as.Date(`생년월일`))/365.25)]
## NEW HTN/DM from ICD codes
# Row-wise scan of the ICD table: a cell matching an HTN code (ICD-10
# I10-I16 or ICD-9 401-405) or a DM code (E08-E13 / 250.x) flags the
# patient, and the cell one column BEFORE the match is taken as the
# diagnosis date -- assumes date/code columns alternate; TODO confirm.
HTN_DM <- apply(ICD_data, 1, function(x){
HTN <- substr(x, 1, 3) %in% c(paste0("I", 10:16), 401:405)
HTN_which <- which(HTN == T)[1]
HTN_yn <- as.integer(!is.na(HTN_which))
HTN_date <- ifelse(HTN_yn == 0, NA, x[HTN_which - 1])
DM <- (substr(x, 1, 3) %in% paste0("E", c("08", "09", 10:13))) | (substr(x, 1, 4) %in% paste0("250.", 0:9))
DM_which <- which(DM == T)[1]
DM_yn <- as.integer(!is.na(DM_which))
DM_date <- ifelse(DM_yn == 0, NA, x[DM_which - 1])
return(c(HTN2 = HTN_yn, HTN_date = HTN_date, DM2 = DM_yn, DM_date = DM_date))
}) %>% t %>% data.table
HTN_DM$NO <- ICD_data$NO
# Keep only pre-baseline comorbidity: diagnosis on/before first prescription.
HTN_DM_info <- merge(data.main[, c("NO", "firstPrescriptionDay")], HTN_DM, by = "NO")
HTN_DM_info[, `:=`(HTN = factor(as.integer(HTN2 == 1 & as.Date(HTN_date) <= as.Date(firstPrescriptionDay))),
DM = factor(as.integer(DM2 == 1 & as.Date(DM_date) <= as.Date(firstPrescriptionDay))))]
## Replace the clinical-sheet HTN/DM columns with the ICD-based ones.
data.main <- merge(data.main[, -c("HTN", "DM")], HTN_DM_info[, c("NO", "HTN", "DM")], by = "NO")
# Require at least one psychiatric (F-code) diagnosis, then adult age.
W210216 <- readRDS("W210216.RDS")
setnames(W210216,c("개인정보동의여부","정렬순서"),c("Privacy Consent","NO"))
W210216$NO<-W210216$NO %>% as.character()
W210216<-W210216[`Privacy Consent`=="Y",,]
W210216<-merge(W210216,data.main[,.(NO),],by="NO")
# Concatenate every diagnosis-code column (진단코드*) into one string per row.
W210216<-W210216[,alldiagnosis:=Reduce(paste,.SD),.SDcols=grep("진단코드",colnames(W210216))][,c("NO","alldiagnosis"),]
# Keep rows whose concatenated codes contain an "F" (ICD-10 mental disorders).
W210216<-W210216[alldiagnosis %like% "F",.SD,]
data.main <- merge(data.main,W210216[,.(NO),],by="NO")
N_profile<-rbind(N_profile,cbind("F코드 포함",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
# Adults only: age >= 18 at first prescription.
data.main <- data.main[Age>=18,,]
N_profile<-rbind(N_profile,cbind("첫처방일기준 만 18세 이상",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
# LithiumToxicity----------------------------------------
# TDM results joined with each patient's prescription window.
df <- lithium$`renal function & TDM`[, NO := as.character(NO)][] %>%
merge(data.main[, .(NO, firstPrescriptionDay, lastPrescriptionDay)], by = "NO", all.x = T)
setnames(df,c("세부검사명","결과","시행일시"),c("test","result","testdate"))
# Counts of lithium levels above 0.8 / 1.0 / 1.2 within the prescription
# window, plus mean TDM level per drug from first prescription onward.
data.main <- data.main %>%
merge(df[test=="Lithium" & as.numeric(result) > 1.0 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity1.0 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & as.numeric(result) > 0.8 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity0.8 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & as.numeric(result) > 1.2 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity1.2 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & (testdate - firstPrescriptionDay >= 0), .(avgTDM_Lithium = mean(as.numeric(result), na.rm = T)), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Valproic Acid" & (testdate - firstPrescriptionDay >= 0), .(avgTDM_Valproate = mean(as.numeric(result), na.rm = T)), by="NO"], by="NO", all.x = T)
# Patients with no toxic readings are missing after the left joins; count = 0.
for (v in c("LithiumToxicity1.0", "LithiumToxicity0.8", "LithiumToxicity1.2")){
data.main[[v]] <- ifelse(is.na(data.main[[v]]), 0, data.main[[v]])
}
# Rebuild df as the full lab table with sex/birth date for eGFR calculation.
df<-lithium$`renal function & TDM`
df$NO <- as.character(df$NO)
# NOTE(review): `mult=all` is not a merge.data.table argument (it belongs to
# `[.data.table`); it is silently swallowed by `...` -- likely a no-op.
df <- merge(df, lithium$`clinical data`[,.(NO,`성별`,`생년월일`),], by="NO", mult=all)
df$`결과`<- as.numeric(df$`결과`)
df$시행일시 <- as.Date(df$시행일시); df$생년월일 <- as.Date(df$생년월일)
setnames(df,c("세부검사명","시행일시","생년월일","결과","성별"),c("test", "testDate", "birthDate", "result", "sex"))
df[,age:=as.numeric(testDate-birthDate)/365.25]
# Row-wise CKD-EPI eGFR for creatinine tests only (CKDEPI is scalar, hence
# by=seq_len(nrow(df))); non-creatinine rows get NA.
df[,eGFR:=ifelse(test=="Creatinine",CKDEPI(result,age,sex),NA),by=seq_len(nrow(df))]
## data for figure 1----------------------------------------
# One row per creatinine test: patient, date, eGFR.
data.f1 <- df[!is.na(eGFR),.(NO,testDate,eGFR)]
setnames(data.f1, "testDate", "date")
## Main data----------------------------------------
# Outcome: first date eGFR dropped below 60. Keep patients with at least one
# creatinine test and whose first prescription PRECEDES any eGFR<60 event.
data.main <- merge(data.main, df[eGFR < 60, .(eGFRbelow60Date = min(testDate)), by = "NO"], all.x = TRUE) %>%
merge(df[test == "Creatinine", .(testNum = .N), by="NO"], by="NO", all.x=TRUE) %>%
.[!is.na(testNum) & (is.na(eGFRbelow60Date) | as.Date(firstPrescriptionDay) < as.Date(eGFRbelow60Date))]
data.main<-merge(data.main,data.f1[,.(lastTestDate=max(date)),by="NO"])
# Require >=2 tests and >=6 months between first prescription and last test.
data.main<-data.main[testNum>=2 & (as.Date(lastTestDate)-as.Date(firstPrescriptionDay))/365.25>=0.5,,]
N_profile<-rbind(N_profile,cbind("최소 2개 이상의 eGFR data\n(baseline & 최소 6개월 이상의 post-baseline data)",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
# Follow-up duration: censor at eGFR<60 date when present, else last Rx date.
data.main[, duration := ifelse(is.na(eGFRbelow60Date),as.Date(lastPrescriptionDay) - as.Date(firstPrescriptionDay), as.Date(eGFRbelow60Date) - as.Date(firstPrescriptionDay))]
# Uncensored (full) follow-up in years.
data.main[, year_FU_full := as.numeric(as.Date(lastPrescriptionDay) - as.Date(firstPrescriptionDay))/365.25]
data.main[, eGFRbelow60 := factor(as.integer(!is.na(eGFRbelow60Date)))]
data.main[, `:=`(year_FU= duration/365.25, totYear_Lithium = totDay_Lithium/365.25, totYear_Valproate = totDay_Valproate/365.25)]
setnames(data.main, "성별", "Sex")
data.main[, Sex := factor(Sex)]
# Drop raw working columns no longer needed downstream.
data.main <- data.main[, .SD, .SDcols = -c("생년월일", "firstPrescriptionDay", "lastPrescriptionDay", "duration", "totDay_Valproate", "totDay_Lithium","testNum")]
# NOTE(review): this N_profile label repeats the earlier age-filter label
# ("첫처방일기준 만 18세 이상") although the step above is the eGFR-data
# filter -- looks copy-pasted; confirm the intended label.
N_profile<-rbind(N_profile,cbind("첫처방일기준 만 18세 이상",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
## Figure 1 data----------------------------------------
# Prescription records, reduced to one max-day row per patient/date/drug.
df <- lithium$MEDI[, c("NO","처방일","처방명","일수","횟수")]
names(df) <- c("NO","date","drug","day","times")
df[, drug := factor(ifelse(drug == "Lithium carbonate 300mg", 1, 0))]
df <- unique(df)[, `:=`(NO = as.character(NO), date = as.Date(date))][]
df <- df[, .(maxday = max(day, na.rm = T)), by=c("NO","date","drug")]
# Stack lab rows and prescription rows per patient, sort by date, then take
# the running sum of prescribed days so each eGFR test carries the
# cumulative exposure up to that date.
data.f1 <- rbindlist(list(data.f1, df),use.names = TRUE, fill=TRUE)[order(NO,date)][, maxday:=ifelse(is.na(maxday),0,maxday)][]
data.f1[, cumulativePrescriptionDay := cumsum(maxday),by=.(NO)]
# Keep only the eGFR rows (prescription rows served only to feed cumsum).
data.f1 <- data.f1[!is.na(eGFR), !c("maxday","drug")]
data.f1 <- merge(data.f1, data.main[,.(NO,drug),], by="NO")
## eGFR linear regression ---------------------------------------
data.f1[,cumulativePrescriptionYear:=cumulativePrescriptionDay/365.25,]
# Per-patient eGFR slope (change per cumulative prescription-year).
data.main<-merge(data.main,data.f1[,.(eGFRchange=coef(lm(eGFR~cumulativePrescriptionYear))[2]),by=NO])
## Base eGFR, GFR change----------------------------------------
# Mean eGFR within selected cumulative-exposure windows (year 0, 3, 5, 7,
# 10, 12, 15, 20). base_eGFR is the last test at exactly 0 cumulative days
# when one exists, otherwise the first test.
data.GFRchange <- data.f1[cumulativePrescriptionDay<365.25,.(year0GFR=mean(eGFR,na.rm=T)),by="NO"] %>%
merge(.,data.f1[, .(base_eGFR = ifelse(any(as.integer(cumulativePrescriptionDay) == 0), eGFR[last(which(cumulativePrescriptionDay == 0))], eGFR[1])), by ="NO"], by= "NO", all = T) %>% ## base eGFR
merge(.,data.f1[(365.25*3)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*4),.(year3GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*5)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*6),.(year5GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*7)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*8),.(year7GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*10)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*11),.(year10GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*12)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*13),.(year12GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*15)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*16),.(year15GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*20)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*21),.(year20GFR=mean(eGFR,na.rm=T)),by="NO"],all=T)
data.main<-merge(data.main,data.GFRchange,all=T)
# Exclude advanced baseline CKD (the step LABEL names what is excluded).
data.main<-data.main[base_eGFR>=30,,]
N_profile<-rbind(N_profile,cbind("baseline eGFR<30",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
## ICD-based renal exclusions ----------------------------------
ICD_data<-merge(ICD_data,data.main[,.(NO,drug),],by="NO")
ICD_data<-ICD_data[,alldiagnosis:=Reduce(paste,.SD),.SDcols=grep("진단코드",colnames(ICD_data))][,c("NO","drug","alldiagnosis"),]
# Exclude intrinsic kidney disease N00-N19, but N09 is allowed through
# (the `& !(%like% "N09")` carve-out).
ICD_data<-ICD_data[!(alldiagnosis %like% "N0|N1" & !(alldiagnosis %like% "N09")),.SD,]
N_profile<-rbind(N_profile,cbind("ICD N00-N08 or N10-N19",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
# Exclude kidney-transplant failure/rejection (T86.1) ...
ICD_data<-ICD_data[!(alldiagnosis %like% "T86.1"),.SD,]
N_profile<-rbind(N_profile,cbind("ICD T86.1",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
# ... and kidney-transplant status (Z94.0).
ICD_data<-ICD_data[!(alldiagnosis %like% "Z94.0"),.SD,]
N_profile<-rbind(N_profile,cbind("ICD Z94.0",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
data.main <- merge(data.main,ICD_data[,.(NO),],by="NO")
## F code --------------------------------------
# Psychiatric diagnosis flags from the per-code table (dcode):
# F2x schizophrenia spectrum, F3x mood, F30/F31 bipolar, F32/F33 depressive.
W210226 <- readRDS("W210226.RDS")
setnames(W210226,c("개인정보동의여부","정렬순서","진단코드"),c("Privacy Consent","NO","dcode"))
W210226$NO<-W210226$NO %>% as.character()
W210226<-W210226[`Privacy Consent`=="Y",,]
W210226<-merge(W210226,data.main[,.(NO),],by="NO")
W210226[,schizo:=factor(as.integer((dcode %like% "F2"))),]
W210226[,mood:=factor(as.integer((dcode %like% "F3"))),]
W210226[,bipolar:=factor(as.integer(((dcode %like% "F30")|(dcode %like% "F31")))),]
W210226[,depressive:=factor(as.integer(((dcode %like% "F32")|(dcode %like% "F33")))),]
data.main<-merge(data.main,W210226[,c("NO","schizo","mood","bipolar","depressive"),],by="NO")
## Patient counts by years of drug exposure --------------------------------
# For each year 0..26: how many patients exceed that many cumulative years.
Year_N<-data.frame(Year=0:26,
Lithium_N=sapply(0:26,function(x) data.main[totYear_Lithium>x,.N,]),
Valproate_N=sapply(0:26,function(x) data.main[totYear_Valproate>x,.N,]))
## Counts reaching eGFR<60 in each exposure year ---------------------------
# Restrict data.f1 to the final cohort, and drop zero-exposure tests that
# disagree with the chosen base_eGFR (duplicate baseline readings).
data.f1<-merge(data.f1,data.main[,.(NO),],all.y=TRUE)
data.f1<-merge(data.f1,data.main[,.(NO,base_eGFR),],by="NO",all.x=TRUE)
data.f1<-data.f1[!(cumulativePrescriptionDay==0 & eGFR!=base_eGFR),,]
data.f1<-data.f1[,-c("base_eGFR"),]
#data.f1<-data.f1[eGFR>=30,,]
colnames(N_profile)<-c("조건","제외","N","Valproate","Lithium")
# For each exposure year 0..26 and each drug, the fraction of patients whose
# mean eGFR in that year-window was below 60, formatted as "below60/N".
# Missing drug groups get a literal "NA/NA" row so transpose stays 2-wide.
eGFRbelow60ratio<-
lapply(0:26,function(x){
NthYear<-unique(data.f1[(365.25*x)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*(x+1)),.(NthYeareGFR=mean(eGFR,na.rm=T),drug),by="NO"])
nth<-merge(NthYear[NthYeareGFR<60,.(below60=.N),by=drug],NthYear[,.N,by=drug],by="drug",all=TRUE)
if(NthYear[drug==1,.N,]==0){ nth<-rbind(nth,data.table(drug=1,below60="NA",N="NA")) }
if(NthYear[drug==0,.N,]==0){ nth<-rbind(nth,data.table(drug=0,below60="NA",N="NA")) }
nth[,yn:=paste(below60,N,sep = "/"),]
nth<-data.table::transpose(nth[,4,])
return(nth)}) %>% Reduce(rbind,.)
# Prepend a baseline row: patients whose base_eGFR is already < 60.
eGFRbelow60ratio<-rbind(
merge(data.main[base_eGFR<60,.(aa=.N),by=drug],data.main[,.(bb=.N),by=drug],by="drug",all=TRUE) %>%
.[,.(cc=paste(aa,bb,sep="/")),by="drug"] %>% .[,.(cc),] %>% transpose,
eGFRbelow60ratio)
eGFRbelow60ratio<-as.data.frame(eGFRbelow60ratio)
colnames(eGFRbelow60ratio)<-c("Valproate","Lithium")
rownames(eGFRbelow60ratio)<-c("baseline",unlist(lapply(0:26,function(x){paste0("Year ",x)})))
## Counts by exposure year at the first eGFR<60 occurrence
# Look up, in the global `data.f1` table, the cumulative prescription
# year(s) recorded for patient `ID` on the given first-eGFR<60 date.
findCumYear <- function(ID, eGFRbelow60Date) {
  matched <- data.f1[NO == ID & date == eGFRbelow60Date]
  matched[, cumulativePrescriptionYear]
}
# Exposure year (yy) at which each event patient first hit eGFR<60.
dt<-unique(data.main[eGFRbelow60==1,.(yy=findCumYear(NO,eGFRbelow60Date),drug),by=c("NO","eGFRbelow60Date")])
# Per year 0..20 and per drug: number of first events in that year window.
# Missing drug groups are padded with an explicit zero row so the
# transpose always yields two columns, ordered by drug (0 then 1).
eGFRbelow60Years<-
lapply(0:20,function(x){
nth<-dt[x<=yy & yy<x+1,.N,by=drug]
if(dt[x<=yy & yy<x+1 & drug==0,.N,]==0){ nth<-rbind(nth,data.table(drug=0, N=0)) }
if(dt[x<=yy & yy<x+1 & drug==1,.N,]==0){ nth<-rbind(nth,data.table(drug=1, N=0)) }
nth<-data.table::transpose(nth[order(drug)])[2]
}) %>% Reduce(rbind,.)
eGFRbelow60Years <- as.data.frame(eGFRbelow60Years)
colnames(eGFRbelow60Years)<-c("Valproate","Lithium")
rownames(eGFRbelow60Years)<-unlist(lapply(0:20,function(x){paste0("Year ",x)}))
eGFRbelow60Years$Valproate <- as.integer(eGFRbelow60Years$Valproate)
eGFRbelow60Years$Lithium <- as.integer(eGFRbelow60Years$Lithium)
# Append a totals row.
eGFRbelow60Years<-rbind(eGFRbelow60Years,
data.frame(row.names="Sum",Valproate=sum(eGFRbelow60Years$Valproate),Lithium=sum(eGFRbelow60Years$Lithium)))
## Final table shaping and variable labels ----------------------------------
data.main <- data.main[, -c("NO", "lastTestDate", "eGFRbelow60Date")] ## drop patient ID and raw dates
# Label table for jstable display; Korean val/var labels are UI text.
label.main <- jstable::mk.lev(data.main)
label.main[variable == "eGFRbelow60", `:=`(var_label = "eGFR < 60", val_label = c("No", "Yes"))]
label.main[variable == "drug", `:=`(var_label = "Drug", val_label = c("Valproate", "Lithium"))]
label.main[variable == "DM", `:=`(var_label = "DM", val_label = c("No", "Yes"))]
label.main[variable == "HTN", `:=`(var_label = "HTN", val_label = c("No", "Yes"))]
label.main[variable == "LithiumToxicity1.0", `:=`(var_label = "Lithium > 1.0 횟수")]
label.main[variable == "LithiumToxicity1.2", `:=`(var_label = "Lithium > 1.2 횟수")]
label.main[variable == "LithiumToxicity0.8", `:=`(var_label = "Lithium > 0.8 횟수")]
label.main[variable == "avgDose_1day", `:=`(var_label = "Average 1day dose")]
label.main[variable == "totYear_Lithium", `:=`(var_label = "Cumulative Lithium year")]
label.main[variable == "totYear_Valproate", `:=`(var_label = "Cumulative Valproate year")]
label.main[variable == "qd_Lithium", `:=`(var_label = "Lithium QD proportion")]
label.main[variable == "qd_Valproate", `:=`(var_label = "Valproate QD proportion")]
label.main[variable == "year0GFR", `:=`(var_label = "복용 1년 이내 GFR")]
label.main[variable == "year3GFR", `:=`(var_label = "복용 3년차 GFR")]
label.main[variable == "year5GFR", `:=`(var_label = "복용 5년차 GFR")]
label.main[variable == "year7GFR", `:=`(var_label = "복용 7년차 GFR")]
label.main[variable == "year10GFR", `:=`(var_label = "복용 10년차 GFR")]
label.main[variable == "year12GFR", `:=`(var_label = "복용 12년차 GFR")]
label.main[variable == "year15GFR", `:=`(var_label = "복용 15년차 GFR")]
label.main[variable == "year20GFR", `:=`(var_label = "복용 20년차 GFR")]
## Variable order for the pre-built KM / Cox modules: outcome, follow-up
## time and treatment first, then everything else.
varlist_kmcox <- list(variable = c("eGFRbelow60", "year_FU", "drug", setdiff(names(data.main), c("eGFRbelow60", "year_FU", "drug" ))))
| /shiny/global.R | permissive | zarathucorp/lithium-smcpsy | R | false | false | 18,905 | r | library(data.table);library(magrittr);library(DT);library(jstable);library(dplyr);library(stats)
#setwd("~/ShinyApps/jihyunbaek/lithium")
lithium <- readRDS("lithium.RDS")
# CKD-EPI----------------------------------------
# Estimated GFR by the CKD-EPI 2009 creatinine equation.
#
# scr : serum creatinine (mg/dL, per the equation's coefficients)
# age : age in years at the time of the test
# sex : "F" for female; any other value is treated as male
#
# Returns eGFR in mL/min/1.73 m^2.
#
# Fix/generalization: the original used scalar `if (sex == "F")` and scalar
# `min()`/`max()`, so it was only correct for length-1 inputs (callers work
# around this with `by = seq_len(nrow(df))`). Rewritten with ifelse()/pmin()/
# pmax(): identical results for scalars, and now also correct elementwise on
# whole vectors.
CKDEPI <- function(scr, age, sex) {
  female <- sex == "F"
  k      <- ifelse(female, 0.7, 0.9)       # sex-specific kappa
  alpha  <- ifelse(female, -0.329, -0.411) # sex-specific exponent
  const  <- ifelse(female, 1.018, 1)       # female multiplier
  ratio  <- scr / k
  141 * pmin(ratio, 1)^alpha * pmax(ratio, 1)^(-1.209) * 0.993^age * const
}
df <- lithium$MEDI[, c("NO","처방일","처방명","일수","횟수")]
names(df) <- c("NO","date","drug","day","times")
df[, drug := ifelse(drug == "Lithium carbonate 300mg", "Lithium", "Valproate")]
df <- unique(df)
df <- df[order(-day), .(maxday = max(day, na.rm = T), maxnotqd = day[which(times != 1)[1]]), by=c("NO","date","drug")]
df <- df[, .(maxday, qd = ifelse(is.na(maxnotqd), maxday , maxday - maxnotqd)), by=c("NO","date","drug")]
df <- df[, .(totDay = sum(maxday, na.rm = T), qd = sum(qd, na.rm = T)/sum(maxday, na.rm = T)),by = c("NO", "drug")]
df.long <- dcast(df, NO ~ drug, value.var = c("totDay", "qd"))
#left_join 위해서 NO의 class를 맞춰주기----------------------------------------
lithium$`clinical data`$NO <- lithium$`clinical data`$NO %>% as.numeric() %>% as.character()
df.long$NO <- df.long$NO %>% as.character()
lithium$`clinical data` <- merge(lithium$`clinical data`, df.long, by = "NO")
## Dx group
lithium$`clinical data`[, group_bipolar_schizoaffective_other := factor(ifelse(grepl("Bipolar|bipolar", lithium$`clinical data`$주상병명), "Bipolar disorder",
ifelse(grepl("Schizoaffective|schizoaffective", lithium$`clinical data`$주상병명), "Schizoaffective disorder", "vOthers")))]
# Data inclusion----------------------------------------
a <- lithium$`clinical data`[,
.(NO,성별,생년월일,totDay_Lithium,totDay_Valproate,qd_Lithium,qd_Valproate,
HTN = factor(as.integer(!is.na(`고혈압 여부`))), DM = factor(as.integer(!is.na(`당뇨 여부`))), group_bipolar_schizoaffective_other)]
N_profile<-cbind("전체",NA,nrow(a),NA,NA)
## NO. list: TDM both
NO.tdmboth <- lithium$`renal function & TDM`[, NO := as.character(NO)][`세부검사명` %in% c("Lithium", "Valproic Acid")][, c("NO", "세부검사명")][, unique(`세부검사명`), by = "NO"][, names(which(table(NO) == 2))]
a <- a[xor(is.na(totDay_Lithium),is.na(totDay_Valproate)),,][!(NO %in% c("2250", NO.tdmboth))]
N_profile<-rbind(N_profile,cbind("Li+Valp combination",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),NA,NA))
a[, drug := factor(ifelse(is.na(totDay_Lithium), 0, 1))]
ICD_data <- readRDS("ICD_data.RDS")
setnames(ICD_data,c("개인정보동의여부","정렬순서"),c("Privacy Consent","NO"))
ICD_data$NO<-ICD_data$NO %>% as.character()
ICD_data<-ICD_data[`Privacy Consent`=="Y",,]
a<-merge(a,ICD_data[,.(NO),],by="NO")
N_profile<-rbind(N_profile,cbind("개인정보사용미동의",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),a[drug==0,.N,],a[drug==1,.N,]))
a<-a[(totDay_Lithium>180 | totDay_Valproate>180),,]
N_profile<-rbind(N_profile,cbind("총처방일수 180일 초과",as.integer(N_profile[nrow(N_profile),3])-nrow(a),nrow(a),a[drug==0,.N,],a[drug==1,.N,]))
## Date age----------------------------------------
df <- lithium$MEDI[, NO := as.character(NO)][,.SD,]
setnames(df,c("처방일","함량단위투여량","일수"),c("date","dose","day"))
data.main <- a %>%
merge(df[,.(firstPrescriptionDay=min(date, na.rm = T)), by = "NO"], by = "NO",all.x = T) %>%
merge(df[,.(lastPrescriptionDay=max(date, na.rm = T)), by = "NO"], by = "NO",all.x = T) %>%
merge(df[, .(avgDose_1day = sum(dose * day)/sum(day)), by = "NO"], by = "NO",all.x = T)
data.main[, Age := floor(as.numeric(as.Date(firstPrescriptionDay) - as.Date(`생년월일`))/365.25)]
## NEW HTN/DM
HTN_DM <- apply(ICD_data, 1, function(x){
HTN <- substr(x, 1, 3) %in% c(paste0("I", 10:16), 401:405)
HTN_which <- which(HTN == T)[1]
HTN_yn <- as.integer(!is.na(HTN_which))
HTN_date <- ifelse(HTN_yn == 0, NA, x[HTN_which - 1])
DM <- (substr(x, 1, 3) %in% paste0("E", c("08", "09", 10:13))) | (substr(x, 1, 4) %in% paste0("250.", 0:9))
DM_which <- which(DM == T)[1]
DM_yn <- as.integer(!is.na(DM_which))
DM_date <- ifelse(DM_yn == 0, NA, x[DM_which - 1])
return(c(HTN2 = HTN_yn, HTN_date = HTN_date, DM2 = DM_yn, DM_date = DM_date))
}) %>% t %>% data.table
HTN_DM$NO <- ICD_data$NO
HTN_DM_info <- merge(data.main[, c("NO", "firstPrescriptionDay")], HTN_DM, by = "NO")
HTN_DM_info[, `:=`(HTN = factor(as.integer(HTN2 == 1 & as.Date(HTN_date) <= as.Date(firstPrescriptionDay))),
DM = factor(as.integer(DM2 == 1 & as.Date(DM_date) <= as.Date(firstPrescriptionDay))))]
## merge
data.main <- merge(data.main[, -c("HTN", "DM")], HTN_DM_info[, c("NO", "HTN", "DM")], by = "NO")
W210216 <- readRDS("W210216.RDS")
setnames(W210216,c("개인정보동의여부","정렬순서"),c("Privacy Consent","NO"))
W210216$NO<-W210216$NO %>% as.character()
W210216<-W210216[`Privacy Consent`=="Y",,]
W210216<-merge(W210216,data.main[,.(NO),],by="NO")
W210216<-W210216[,alldiagnosis:=Reduce(paste,.SD),.SDcols=grep("진단코드",colnames(W210216))][,c("NO","alldiagnosis"),]
W210216<-W210216[alldiagnosis %like% "F",.SD,]
data.main <- merge(data.main,W210216[,.(NO),],by="NO")
N_profile<-rbind(N_profile,cbind("F코드 포함",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
data.main <- data.main[Age>=18,,]
N_profile<-rbind(N_profile,cbind("첫처방일기준 만 18세 이상",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
# LithiumToxicity----------------------------------------
df <- lithium$`renal function & TDM`[, NO := as.character(NO)][] %>%
merge(data.main[, .(NO, firstPrescriptionDay, lastPrescriptionDay)], by = "NO", all.x = T)
setnames(df,c("세부검사명","결과","시행일시"),c("test","result","testdate"))
data.main <- data.main %>%
merge(df[test=="Lithium" & as.numeric(result) > 1.0 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity1.0 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & as.numeric(result) > 0.8 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity0.8 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & as.numeric(result) > 1.2 & (testdate - firstPrescriptionDay >= 0) & (lastPrescriptionDay - testdate >= 0), .(LithiumToxicity1.2 = .N), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Lithium" & (testdate - firstPrescriptionDay >= 0), .(avgTDM_Lithium = mean(as.numeric(result), na.rm = T)), by="NO"], by="NO", all.x = T) %>%
merge(df[test=="Valproic Acid" & (testdate - firstPrescriptionDay >= 0), .(avgTDM_Valproate = mean(as.numeric(result), na.rm = T)), by="NO"], by="NO", all.x = T)
for (v in c("LithiumToxicity1.0", "LithiumToxicity0.8", "LithiumToxicity1.2")){
data.main[[v]] <- ifelse(is.na(data.main[[v]]), 0, data.main[[v]])
}
df<-lithium$`renal function & TDM`
df$NO <- as.character(df$NO)
df <- merge(df, lithium$`clinical data`[,.(NO,`성별`,`생년월일`),], by="NO", mult=all)
df$`결과`<- as.numeric(df$`결과`)
df$시행일시 <- as.Date(df$시행일시); df$생년월일 <- as.Date(df$생년월일)
setnames(df,c("세부검사명","시행일시","생년월일","결과","성별"),c("test", "testDate", "birthDate", "result", "sex"))
df[,age:=as.numeric(testDate-birthDate)/365.25]
df[,eGFR:=ifelse(test=="Creatinine",CKDEPI(result,age,sex),NA),by=seq_len(nrow(df))]
## data for figure 1----------------------------------------
data.f1 <- df[!is.na(eGFR),.(NO,testDate,eGFR)]
setnames(data.f1, "testDate", "date")
## Main data----------------------------------------
data.main <- merge(data.main, df[eGFR < 60, .(eGFRbelow60Date = min(testDate)), by = "NO"], all.x = TRUE) %>%
merge(df[test == "Creatinine", .(testNum = .N), by="NO"], by="NO", all.x=TRUE) %>%
.[!is.na(testNum) & (is.na(eGFRbelow60Date) | as.Date(firstPrescriptionDay) < as.Date(eGFRbelow60Date))]
data.main<-merge(data.main,data.f1[,.(lastTestDate=max(date)),by="NO"])
data.main<-data.main[testNum>=2 & (as.Date(lastTestDate)-as.Date(firstPrescriptionDay))/365.25>=0.5,,]
N_profile<-rbind(N_profile,cbind("최소 2개 이상의 eGFR data\n(baseline & 최소 6개월 이상의 post-baseline data)",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
data.main[, duration := ifelse(is.na(eGFRbelow60Date),as.Date(lastPrescriptionDay) - as.Date(firstPrescriptionDay), as.Date(eGFRbelow60Date) - as.Date(firstPrescriptionDay))]
# duration Full
data.main[, year_FU_full := as.numeric(as.Date(lastPrescriptionDay) - as.Date(firstPrescriptionDay))/365.25]
data.main[, eGFRbelow60 := factor(as.integer(!is.na(eGFRbelow60Date)))]
data.main[, `:=`(year_FU= duration/365.25, totYear_Lithium = totDay_Lithium/365.25, totYear_Valproate = totDay_Valproate/365.25)]
setnames(data.main, "성별", "Sex")
data.main[, Sex := factor(Sex)]
data.main <- data.main[, .SD, .SDcols = -c("생년월일", "firstPrescriptionDay", "lastPrescriptionDay", "duration", "totDay_Valproate", "totDay_Lithium","testNum")]
N_profile<-rbind(N_profile,cbind("첫처방일기준 만 18세 이상",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
## Figure 1 data----------------------------------------
# 처방 정보
df <- lithium$MEDI[, c("NO","처방일","처방명","일수","횟수")]
names(df) <- c("NO","date","drug","day","times")
df[, drug := factor(ifelse(drug == "Lithium carbonate 300mg", 1, 0))]
df <- unique(df)[, `:=`(NO = as.character(NO), date = as.Date(date))][]
df <- df[, .(maxday = max(day, na.rm = T)), by=c("NO","date","drug")]
# 일단 합친 후 cumsum
data.f1 <- rbindlist(list(data.f1, df),use.names = TRUE, fill=TRUE)[order(NO,date)][, maxday:=ifelse(is.na(maxday),0,maxday)][]
data.f1[, cumulativePrescriptionDay := cumsum(maxday),by=.(NO)]
data.f1 <- data.f1[!is.na(eGFR), !c("maxday","drug")]
data.f1 <- merge(data.f1, data.main[,.(NO,drug),], by="NO")
## eGFR linear regression ---------------------------------------
data.f1[,cumulativePrescriptionYear:=cumulativePrescriptionDay/365.25,]
data.main<-merge(data.main,data.f1[,.(eGFRchange=coef(lm(eGFR~cumulativePrescriptionYear))[2]),by=NO])
## Base eGFR, GFR change----------------------------------------
data.GFRchange <- data.f1[cumulativePrescriptionDay<365.25,.(year0GFR=mean(eGFR,na.rm=T)),by="NO"] %>%
merge(.,data.f1[, .(base_eGFR = ifelse(any(as.integer(cumulativePrescriptionDay) == 0), eGFR[last(which(cumulativePrescriptionDay == 0))], eGFR[1])), by ="NO"], by= "NO", all = T) %>% ## base eGFR
merge(.,data.f1[(365.25*3)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*4),.(year3GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*5)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*6),.(year5GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*7)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*8),.(year7GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*10)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*11),.(year10GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*12)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*13),.(year12GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*15)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*16),.(year15GFR=mean(eGFR,na.rm=T)),by="NO"],all=T) %>%
merge(.,data.f1[(365.25*20)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*21),.(year20GFR=mean(eGFR,na.rm=T)),by="NO"],all=T)
data.main<-merge(data.main,data.GFRchange,all=T)
data.main<-data.main[base_eGFR>=30,,]
N_profile<-rbind(N_profile,cbind("baseline eGFR<30",as.integer(N_profile[nrow(N_profile),3])-data.main[,.N,],data.main[,.N,],data.main[drug==0,.N,],data.main[drug==1,.N,]))
## ----------------------------------------
ICD_data<-merge(ICD_data,data.main[,.(NO,drug),],by="NO")
# --- ICD-based exclusions -------------------------------------------------
# Collapse every diagnosis-code column (header "진단코드") into a single
# space-separated string per subject, keeping only ID, drug arm and the
# combined diagnosis string.
ICD_data<-ICD_data[,alldiagnosis:=Reduce(paste,.SD),.SDcols=grep("진단코드",colnames(ICD_data))][,c("NO","drug","alldiagnosis"),]
# Exclude subjects with renal ICD codes N00-N08 / N10-N19; rows matching only
# N09 are retained (the N09 pattern is exempted from the N0/N1 match).
ICD_data<-ICD_data[!(alldiagnosis %like% "N0|N1" & !(alldiagnosis %like% "N09")),.SD,]
# Append a flow-chart row: (criterion, n excluded, n remaining, n drug==0, n drug==1).
# Column names assigned below show drug==0 -> Valproate, drug==1 -> Lithium.
N_profile<-rbind(N_profile,cbind("ICD N00-N08 or N10-N19",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
# Exclude kidney-transplant failure code T86.1.
ICD_data<-ICD_data[!(alldiagnosis %like% "T86.1"),.SD,]
N_profile<-rbind(N_profile,cbind("ICD T86.1",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
# Exclude kidney-transplant status code Z94.0.
ICD_data<-ICD_data[!(alldiagnosis %like% "Z94.0"),.SD,]
N_profile<-rbind(N_profile,cbind("ICD Z94.0",as.integer(N_profile[nrow(N_profile),3])-ICD_data[,.N,],ICD_data[,.N,],ICD_data[drug==0,.N,],ICD_data[drug==1,.N,]))
# Keep only subjects that survived all ICD exclusions.
data.main <- merge(data.main,ICD_data[,.(NO),],by="NO")
## F code (psychiatric ICD diagnoses) --------------------------------------
W210226 <- readRDS("W210226.RDS")
# Rename Korean columns: privacy-consent flag, sort order (used as ID), diagnosis code.
setnames(W210226,c("개인정보동의여부","정렬순서","진단코드"),c("Privacy Consent","NO","dcode"))
W210226$NO<-W210226$NO %>% as.character()
# Keep consenting subjects present in the main analysis set.
W210226<-W210226[`Privacy Consent`=="Y",,]
W210226<-merge(W210226,data.main[,.(NO),],by="NO")
# 0/1 factor flags derived from ICD F-code prefixes.
W210226[,schizo:=factor(as.integer((dcode %like% "F2"))),]
W210226[,mood:=factor(as.integer((dcode %like% "F3"))),]
W210226[,bipolar:=factor(as.integer(((dcode %like% "F30")|(dcode %like% "F31")))),]
W210226[,depressive:=factor(as.integer(((dcode %like% "F32")|(dcode %like% "F33")))),]
data.main<-merge(data.main,W210226[,c("NO","schizo","mood","bipolar","depressive"),],by="NO")
## N by cumulative years of drug exposure ----------------------------------------
# Counts of subjects with strictly more than x years of each drug, x = 0..26.
Year_N<-data.frame(Year=0:26,
Lithium_N=sapply(0:26,function(x) data.main[totYear_Lithium>x,.N,]),
Valproate_N=sapply(0:26,function(x) data.main[totYear_Valproate>x,.N,]))
## N with eGFR<60 at each follow-up year ----------------------------------------
data.f1<-merge(data.f1,data.main[,.(NO),],all.y=TRUE)
data.f1<-merge(data.f1,data.main[,.(NO,base_eGFR),],by="NO",all.x=TRUE)
# Drop measurements taken at day 0 whose eGFR is not the baseline value.
data.f1<-data.f1[!(cumulativePrescriptionDay==0 & eGFR!=base_eGFR),,]
data.f1<-data.f1[,-c("base_eGFR"),]
#data.f1<-data.f1[eGFR>=30,,]
colnames(N_profile)<-c("조건","제외","N","Valproate","Lithium")
# For each exposure year x, report "n with mean eGFR<60 / n measured" per drug arm.
eGFRbelow60ratio<-
lapply(0:26,function(x){
# Per-subject mean eGFR within the x-th exposure-year window (365.25-day bins).
NthYear<-unique(data.f1[(365.25*x)<cumulativePrescriptionDay & cumulativePrescriptionDay<(365.25*(x+1)),.(NthYeareGFR=mean(eGFR,na.rm=T),drug),by="NO"])
nth<-merge(NthYear[NthYeareGFR<60,.(below60=.N),by=drug],NthYear[,.N,by=drug],by="drug",all=TRUE)
# Pad with "NA" placeholders if one drug arm has no measurements this year.
if(NthYear[drug==1,.N,]==0){ nth<-rbind(nth,data.table(drug=1,below60="NA",N="NA")) }
if(NthYear[drug==0,.N,]==0){ nth<-rbind(nth,data.table(drug=0,below60="NA",N="NA")) }
nth[,yn:=paste(below60,N,sep = "/"),]
nth<-data.table::transpose(nth[,4,])
return(nth)}) %>% Reduce(rbind,.)
# Prepend the baseline row (n with base_eGFR<60 / n total, per arm).
eGFRbelow60ratio<-rbind(
merge(data.main[base_eGFR<60,.(aa=.N),by=drug],data.main[,.(bb=.N),by=drug],by="drug",all=TRUE) %>%
.[,.(cc=paste(aa,bb,sep="/")),by="drug"] %>% .[,.(cc),] %>% transpose,
eGFRbelow60ratio)
eGFRbelow60ratio<-as.data.frame(eGFRbelow60ratio)
colnames(eGFRbelow60ratio)<-c("Valproate","Lithium")
rownames(eGFRbelow60ratio)<-c("baseline",unlist(lapply(0:26,function(x){paste0("Year ",x)})))
## N by exposure year at first occurrence of eGFR<60
# Look up the cumulative prescription year at a subject's first eGFR<60 date.
findCumYear<-function(ID,eGFRbelow60Date){
return(data.f1[NO==ID & date==eGFRbelow60Date,cumulativePrescriptionYear])
}
dt<-unique(data.main[eGFRbelow60==1,.(yy=findCumYear(NO,eGFRbelow60Date),drug),by=c("NO","eGFRbelow60Date")])
# Count first occurrences falling in each one-year interval [x, x+1), x = 0..20.
eGFRbelow60Years<-
lapply(0:20,function(x){
nth<-dt[x<=yy & yy<x+1,.N,by=drug]
# Ensure both drug arms are represented, padding missing arms with 0.
if(dt[x<=yy & yy<x+1 & drug==0,.N,]==0){ nth<-rbind(nth,data.table(drug=0, N=0)) }
if(dt[x<=yy & yy<x+1 & drug==1,.N,]==0){ nth<-rbind(nth,data.table(drug=1, N=0)) }
nth<-data.table::transpose(nth[order(drug)])[2]
}) %>% Reduce(rbind,.)
eGFRbelow60Years <- as.data.frame(eGFRbelow60Years)
colnames(eGFRbelow60Years)<-c("Valproate","Lithium")
rownames(eGFRbelow60Years)<-unlist(lapply(0:20,function(x){paste0("Year ",x)}))
eGFRbelow60Years$Valproate <- as.integer(eGFRbelow60Years$Valproate)
eGFRbelow60Years$Lithium <- as.integer(eGFRbelow60Years$Lithium)
# Append a column-total row.
eGFRbelow60Years<-rbind(eGFRbelow60Years,
data.frame(row.names="Sum",Valproate=sum(eGFRbelow60Years$Valproate),Lithium=sum(eGFRbelow60Years$Lithium)))
## ----------------------------------------
data.main <- data.main[, -c("NO", "lastTestDate", "eGFRbelow60Date")] ## drop ID/date columns before labelling
# Build a variable/value label table for jstable display modules.
label.main <- jstable::mk.lev(data.main)
label.main[variable == "eGFRbelow60", `:=`(var_label = "eGFR < 60", val_label = c("No", "Yes"))]
label.main[variable == "drug", `:=`(var_label = "Drug", val_label = c("Valproate", "Lithium"))]
label.main[variable == "DM", `:=`(var_label = "DM", val_label = c("No", "Yes"))]
label.main[variable == "HTN", `:=`(var_label = "HTN", val_label = c("No", "Yes"))]
label.main[variable == "LithiumToxicity1.0", `:=`(var_label = "Lithium > 1.0 횟수")]
label.main[variable == "LithiumToxicity1.2", `:=`(var_label = "Lithium > 1.2 횟수")]
label.main[variable == "LithiumToxicity0.8", `:=`(var_label = "Lithium > 0.8 횟수")]
label.main[variable == "avgDose_1day", `:=`(var_label = "Average 1day dose")]
label.main[variable == "totYear_Lithium", `:=`(var_label = "Cumulative Lithium year")]
label.main[variable == "totYear_Valproate", `:=`(var_label = "Cumulative Valproate year")]
label.main[variable == "qd_Lithium", `:=`(var_label = "Lithium QD proportion")]
label.main[variable == "qd_Valproate", `:=`(var_label = "Valproate QD proportion")]
label.main[variable == "year0GFR", `:=`(var_label = "복용 1년 이내 GFR")]
label.main[variable == "year3GFR", `:=`(var_label = "복용 3년차 GFR")]
label.main[variable == "year5GFR", `:=`(var_label = "복용 5년차 GFR")]
label.main[variable == "year7GFR", `:=`(var_label = "복용 7년차 GFR")]
label.main[variable == "year10GFR", `:=`(var_label = "복용 10년차 GFR")]
label.main[variable == "year12GFR", `:=`(var_label = "복용 12년차 GFR")]
label.main[variable == "year15GFR", `:=`(var_label = "복용 15년차 GFR")]
label.main[variable == "year20GFR", `:=`(var_label = "복용 20년차 GFR")]
## variable order: for the pre-built KM / Cox modules
varlist_kmcox <- list(variable = c("eGFRbelow60", "year_FU", "drug", setdiff(names(data.main), c("eGFRbelow60", "year_FU", "drug" ))))
|
library(tidyverse)
library(glmnet)
library(parallel)
# Prepare the data --------------------------------------------------------
# Per-environment yield BLUEs; 2017 sites are held out of the whole analysis.
data <- read_rds("data/phenotype/yield_blue_env.rds") %>%
filter(!str_detect(Site, "2017$"))
# Response
y <- data$BLUE
# Sites for LOO evaluation
sites <- data$Site
site_levels <- unique(sites)
# Predictors (all columns after the first three metadata columns)
X <- data[, -c(1:3)] %>% as.matrix()
# LASSO with leave-one-site-out CV ----------------------------------------
# One worker per site; each worker fits a cv.glmnet on the remaining sites.
cl <- makeCluster(length(site_levels))
clusterEvalQ(cl, library(glmnet))
clusterExport(cl, list("sites", "y", "X"))
res <- parLapply(cl, site_levels, function(s) {
# Partition into training (all other sites) and testing (site s) sets
idx <- which(sites == s)
test_x <- X[idx, ]
test_y <- y[idx]
train_x <- X[-idx, ]
train_y <- y[-idx]
# Train the LASSO model (alpha = 1 => pure L1 penalty)
lasso <- cv.glmnet(train_x, train_y, alpha = 1, nfolds = 20)
# Compute MSE on the left-out site at the 1-SE lambda
pred <- predict(lasso, newx = test_x, type = "response", s = lasso$lambda.1se)
mse <- mean((test_y - drop(pred))^2)
return(list(lasso = lasso, mse = mse))
})
stopCluster(cl)
write_rds(res, "data/weather/lasso_select.rds")
# LASSO with all data -----------------------------------------------------
lasso <- cv.glmnet(X, y, alpha = 1, nfolds = 20)
write_rds(lasso, "data/weather/lasso_all.rds")
# Analysis ----------------------------------------------------------------
# Number of non-zero coefficients selected by cross-validation (1-SE rule),
# one value per leave-one-site-out model.
nzero <- tibble(Site = site_levels,
NZero = map_int(res, function(r) {
idx <- which(r$lasso$lambda == r$lasso$lambda.1se)
r$lasso$nzero[idx]
}))
# Dot plot of per-site counts; red dashed line = count from the all-data fit.
ggplot(nzero, aes(x = Site, y = NZero)) + theme_classic() +
geom_point(size = 3) + theme(axis.text.x = element_text(hjust = 1, angle = 45)) +
geom_hline(yintercept = lasso$nzero[which(lasso$lambda == lasso$lambda.1se)],
linetype = 2, colour = "red") +
labs(x = "", y = "# Non-Zero Coefficients")
ggsave("figures/select/lasso_nzero.pdf", width = 10, height = 6, units = "in", dpi = 300)
mean(nzero$NZero); var(nzero$NZero)
# Variables selected by leave-one-out models at the sparsest solution
# retaining at most 5 coefficients (largest lambda index with nzero <= 5).
variables <- lapply(res, function(r) {
idx <- max(which(r$lasso$nzero <= 5))
r$lasso$glmnet.fit$beta@Dimnames[[1]][which(r$lasso$glmnet.fit$beta[, idx] != 0)]
})
names(variables) <- site_levels
length(unique(unlist(variables, use.names = FALSE)))
# Five most frequently selected predictors across the LOO models.
top5 <- names(sort(table(unlist(variables, use.names = FALSE)), decreasing = TRUE)[1:5])
r2adj_top5 <- lm(y ~ X[, top5]) %>% broom::glance() %>% pull(adj.r.squared)
# Quality of the models: adjusted R^2 of an OLS refit on each selected set.
r2adj <- sapply(variables, function(v) {
lm(y ~ X[, v]) %>% broom::glance() %>% pull(adj.r.squared)
})
# Same refit using the variables chosen by the all-data LASSO (nzero <= 5).
r2adj_all <- lm(y ~ X[, lasso$glmnet.fit$beta@Dimnames[[1]][which(lasso$glmnet.fit$beta[, max(which(lasso$nzero <= 5))] != 0)]]) %>%
broom::glance() %>% pull(adj.r.squared)
# Histogram of per-site adjusted R^2 vs. the all-data value (orange line).
tibble(R2 = r2adj) %>%
ggplot(., aes(x = R2)) + theme_classic() +
geom_histogram(binwidth = 0.01, fill = "skyblue", colour = "black") +
geom_vline(xintercept = round(r2adj_all, 2), linetype = 2, colour = "orange") +
labs(x = expression(R[adj]^2), y = "Count")
ggsave("figures/select/lasso_r2.pdf", width = 6, height = 4, units = "in", dpi = 300)
sum(r2adj >= r2adj_all)/length(site_levels)
# Inspect the selected variables for the best-R^2 and best-MSE LOO models.
best_r2 <- which.max(r2adj)
site_levels[best_r2]
res[[best_r2]]$lasso$glmnet.fit$beta@Dimnames[[1]][which(res[[best_r2]]$lasso$glmnet.fit$beta[, max(which(res[[best_r2]]$lasso$nzero <= 5))] != 0)]
best_mse <- which.min(sapply(res, function(x) x$mse))
site_levels[best_mse]
res[[best_mse]]$lasso$glmnet.fit$beta@Dimnames[[1]][which(res[[best_mse]]$lasso$glmnet.fit$beta[, max(which(res[[best_mse]]$lasso$nzero <= 5))] != 0)]
| /03.variable_selection/old/04b.lasso.R | no_license | amkusmec/genomes2fields | R | false | false | 3,721 | r | library(tidyverse)
library(glmnet)
library(parallel)
# Prepare the data --------------------------------------------------------
data <- read_rds("data/phenotype/yield_blue_env.rds") %>%
filter(!str_detect(Site, "2017$"))
# Response
y <- data$BLUE
# Sites for LOO evaluation
sites <- data$Site
site_levels <- unique(sites)
# Predictors
X <- data[, -c(1:3)] %>% as.matrix()
# LASSO with leave-one-site-out CV ----------------------------------------
cl <- makeCluster(length(site_levels))
clusterEvalQ(cl, library(glmnet))
clusterExport(cl, list("sites", "y", "X"))
res <- parLapply(cl, site_levels, function(s) {
# Partition into training and testing sets
idx <- which(sites == s)
test_x <- X[idx, ]
test_y <- y[idx]
train_x <- X[-idx, ]
train_y <- y[-idx]
# Train the LASSO model
lasso <- cv.glmnet(train_x, train_y, alpha = 1, nfolds = 20)
# Compute MSE on the left out site
pred <- predict(lasso, newx = test_x, type = "response", s = lasso$lambda.1se)
mse <- mean((test_y - drop(pred))^2)
return(list(lasso = lasso, mse = mse))
})
stopCluster(cl)
write_rds(res, "data/weather/lasso_select.rds")
# LASSO with all data -----------------------------------------------------
lasso <- cv.glmnet(X, y, alpha = 1, nfolds = 20)
write_rds(lasso, "data/weather/lasso_all.rds")
# Analysis ----------------------------------------------------------------
# Number of non-zero coefficients selected by cross-validation
nzero <- tibble(Site = site_levels,
NZero = map_int(res, function(r) {
idx <- which(r$lasso$lambda == r$lasso$lambda.1se)
r$lasso$nzero[idx]
}))
ggplot(nzero, aes(x = Site, y = NZero)) + theme_classic() +
geom_point(size = 3) + theme(axis.text.x = element_text(hjust = 1, angle = 45)) +
geom_hline(yintercept = lasso$nzero[which(lasso$lambda == lasso$lambda.1se)],
linetype = 2, colour = "red") +
labs(x = "", y = "# Non-Zero Coefficients")
ggsave("figures/select/lasso_nzero.pdf", width = 10, height = 6, units = "in", dpi = 300)
mean(nzero$NZero); var(nzero$NZero)
# Variables selected by leave-one-out models
variables <- lapply(res, function(r) {
idx <- max(which(r$lasso$nzero <= 5))
r$lasso$glmnet.fit$beta@Dimnames[[1]][which(r$lasso$glmnet.fit$beta[, idx] != 0)]
})
names(variables) <- site_levels
length(unique(unlist(variables, use.names = FALSE)))
top5 <- names(sort(table(unlist(variables, use.names = FALSE)), decreasing = TRUE)[1:5])
r2adj_top5 <- lm(y ~ X[, top5]) %>% broom::glance() %>% pull(adj.r.squared)
# Quality of the models
r2adj <- sapply(variables, function(v) {
lm(y ~ X[, v]) %>% broom::glance() %>% pull(adj.r.squared)
})
r2adj_all <- lm(y ~ X[, lasso$glmnet.fit$beta@Dimnames[[1]][which(lasso$glmnet.fit$beta[, max(which(lasso$nzero <= 5))] != 0)]]) %>%
broom::glance() %>% pull(adj.r.squared)
tibble(R2 = r2adj) %>%
ggplot(., aes(x = R2)) + theme_classic() +
geom_histogram(binwidth = 0.01, fill = "skyblue", colour = "black") +
geom_vline(xintercept = round(r2adj_all, 2), linetype = 2, colour = "orange") +
labs(x = expression(R[adj]^2), y = "Count")
ggsave("figures/select/lasso_r2.pdf", width = 6, height = 4, units = "in", dpi = 300)
sum(r2adj >= r2adj_all)/length(site_levels)
best_r2 <- which.max(r2adj)
site_levels[best_r2]
res[[best_r2]]$lasso$glmnet.fit$beta@Dimnames[[1]][which(res[[best_r2]]$lasso$glmnet.fit$beta[, max(which(res[[best_r2]]$lasso$nzero <= 5))] != 0)]
best_mse <- which.min(sapply(res, function(x) x$mse))
site_levels[best_mse]
res[[best_mse]]$lasso$glmnet.fit$beta@Dimnames[[1]][which(res[[best_mse]]$lasso$glmnet.fit$beta[, max(which(res[[best_mse]]$lasso$nzero <= 5))] != 0)]
|
# Stage1Functions_.R
# Helper functions for Stage 1: estimating the community-level outcomes & getting covariate data
#
# Laura B. Balzer, PhD MPhil
# lbalzer@umass.edu
# Lead Statistician for SEARCH
# Return a logical vector flagging which rows of `data` belong to `subgroup`.
#
# @param data     Data frame of individual-level covariates; the columns read
#                 depend on the subgroup (region_name, sex_0, age_0, moAway_0,
#                 non_circum_0).
# @param subgroup Character scalar: 'All', 'EU', 'SWU', 'Kenya', 'Male',
#                 'Female', 'Young', 'Old', 'NonMobile', or 'UncircMen'.
#                 NULL (or empty) is treated as 'All'. Any unrecognized value
#                 yields all-FALSE, as before.
# @param time     Follow-up year; only used for the age-based subgroups
#                 (passed through to get.age.grp).
# @return Logical vector of length nrow(data); TRUE = row is in the subgroup.
get.subgroup <- function(data, subgroup, time=0){
  # FIX: test for NULL/empty *before* coercing. as.character(NULL) returns
  # character(0) -- not NULL -- so the original is.null() guard never fired
  # and `subgroup == 'All'` then errored with a zero-length if() condition.
  if (is.null(subgroup) || length(subgroup) == 0) {
    subgroup <- 'All'
  }
  subgroup <- as.character(subgroup)
  this.subgroup <- rep(FALSE, nrow(data))
  if (subgroup == 'All') {
    # no subgroup restriction: everyone is included
    # (seq_len is safe for 0-row data, unlike 1:nrow(data))
    this.subgroup[seq_len(nrow(data))] <- TRUE
    # REGION
  } else if (subgroup == 'EU') {
    this.subgroup[ which(data$region_name == 'Eastern Uganda') ] <- TRUE
  } else if (subgroup == 'SWU') {
    this.subgroup[ which(data$region_name == 'Western Uganda') ] <- TRUE
  } else if (subgroup == 'Kenya') {
    this.subgroup[ which(data$region_name == 'Kenya') ] <- TRUE
    # SEX
  } else if (subgroup == 'Male') {
    this.subgroup[ which(data$sex_0) ] <- TRUE
  } else if (subgroup == 'Female') {
    this.subgroup[ which(!data$sex_0) ] <- TRUE
    # AGE -- cutoff depends on follow-up year; computed only when needed
  } else if (subgroup == 'Young') {
    this.subgroup[ get.age.grp(data, time) ] <- TRUE
  } else if (subgroup == 'Old') {
    this.subgroup[ !get.age.grp(data, time) ] <- TRUE
    # MOBILITY AND VMC
  } else if (subgroup == 'NonMobile') {
    this.subgroup[ which(data$moAway_0 < 1) ] <- TRUE
  } else if (subgroup == 'UncircMen') {
    this.subgroup[ which(data$non_circum_0) ] <- TRUE
  }
  print(c(time, subgroup))
  this.subgroup
}
# Classify participants as "young" at a given follow-up year.
#
# Baseline age (age_0) is compared against a cutoff that shrinks by one year
# per follow-up round: <25 at time 0, <24 at time 1, <23 at time 2, and <22
# for any other time value (the switch default).
#
# @param data Data frame with a numeric baseline-age column `age_0`.
# @param time Follow-up year (0, 1, 2, or anything else for the final cutoff).
# @return Logical vector: TRUE where the participant counts as young.
get.age.grp <- function(data, time=0){
  cutoff <- switch(as.character(time),
                   "0" = 25,
                   "1" = 24,
                   "2" = 23,
                   22)
  data$age_0 < cutoff
}
# get.X: assemble the dummy-coded baseline covariate matrix used to predict
# measurement (Delta) and censoring.
#
# @param data     Data frame of baseline ("_0") covariates.
# @param analysis 'HIV' (adds health-seeking indicators), 'NCD' (adds BMI
#                 categories and drops some columns), or 'Cascade'.
# @param time     Follow-up year; for NCD analyses at time > 0, baseline CHC
#                 attendance is added back in.
# @param adj.full If FALSE under 'Cascade', return only mobility + sex.
# @return data.frame of 0/1 indicator covariates, one row per person.
#         For each categorical variable, all-zero rows encode the stated
#         reference level (usually "missing").
get.X <- function(data, analysis='HIV', time=3, adj.full=T){
n <- nrow(data)
# age # reference age group <20 (all age indicators 0)
age.20.29 <- age.30.39 <- age.40.49 <- age.50.59 <- age.60.plus <- rep(0, n)
age.20.29[ which(data$age_0>19 & data$age_0<30) ] <- 1
age.30.39[ which(data$age_0>29 & data$age_0<40) ] <- 1
age.40.49[ which(data$age_0>39 & data$age_0<50) ] <- 1
age.50.59 [which(data$age_0>49 & data$age_0<60) ] <- 1
age.60.plus[which(data$age_0>59) ] <- 1
age.matrix <- data.frame(cbind(age.20.29, age.30.39, age.40.49, age.50.59, age.60.plus))
# marital status: reference is missing
single <- married <- widowed <- divorced.separated <- rep(0, n)
single[ which(data$marital_0==1)] <- 1
married[ which(data$marital_0==2) ] <-1
widowed[ which(data$marital_0 ==3)] <-1
divorced.separated[ which(data$marital_0==4 | data$marital_0==5)] <-1
marital <- data.frame(single, married, widowed, divorced.separated)
# education: reference is less than primary or missing
primary <- as.numeric(data$edu_primary_0)
secondary.plus <- as.numeric(data$edu_secondary_plus_0)
education <- data.frame(primary, secondary.plus)
# occupation: reference NA (indicator columns are logical flags coerced to 0/1)
formal.hi <- as.numeric(data$formal_hi_occup_0)
informal.hi <- as.numeric(data$informal_hi_occup_0)
informal.lo <- as.numeric(data$informal_low_occup_0)
jobless <- as.numeric(data$jobless_0)
student <- as.numeric(data$student_0)
fisherman <- as.numeric(data$fisherman_0)
occupation<- data.frame(formal.hi, informal.hi, informal.lo, jobless, student, fisherman)
# alcohol use: ref is NA (both indicators stay 0 when alcohol_0 is missing)
alcohol.yes <- alcohol.no <- rep(0, n)
alcohol.yes[which(data$alcohol_0) ] <- 1
alcohol.no[which(!data$alcohol_0) ] <- 1
# wealth quintile indicators; reference wealth is NA missing
wealth0 <- wealth1<- wealth2 <- wealth3 <- wealth4 <- rep(0, n)
wealth0[ which(data$wealth_0==0)] <- 1
wealth1[ which(data$wealth_0==1)] <- 1
wealth2[ which(data$wealth_0==2)] <- 1
wealth3[ which(data$wealth_0==3)] <- 1
wealth4[ which(data$wealth_0==4)] <- 1
wealth <- data.frame(cbind(wealth0, wealth1, wealth2, wealth3, wealth4))
#mobility indicators
mobile <- as.numeric(data$mobile_0)
# shifted main residence (reference: missing)
shift.no <- shift.yes <- rep(0,n)
shift.no[which(!data$shifted_0)] <-1
shift.yes[which(data$shifted_0)] <-1
# nights spent at home, binned (reference: missing)
nights <- as.numeric(as.character(data$nightsHome_0))
nights0 <- nights1.2 <- nights3.4 <- nights5 <- rep(0,n)
nights0[which(nights==0)] <-1
nights1.2[which(nights==1 | nights==2)] <-1
nights3.4[which(nights==3 | nights==4)] <-1
nights5[which(nights==5)] <- 1
mobility <- data.frame(mobile, shift.no, shift.yes, nights0, nights1.2, nights3.4, nights5)
# health-seeking: baseline CHC attendance and prior self HIV test
chc.BL <- as.numeric(data$chc_0)
self.hivtest.yes <- self.hivtest.no <- rep(0,n)
self.hivtest.yes[which(data$self_hivtest_0)]<-1
self.hivtest.no[which(!data$self_hivtest_0)] <-1
health<- data.frame(chc.BL, self.hivtest.yes, self.hivtest.no)
male <- rep(0,n)
male[which(data$sex_0)] <- 1
# common adjustment set shared by all analyses
X <- cbind(
age.matrix, marital,
education, occupation,
alcohol.yes, alcohol.no, wealth, mobility, male)
if(analysis=='HIV'){
X<- cbind(X, health)
} else if(analysis=='NCD'){
# BMI categories; reference is underweight or NA
#set NA if <15 or >40 (treated as implausible values)
bmi <- data$bmi_0
bmi[which(bmi<15)] <- NA
bmi[which(bmi>40)] <- NA
bmi.norm <- bmi.over <- bmi.obese <- rep(0,n)
bmi.norm[ which(bmi >=18 & bmi <25) ] <-1
bmi.over[ which(bmi >=25 & bmi <30) ] <-1
bmi.obese[ which(bmi >= 30) ] <-1
X<- cbind(X, bmi.norm, bmi.over, bmi.obese )
# drop columns not used in the NCD adjustment set
X<- subset(X, select=- c( age.20.29,age.30.39, alcohol.no) )
if(time>0){
# adjust for baseline CHC attendance
X<- cbind(X, chc.BL)
}
} else if(analysis=='Cascade' & !adj.full){
# minimal adjustment set for the cascade analysis
X <- data.frame(cbind(mobile, male))
}
X
}
# get.var.bayes: inference for a single TMLE or for a ratio of two TMLEs via
# the delta method, assuming the input estimators are asymptotically linear
# (i.e. representable to first order as an empirical mean of an influence
# curve, IC).
#
# @param mu1   Point estimate (numerator when a ratio is targeted).
# @param mu0   Optional denominator point estimate; NULL for a single TMLE.
# @param IC1   Influence curve values corresponding to mu1.
# @param IC0   Influence curve values corresponding to mu0 (when given).
# @param alpha Significance level for the Wald-type confidence interval.
# @return list(est = data.frame(pt, CI.lo, CI.hi), IC = combined influence curve)
get.var.bayes <- function(mu1, mu0=NULL, IC1, IC0=NULL, alpha=0.05){
  mu1 <- unlist(mu1)
  if (is.null(mu0)) {
    # single TMLE: work directly on the natural scale
    psi <- mu1
    IC <- IC1
    on.log.scale <- FALSE
  } else {
    # ratio of TMLEs (target = mu1/mu0): delta method on the log scale
    mu0 <- unlist(mu0)
    psi <- log(mu1 / mu0)
    IC <- IC1 / mu1 - IC0 / mu0
    on.log.scale <- TRUE
  }
  # variance of an asymptotically linear estimator: var(IC)/n
  se <- sqrt(var(IC) / length(IC))
  # Wald-type interval at level alpha
  z <- qnorm(1 - alpha / 2)
  CI.lo <- psi - z * se
  CI.hi <- psi + z * se
  if (on.log.scale) {
    # back-transform the estimate and interval to the ratio scale
    est <- data.frame(pt = exp(psi), CI.lo = exp(CI.lo), CI.hi = exp(CI.hi))
  } else {
    est <- data.frame(pt = psi, CI.lo = CI.lo, CI.hi = CI.hi)
  }
  list(est = est, IC = IC)
}
#===================================================#===================================================
# SCREENING ALGORITHMS FOR SUPERLEARNER
# Thin wrappers around SuperLearner's screen.corRank / screen.corP that fix
# the number (or minimum number) of covariates retained, so they can be named
# directly in a SuperLearner library specification.
# See SuperLearner help file for more info: ?SuperLearner
#===================================================#===================================================
# keep the top-k covariates ranked by correlation with the outcome
screen.corRank10 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 10, ...)
screen.corRank20 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 20, ...)
screen.corRank5 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 5, ...)
screen.corRank3 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 3, ...)
# correlation p-value screen, retaining at least 3 covariates
screen.corP3<- function(Y, X, family, ...) screen.corP(Y, X, family, minscreen = 3, ...)
#===================================================#===================================================
# FUNCTIONS TO ENCODE OUR DETERMINISTIC KNOWLEDGE
# Passed to ltmle's deterministic.Q.function argument; see ?ltmle and the
# Analysis Plan for details.
#===================================================#===================================================
# deterministicQ_YES
# Once detQ.variable == 1 has been observed, the outcome is known to equal 1
# with probability 1.
deterministicQ_YES <- function(data, current.node, nodes, called.from.estimate.g) {
  det.col <- which(names(data) == "detQ.variable")
  stopifnot(length(det.col) == 1)
  # the deterministic indicator is usable only once it precedes the current node
  if (current.node <= det.col) {
    return(NULL)
  }
  list(is.deterministic = (data[, det.col] == 1), Q.value = 1)
}
# deterministicQ_NO
# Once detQ.variable == 0 has been observed, the outcome is known to equal 0
# with probability 1.
deterministicQ_NO <- function(data, current.node, nodes, called.from.estimate.g) {
  det.col <- which(names(data) == "detQ.variable")
  stopifnot(length(det.col) == 1)
  # the deterministic indicator is usable only once it precedes the current node
  if (current.node <= det.col) {
    return(NULL)
  }
  list(is.deterministic = (data[, det.col] == 0), Q.value = 0)
}
# deterministicQ_combo
# A subject cannot be suppressed if dead, outmigrated, or not on ART:
# when combo == 1 (i.e. D=1 OR M=1 OR eART=0), the outcome Z* is known to be
# 0 with probability 1.
deterministicQ_combo <- function(data, current.node, nodes, called.from.estimate.g) {
  det.col <- which(names(data) == "combo")
  stopifnot(length(det.col) == 1)
  # the deterministic indicator is usable only once it precedes the current node
  if (current.node <= det.col) {
    return(NULL)
  }
  list(is.deterministic = (data[, det.col] == 1), Q.value = 0)
}
| /Stage1_Functions.R | permissive | LauraBalzer/SEARCH_Analysis_Adults | R | false | false | 8,891 | r | # Stage1Functions_.R
# Helper functions for Stage 1: estimating the community-level outcomes & getting covariate data
#
# Laura B. Balzer, PhD MPhil
# lbalzer@umass.edu
# Lead Statistician for SEARCH
get.subgroup <- function(data, subgroup, time=0){
subgroup <- as.character(subgroup)
this.subgroup <- rep(F, nrow(data))
youth <- get.age.grp(data, time)
if(is.null(subgroup) ){
subgroup <- 'All'
}
if(subgroup=='All'){
# if no subgroups of interest
this.subgroup[1:nrow(data) ] <- T
} else if (subgroup=='EU'){
this.subgroup[ which(data$region_name=='Eastern Uganda') ] <- T
} else if (subgroup=='SWU'){
this.subgroup[ which(data$region_name=='Western Uganda') ] <- T
} else if (subgroup=='Kenya'){
this.subgroup[ which(data$region_name=='Kenya') ] <- T
# SEX
} else if(subgroup=='Male'){
this.subgroup[ which(data$sex_0) ] <- T
} else if(subgroup=='Female'){
this.subgroup[ which(!data$sex_0) ] <- T
# AGE
} else if(subgroup=='Young'){
this.subgroup[ youth ] <- T
} else if(subgroup=='Old'){
this.subgroup[ !youth] <- T
# MOBILITY AND VMC
} else if(subgroup=='NonMobile'){
this.subgroup[ which(data$moAway_0 < 1) ] <- T
} else if(subgroup=='UncircMen'){
this.subgroup[ which(data$non_circum_0) ] <- T
}
print(c(time, subgroup))
this.subgroup
}
get.age.grp<- function(data, time=0){
if(time==0){
youth <- data$age_0 < 25
} else if(time==1){
youth <- data$age_0 < 24
} else if(time==2){
youth <- data$age_0 < 23
} else{
youth <- data$age_0 < 22
}
youth
}
# get relevant covariates for predicting Delta & Censoring
get.X <- function(data, analysis='HIV', time=3, adj.full=T){
n <- nrow(data)
# age # reference age group <20
age.20.29 <- age.30.39 <- age.40.49 <- age.50.59 <- age.60.plus <- rep(0, n)
age.20.29[ which(data$age_0>19 & data$age_0<30) ] <- 1
age.30.39[ which(data$age_0>29 & data$age_0<40) ] <- 1
age.40.49[ which(data$age_0>39 & data$age_0<50) ] <- 1
age.50.59 [which(data$age_0>49 & data$age_0<60) ] <- 1
age.60.plus[which(data$age_0>59) ] <- 1
age.matrix <- data.frame(cbind(age.20.29, age.30.39, age.40.49, age.50.59, age.60.plus))
# reference is missing
single <- married <- widowed <- divorced.separated <- rep(0, n)
single[ which(data$marital_0==1)] <- 1
married[ which(data$marital_0==2) ] <-1
widowed[ which(data$marital_0 ==3)] <-1
divorced.separated[ which(data$marital_0==4 | data$marital_0==5)] <-1
marital <- data.frame(single, married, widowed, divorced.separated)
# education: reference is less than primary or missing
primary <- as.numeric(data$edu_primary_0)
secondary.plus <- as.numeric(data$edu_secondary_plus_0)
education <- data.frame(primary, secondary.plus)
# occupation: reference NA
formal.hi <- as.numeric(data$formal_hi_occup_0)
informal.hi <- as.numeric(data$informal_hi_occup_0)
informal.lo <- as.numeric(data$informal_low_occup_0)
jobless <- as.numeric(data$jobless_0)
student <- as.numeric(data$student_0)
fisherman <- as.numeric(data$fisherman_0)
occupation<- data.frame(formal.hi, informal.hi, informal.lo, jobless, student, fisherman)
# alcohol use: ref is NA
alcohol.yes <- alcohol.no <- rep(0, n)
alcohol.yes[which(data$alcohol_0) ] <- 1
alcohol.no[which(!data$alcohol_0) ] <- 1
# reference wealth is NA missing
wealth0 <- wealth1<- wealth2 <- wealth3 <- wealth4 <- rep(0, n)
wealth0[ which(data$wealth_0==0)] <- 1
wealth1[ which(data$wealth_0==1)] <- 1
wealth2[ which(data$wealth_0==2)] <- 1
wealth3[ which(data$wealth_0==3)] <- 1
wealth4[ which(data$wealth_0==4)] <- 1
wealth <- data.frame(cbind(wealth0, wealth1, wealth2, wealth3, wealth4))
#mobility indicators
mobile <- as.numeric(data$mobile_0)
# shifted main residence
shift.no <- shift.yes <- rep(0,n)
shift.no[which(!data$shifted_0)] <-1
shift.yes[which(data$shifted_0)] <-1
# nights home
nights <- as.numeric(as.character(data$nightsHome_0))
nights0 <- nights1.2 <- nights3.4 <- nights5 <- rep(0,n)
nights0[which(nights==0)] <-1
nights1.2[which(nights==1 | nights==2)] <-1
nights3.4[which(nights==3 | nights==4)] <-1
nights5[which(nights==5)] <- 1
mobility <- data.frame(mobile, shift.no, shift.yes, nights0, nights1.2, nights3.4, nights5)
# health-seeking
chc.BL <- as.numeric(data$chc_0)
self.hivtest.yes <- self.hivtest.no <- rep(0,n)
self.hivtest.yes[which(data$self_hivtest_0)]<-1
self.hivtest.no[which(!data$self_hivtest_0)] <-1
health<- data.frame(chc.BL, self.hivtest.yes, self.hivtest.no)
male <- rep(0,n)
male[which(data$sex_0)] <- 1
X <- cbind(
age.matrix, marital,
education, occupation,
alcohol.yes, alcohol.no, wealth, mobility, male)
if(analysis=='HIV'){
X<- cbind(X, health)
} else if(analysis=='NCD'){
# reference is underweight or NA
#set NA if <15 or >40
bmi <- data$bmi_0
bmi[which(bmi<15)] <- NA
bmi[which(bmi>40)] <- NA
bmi.norm <- bmi.over <- bmi.obese <- rep(0,n)
bmi.norm[ which(bmi >=18 & bmi <25) ] <-1
bmi.over[ which(bmi >=25 & bmi <30) ] <-1
bmi.obese[ which(bmi >= 30) ] <-1
X<- cbind(X, bmi.norm, bmi.over, bmi.obese )
X<- subset(X, select=- c( age.20.29,age.30.39, alcohol.no) )
if(time>0){
# adjust for baseline CHC attendance
X<- cbind(X, chc.BL)
}
} else if(analysis=='Cascade' & !adj.full){
X <- data.frame(cbind(mobile, male))
}
X
}
# get.var - function to get inference via the delta method
# assumes inputed estimators are asymptotically linear
# i.e. written in first order as an empircal mean of an influence curve (IC)
# input: point estimates (mu1, mu0), corresponding influence curves (IC1, IC0)
# significance level
# output: point estimate, var, wald-type CI
get.var.bayes <- function(mu1, mu0=NULL, IC1, IC0=NULL, alpha=0.05){
mu1<- unlist(mu1)
if(is.null(mu0)){
# if single TMLE
psi<- mu1
IC<- IC1
log= F
} else {
# if ratio of TMLEs (i.e. target = psi/psi0)
mu0<- unlist(mu0)
# get inference via the delta method on log scale
psi<- log(mu1/mu0)
IC <- 1/mu1*(IC1) - 1/mu0*IC0
log=T
}
# variance of asy lin est is var(IC)/n
var<- var(IC)/length(IC)
# testing and CI
cutoff <- qnorm(alpha/2, lower.tail=F)
se<- sqrt(var)
CI.lo <- psi - cutoff*se
CI.hi <- psi + cutoff*se
if(log){
est<- data.frame(pt=exp(psi), CI.lo=exp(CI.lo), CI.hi=exp(CI.hi) )
}else{
est<- data.frame(pt=psi, CI.lo=CI.lo, CI.hi=CI.hi)
}
list(est=est, IC=IC)
}
#===================================================#===================================================
# SCREENING ALGORITHMS FOR SUPERLEARNER
# See SuperLearner help file for more info: ?SuperLearner
#===================================================#===================================================
screen.corRank10 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 10, ...)
screen.corRank20 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 20, ...)
screen.corRank5 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 5, ...)
screen.corRank3 <- function(Y, X, family, ...) screen.corRank(Y, X, family, rank = 3, ...)
screen.corP3<- function(Y, X, family, ...) screen.corP(Y, X, family, minscreen = 3, ...)
#===================================================#===================================================
# FUNCTIONS TO ENCODE OUR DETERMINISTIC KNOWLEDGE
# See ltmle help file for more info: ?ltmle
# Also see the Analysis Plan
#===================================================#===================================================
# deterministicQ_YES
# if detQ.variable==1, then outcome==1 with probability 1
deterministicQ_YES<- function(data, current.node, nodes, called.from.estimate.g) {
L2.index <- which(names(data) == "detQ.variable")
stopifnot(length(L2.index) == 1)
L2.in.history <- L2.index < current.node
if (! L2.in.history) return(NULL)
is.deterministic <- data[,L2.index]==1
return(list(is.deterministic=is.deterministic, Q.value=1))
}
# deterministicQ_NO
# if detQ.variable==0, then outcome==0 with probability 1
deterministicQ_NO<- function(data, current.node, nodes, called.from.estimate.g) {
L2.index <- which(names(data) == "detQ.variable")
stopifnot(length(L2.index) == 1)
L2.in.history <- L2.index < current.node
if (! L2.in.history) return(NULL)
is.deterministic <- data[,L2.index]== 0
return(list(is.deterministic=is.deterministic, Q.value=0))
}
# deterministicQ_combo
# cannot be suppressed if dead, outmigrated or not on ART
# cannot have Z*=1 if combo= (D=1 OR M=1 OR eART=0)
deterministicQ_combo<- function(data, current.node, nodes, called.from.estimate.g) {
L2.index <- which(names(data) == "combo")
stopifnot(length(L2.index) == 1)
L2.in.history <- L2.index < current.node
if (! L2.in.history) return(NULL)
is.deterministic <- data[,L2.index]==1
return(list(is.deterministic=is.deterministic, Q.value=0))
}
|
#Full length 16S Analysis using DADA2
#EM Sogin
#Update March 2020
#Description: Analysis of PacBio CCS data with the dada2 package:
# primer removal -> length/quality filtering -> error learning -> denoising
# -> sequence table -> taxonomy -> chimera check.
#Set up working space
# NOTE(review): rm(list=ls()) in a script is discouraged; run in a fresh session instead.
rm(list=ls())
#libraries
library(ggplot2)
library(ape)
library(dada2)
library(phyloseq)
library(ShortRead)
library(Biostrings)
#Paths and primer sequences
path<-"/home/maggie/Documents/Projects/MS1_Seagrass/FullLenth_16S/Analysis/"
path_to_data<-file.path(path, 'Data','CCS_10Passes','fastq')
path.rds <- "Results/RDS/"
path.out<-'Results/'
files<-list.files(path_to_data, pattern="fastq", full.names = T)
# full-length 16S primers (forward GM3, reverse GM4)
GM3F<-'AGAGTTTGATCMTGGC'
GM4R<-"TACCTTGTTACGACTT"
rc <- dada2:::rc
theme_set(theme_bw())
##-----------------
##Process data with dada2 package
# 1. Remove primers (orient=TRUE also re-orients reads to the forward primer)
nops<-file.path(path_to_data, 'noprimers',basename(files))
for(i in seq_along(files)) {
fn <- files[[i]]; nop <- nops[[i]]
dada2:::removePrimers(fn, nop, primer.fwd=GM3F, primer.rev=dada2:::rc(GM4R), orient=TRUE, verbose=T)
}
# 2. Inspect sequence length distribution across all samples
lens.fn <- lapply(nops, function(fn) nchar(getSequences(fn)))
lens <- do.call(c, lens.fn)
hist(lens, 100)
summary(lens)
#3. Filter data to control for expected sequence lenghts (at least 1000 nt) and quality control.
#https://academic.oup.com/bioinformatics/article/31/21/3476/194979 for more info on EE filtering of data
filts <- file.path(path_to_data, "noprimers", "filtered", basename(files))
track <- filterAndTrim(nops, filts, minQ=3, minLen=1000, maxLen=1600, maxN=0, rm.phix=FALSE,maxEE=2)
track
# Plot the quality of the basepairs after filtering for each sample
#looks like the quality kicks out after 1600 bp, good place to top the sequence analysis
plotQualityProfile(filts)
plotQualityProfile(filts[1])
#4. Learn error rates (PacBioErrfun is the PacBio-specific error model)
err <- learnErrors(filts, errorEstimationFunction=PacBioErrfun, BAND_SIZE=32, multithread=TRUE)
plotErrors(err)
saveRDS(err, file.path(path.rds, "errors.rds"))
readRDS(file.path(path.rds, "errors.rds"))
#5. Dereplicate fastq files and run dada2 (pooled sample inference)
drp <- derepFastq(filts)
dd <- dada(drp, err=err, multithread=TRUE, BAND_SIZE=32, pool=T)
saveRDS(dd, file = "Results/dada_ASV_full_data.rds")
dd<-readRDS( "Results/dada_ASV_full_data.rds")
# NOTE(review): dd.rare is loaded but never used below -- confirm whether it is still needed.
dd.rare<-readRDS('Results/dada_ASV.rds')
#6. Make sequence table (samples x ASV sequences)
st <- makeSequenceTable(dd); dim(st)
str(st)
rowSums(st)
#7. Assign taxonomy against the GTDB reference
tax <- assignTaxonomy(st, "~/tax/GTDB_bac-arc_ssu_r86.fa.gz", multithread=TRUE)
#8. Check for chimeras (report only; chimeras are not removed here)
bim2 <- isBimeraDenovo(st, minFoldParentOverAbundance=3.5, multithread=TRUE)
table(bim2)
sum(st[,bim2])/ sum(st)
saveRDS(st, 'Results/sequence_table.rds')
saveRDS(tax,'Results/sediment_tax_gtbtk.rds')
#9. Build the ASV count table (ASV_1..ASV_n x samples).
# FIX: the taxonomy object created above is `tax` (from assignTaxonomy);
# the original line referenced an undefined `taxa` here.
taxa <- as.data.frame(tax)
taxa$ASV <- paste("ASV", seq_len(nrow(taxa)), sep = "_")
asv_tab <- t(st)
# Replace full-length sequence rownames with the short ASV identifiers.
asv_ids <- taxa[match(rownames(asv_tab), rownames(taxa)), 'ASV']
row.names(asv_tab) <- asv_ids
head(asv_tab)
write.table(asv_tab, "Results/ASVs_counts.txt", sep = "\t", quote = FALSE, col.names = NA)
#10. Other tasks for getting data into working format
# Incorporate per-sample count data into the fasta headers, e.g.
# ">ASV_1;sampleA_size=12;sampleB_size=3" for samples with non-zero counts.
asv_df<-data.frame(asvs=rownames(asv_tab), asv_tab)
asv_long<-reshape2::melt(asv_df)
asvs<-unique(asv_long$asvs)
# NOTE(review): growing headers.fa with rbind inside the loop is O(n^2);
# building a list and binding once would scale better for large ASV sets.
headers.fa<-data.frame()
for (i in 1:length(asvs)){
v1<-asvs[i]
# rows (sample, count) for this ASV with non-zero abundance
subset<-asv_long[asv_long$asvs==asvs[i] & asv_long$value > 0,]
s<-paste(subset$variable,'_size=',subset$value,sep="")
v2<-paste(t(matrix(s)), collapse = ";")
headers.fa<-rbind(headers.fa, data.frame(v1, v2))
}
headers.fa$headers2<-paste('>',headers.fa$v1,';',headers.fa$v2, sep='')
#match sequences and ASV ids with the new fasta headers
taxa$seqs<-rownames(taxa)
fasta.df<-headers.fa
fasta.df$seqs<-taxa[match(fasta.df$v1, taxa$ASV),'seqs']
#Make fasta file: interleave header lines and sequence lines
asv_fasta<-c(rbind(fasta.df$headers2, fasta.df$seqs))
write(asv_fasta, "Results/ASVs.fa")
#11. Make phyloseq object
#OTU table
head(asv_tab)
otus<-otu_table(asv_tab, taxa_are_rows = T)
#sample table
samples<-data.frame(samples=colnames(asv_tab), location=rep(c('Out', 'Edge','In'), c(3,3,3)))
samps<-sample_data(samples)
rownames(samps)<-samples$samples
#Taxa Table
taxa.df<-data.frame(taxa[,colnames(taxa) %in% c("Kingdom" ,"Phylum", "Class", "Order", "Family", "Genus" )])
rownames(taxa.df)<-taxa$ASV
tax.mat<-as.matrix(taxa.df)
tx<-tax_table(tax.mat)
#merge
ps<-merge_phyloseq(samps, otus, tx)
save(list=c('ps','taxa'), file = 'Results/16s_phyloseq.RData')
#END | /scripts/metagenomic/pac_bio_16S.R | no_license | esogin/sweet_spots_in_the_sea | R | false | false | 4,340 | r | #Full length 16S Analysis using DADA2
#EM Sogin
#Update March 2020
#Description: Analysis of pacbio data with dada2 package
#Set up working space
rm(list=ls())
#libraries
library(ggplot2)
library(ape)
library(dada2)
library(phyloseq)
library(ShortRead)
library(Biostrings)
#Others
path<-"/home/maggie/Documents/Projects/MS1_Seagrass/FullLenth_16S/Analysis/"
path_to_data<-file.path(path, 'Data','CCS_10Passes','fastq')
path.rds <- "Results/RDS/"
path.out<-'Results/'
files<-list.files(path_to_data, pattern="fastq", full.names = T)
GM3F<-'AGAGTTTGATCMTGGC'
GM4R<-"TACCTTGTTACGACTT"
rc <- dada2:::rc
theme_set(theme_bw())
##-----------------
##Process data with dada2 package
# 1. Remove primers
nops<-file.path(path_to_data, 'noprimers',basename(files))
for(i in seq_along(files)) {
fn <- files[[i]]; nop <- nops[[i]]
dada2:::removePrimers(fn, nop, primer.fwd=GM3F, primer.rev=dada2:::rc(GM4R), orient=TRUE, verbose=T)
}
# 2. Inspect sequence length distribution
lens.fn <- lapply(nops, function(fn) nchar(getSequences(fn)))
lens <- do.call(c, lens.fn)
hist(lens, 100)
summary(lens)
#3. Filter data to control for expected sequence lenghts (at least 1000 nt) and quality control.
#https://academic.oup.com/bioinformatics/article/31/21/3476/194979 for more info on EE filtering of data
filts <- file.path(path_to_data, "noprimers", "filtered", basename(files))
track <- filterAndTrim(nops, filts, minQ=3, minLen=1000, maxLen=1600, maxN=0, rm.phix=FALSE,maxEE=2)
track
# Plot the quality of the basepairs after filtering for each sample
#looks like the quality kicks out after 1600 bp, good place to top the sequence analysis
plotQualityProfile(filts)
plotQualityProfile(filts[1])
#4. Learn error rates
err <- learnErrors(filts, errorEstimationFunction=PacBioErrfun, BAND_SIZE=32, multithread=TRUE)
plotErrors(err)
saveRDS(err, file.path(path.rds, "errors.rds"))
readRDS(file.path(path.rds, "errors.rds"))
#5. Dereplicate fastq files and run dada2
drp <- derepFastq(filts)
dd <- dada(drp, err=err, multithread=TRUE, BAND_SIZE=32, pool=T)
saveRDS(dd, file = "Results/dada_ASV_full_data.rds")
dd<-readRDS( "Results/dada_ASV_full_data.rds")
dd.rare<-readRDS('Results/dada_ASV.rds')
#6. Make sequence table
st <- makeSequenceTable(dd); dim(st)
str(st)
rowSums(st)
#7. Assign taxonomy
tax <- assignTaxonomy(st, "~/tax/GTDB_bac-arc_ssu_r86.fa.gz", multithread=TRUE)
#8. Check for chimeras
bim2 <- isBimeraDenovo(st, minFoldParentOverAbundance=3.5, multithread=TRUE)
table(bim2)
sum(st[,bim2])/ sum(st)
saveRDS(st, 'Results/sequence_table.rds')
saveRDS(tax,'Results/sediment_tax_gtbtk.rds')
#9. Get count table
taxa<-as.data.frame(taxa)
taxa$ASV<-paste("ASV",seq(1:nrow(taxa)), sep="_")
asv_tab <- t(st)
names<-taxa[match(rownames(asv_tab), rownames(taxa)),'ASV']
row.names(asv_tab) <-names
head(asv_tab)
write.table(asv_tab, "Results/ASVs_counts.txt", sep="\t", quote=F, col.names=NA)
#10. Other tasks for getting data into working format
# incorperate count data and sample names into fasta headers
asv_df<-data.frame(asvs=rownames(asv_tab), asv_tab)
asv_long<-reshape2::melt(asv_df)
asvs<-unique(asv_long$asvs)
headers.fa<-data.frame()
for (i in 1:length(asvs)){
v1<-asvs[i]
subset<-asv_long[asv_long$asvs==asvs[i] & asv_long$value > 0,]
s<-paste(subset$variable,'_size=',subset$value,sep="")
v2<-paste(t(matrix(s)), collapse = ";")
headers.fa<-rbind(headers.fa, data.frame(v1, v2))
}
headers.fa$headers2<-paste('>',headers.fa$v1,';',headers.fa$v2, sep='')
#match sequences and asv ideas with new fasta headers
taxa$seqs<-rownames(taxa)
fasta.df<-headers.fa
fasta.df$seqs<-taxa[match(fasta.df$v1, taxa$ASV),'seqs']
#Make fasta file
asv_fasta<-c(rbind(fasta.df$headers2, fasta.df$seqs))
write(asv_fasta, "Results/ASVs.fa")
#11. Make phyloseq object
#OTU table
head(asv_tab)
otus<-otu_table(asv_tab, taxa_are_rows = T)
#sample table
samples<-data.frame(samples=colnames(asv_tab), location=rep(c('Out', 'Edge','In'), c(3,3,3)))
samps<-sample_data(samples)
rownames(samps)<-samples$samples
#Taxa Table
taxa.df<-data.frame(taxa[,colnames(taxa) %in% c("Kingdom" ,"Phylum", "Class", "Order", "Family", "Genus" )])
rownames(taxa.df)<-taxa$ASV
tax.mat<-as.matrix(taxa.df)
tx<-tax_table(tax.mat)
#merge
ps<-merge_phyloseq(samps, otus, tx)
save(list=c('ps','taxa'), file = 'Results/16s_phyloseq.RData')
#END |
library(geozoning)
### Name: testInterSpeZ1
### Title: testInterSpeZ1
### Aliases: testInterSpeZ1
### Keywords: internal
### ** Examples
## No test:
data(mapTest)
qProb=c(0.2,0.5)
ZK = initialZoning(qProb, mapTest)
K=ZK$resZ
Z=K$zonePolygone
plotZ(Z)
Z58=rgeos::gConvexHull(rgeos::gUnion(Z[[8]],Z[[5]]))
Z[[length(Z)+1]]=Z58 # add new zone to zoning
plotZ(Z)
geozoning:::testInterSpe(Z,6,length(Z))
## End(No test)
| /data/genthat_extracted_code/geozoning/examples/testInterSpeZ1.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 423 | r | library(geozoning)
### Name: testInterSpeZ1
### Title: testInterSpeZ1
### Aliases: testInterSpeZ1
### Keywords: internal
### ** Examples
## No test:
data(mapTest)
qProb=c(0.2,0.5)
ZK = initialZoning(qProb, mapTest)
K=ZK$resZ
Z=K$zonePolygone
plotZ(Z)
Z58=rgeos::gConvexHull(rgeos::gUnion(Z[[8]],Z[[5]]))
Z[[length(Z)+1]]=Z58 # add new zone to zoning
plotZ(Z)
geozoning:::testInterSpe(Z,6,length(Z))
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial.R
\name{UTM30}
\alias{UTM30}
\title{UTM30}
\usage{
UTM30(x)
}
\arguments{
\item{x}{}
}
\description{
UTM30
}
| /man/UTM30.Rd | no_license | davesteps/randomFuns | R | false | true | 196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial.R
\name{UTM30}
\alias{UTM30}
\title{UTM30}
\usage{
UTM30(x)
}
\arguments{
\item{x}{}
}
\description{
UTM30
}
|
library(magick)
library(hexSticker)
ua <- image_read("img/ua_white_noname.png")
#raven <- image_flop(raven) # flying away
#raven <- image_background(raven, "none")
print(sticker(ua,
package = "maranR",
#p_family = "Lato",
p_size = 6,
p_y = 1.5,
p_color = "#ffffff",
s_x = 1.02,
s_y = .9,
s_width = 1.5,
s_height = 1.5,
h_size = 1.5,
h_color = "#612E2B", #"#",
h_fill = "#000000"
))
| /maranr_gen.R | no_license | LizLeki/hexSticker_generation | R | false | false | 575 | r | library(magick)
library(hexSticker)
ua <- image_read("img/ua_white_noname.png")
#raven <- image_flop(raven) # flying away
#raven <- image_background(raven, "none")
print(sticker(ua,
package = "maranR",
#p_family = "Lato",
p_size = 6,
p_y = 1.5,
p_color = "#ffffff",
s_x = 1.02,
s_y = .9,
s_width = 1.5,
s_height = 1.5,
h_size = 1.5,
h_color = "#612E2B", #"#",
h_fill = "#000000"
))
|
# Loading data
file_path <- c('states/geo1_ar1970_2010/geo1_ar1970_2010.shp',
'states/geo1_bo1976_2001/geo1_bo1976_2001.shp',
'states/geo1_cl1982_2002/geo1_cl1982_2002.shp',
'states/geo1_co1964_2005/geo1_co1964_2005.shp',
'states/geo1_cr1963_2011/geo1_cr1963_2011.shp',
'states/geo1_cu2002_2002/geo1_cu2002_2002.shp',
'states/geo1_do1960_2010/geo1_do1960_2010.shp',
'states/geo1_ec1962_2010/geo1_ec1962_2010.shp',
'states/geo1_gt1964_2002/geo1_gt1964_2002.shp',
'states/geo1_hn1961_2001/geo1_hn1961_2001.shp',
'states/geo1_mx1960_2015/geo1_mx1960_2015.shp',
'states/geo1_ni1971_2005/geo1_ni1971_2005.shp',
'states/geo1_pa1960_2010/geo1_pa1960_2010.shp',
'states/geo1_pe1993_2007/geo1_pe1993_2007.shp',
'states/geo1_py1962_2002/geo1_py1962_2002.shp',
'states/geo1_sv1992_2007/geo1_sv1992_2007.shp',
'states/geo1_uy1963_2011/geo1_uy1963_2011.shp',
'states/geo1_ve1971_2001/geo1_ve1971_2001.shp')
path <- file_path[1]
loading_states <- function(path){
states <- readOGR(path, substr(path, 25, nchar(path)-4))
states <- spTransform(states, crs(south_america))
return(states)
}
states <- sapply(file_path, function(x) loading_states(x))
states <- bind(states)
## Aggregating data by state
## Measuring the consentration along the coastline
## Nightlights in coastal areas
coast_by_state <- function(x){
state <- states[rownames(states@data) == x,]
df <- over(state, south_america, returnList = T)[[1]]
coastal <- sum(df[df$coastal == 1,]$night_lights, na.rm = T)
total <- sum(df$night_lights, na.rm = T)
return(coastal/total)
}
states@data$coast_night_lights <- sapply(rownames(states@data),
function(x) coast_by_state(x))
## Nightlights in coastal areas
coast_by_state <- function(x){
state <- states[rownames(states@data) == x,]
df <- over(state, south_america, returnList = T)[[1]]
coastal <- sum(df[df$coastal == 1,]$pop, na.rm = T)
total <- sum(df$pop, na.rm = T)
return(coastal/total)
}
states@data$coast_pop <- sapply(rownames(states@data),
function(x) coast_by_state(x))
## Find change in market access for each state
access_by_state <- function(x, var){
state <- states[rownames(states@data) == x,]
return(mean(over(state, south_america, returnList = T)[[1]][[var]],
na.rm=T))
}
# Travel time
states@data$ma <- sapply(rownames(states@data),
function(x) access_by_state(x, 'log_d'))
# Mineral deposits
states@data$mine <- sapply(rownames(states@data),
function(x) access_by_state(x, 'mine'))
# Cotton suitability
states@data$cotton <- sapply(rownames(states@data),
function(x) access_by_state(x, 'cotton'))
# Cotton suitability
states@data$coffee <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coffee'))
# Banana suitability
states@data$banana <- sapply(rownames(states@data),
function(x) access_by_state(x, 'banana'))
# Terrain ruggedness
states@data$tri <- sapply(rownames(states@data),
function(x) access_by_state(x, 'tri'))
# Slope
states@data$slope <- sapply(rownames(states@data),
function(x) access_by_state(x, 'slope'))
# Temperature
states@data$bio1 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'bio1'))
# Precipitation
states@data$bio12 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'bio12'))
# Share of cells in a coastal area
states@data$coastal <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coastal'))
## Mean distance to historical port
states@data$dist_port_1777 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'dist_port_1777'))
## Mean population
states@data$pop <- sapply(rownames(states@data),
function(x) access_by_state(x, 'pop'))
## Mean elevation
states@data$elev <- sapply(rownames(states@data),
function(x) access_by_state(x, 'elev'))
## Mean distance to coastline
states@data$coast_ds <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coast_ds'))
## Finding the audiencia and viceroyalty
access_by_state <- function(x, var){
state <- states[rownames(states@data) == x,]
return(over(state, south_america)[[var]])
}
states@data$audiencia <- sapply(rownames(states@data),
function(x) access_by_state(x, 'audiencia'))
states@data$viceroyalty <- sapply(rownames(states@data),
function(x) access_by_state(x, 'viceroyalty'))
states_level <- states@data %>%
mutate(ma1 = ifelse(ma>1.032, 1, 0)) %>%
dplyr::rename(country = CNTRY_NAME,
state = ADMIN_NAME) %>%
filter(!is.na(ma))
## Merging with data from Maloney and Caicedo
# id <- read_csv('states/data.csv') %>%
# dplyr::full_join(data, by = c('country', 'state')) %>%
# mutate(id = as.character(id)) %>%
# dplyr::full_join(df, by = c('country', 'state')) %>%
# filter(!is.na(id))
#
# rownames(id) <- id$id
#
# states1 <- states[rownames(states@data) %in% id$id, ]
# states1 <- SpatialPolygonsDataFrame(states1, id, match.ID = TRUE)
# m1 <- felm(data = df,
# coast_night_lights~ma1 + coastal +dist_port_1777|
# country|
# 0|
# state)
| /scripts/data_prep_state_level.R | no_license | sebastianellingsen/ports_ml | R | false | false | 5,865 | r |
# Loading data
file_path <- c('states/geo1_ar1970_2010/geo1_ar1970_2010.shp',
'states/geo1_bo1976_2001/geo1_bo1976_2001.shp',
'states/geo1_cl1982_2002/geo1_cl1982_2002.shp',
'states/geo1_co1964_2005/geo1_co1964_2005.shp',
'states/geo1_cr1963_2011/geo1_cr1963_2011.shp',
'states/geo1_cu2002_2002/geo1_cu2002_2002.shp',
'states/geo1_do1960_2010/geo1_do1960_2010.shp',
'states/geo1_ec1962_2010/geo1_ec1962_2010.shp',
'states/geo1_gt1964_2002/geo1_gt1964_2002.shp',
'states/geo1_hn1961_2001/geo1_hn1961_2001.shp',
'states/geo1_mx1960_2015/geo1_mx1960_2015.shp',
'states/geo1_ni1971_2005/geo1_ni1971_2005.shp',
'states/geo1_pa1960_2010/geo1_pa1960_2010.shp',
'states/geo1_pe1993_2007/geo1_pe1993_2007.shp',
'states/geo1_py1962_2002/geo1_py1962_2002.shp',
'states/geo1_sv1992_2007/geo1_sv1992_2007.shp',
'states/geo1_uy1963_2011/geo1_uy1963_2011.shp',
'states/geo1_ve1971_2001/geo1_ve1971_2001.shp')
path <- file_path[1]
loading_states <- function(path){
states <- readOGR(path, substr(path, 25, nchar(path)-4))
states <- spTransform(states, crs(south_america))
return(states)
}
states <- sapply(file_path, function(x) loading_states(x))
states <- bind(states)
## Aggregating data by state
## Measuring the consentration along the coastline
## Nightlights in coastal areas
coast_by_state <- function(x){
state <- states[rownames(states@data) == x,]
df <- over(state, south_america, returnList = T)[[1]]
coastal <- sum(df[df$coastal == 1,]$night_lights, na.rm = T)
total <- sum(df$night_lights, na.rm = T)
return(coastal/total)
}
states@data$coast_night_lights <- sapply(rownames(states@data),
function(x) coast_by_state(x))
## Nightlights in coastal areas
coast_by_state <- function(x){
state <- states[rownames(states@data) == x,]
df <- over(state, south_america, returnList = T)[[1]]
coastal <- sum(df[df$coastal == 1,]$pop, na.rm = T)
total <- sum(df$pop, na.rm = T)
return(coastal/total)
}
states@data$coast_pop <- sapply(rownames(states@data),
function(x) coast_by_state(x))
## Find change in market access for each state
access_by_state <- function(x, var){
state <- states[rownames(states@data) == x,]
return(mean(over(state, south_america, returnList = T)[[1]][[var]],
na.rm=T))
}
# Travel time
states@data$ma <- sapply(rownames(states@data),
function(x) access_by_state(x, 'log_d'))
# Mineral deposits
states@data$mine <- sapply(rownames(states@data),
function(x) access_by_state(x, 'mine'))
# Cotton suitability
states@data$cotton <- sapply(rownames(states@data),
function(x) access_by_state(x, 'cotton'))
# Cotton suitability
states@data$coffee <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coffee'))
# Banana suitability
states@data$banana <- sapply(rownames(states@data),
function(x) access_by_state(x, 'banana'))
# Terrain ruggedness
states@data$tri <- sapply(rownames(states@data),
function(x) access_by_state(x, 'tri'))
# Slope
states@data$slope <- sapply(rownames(states@data),
function(x) access_by_state(x, 'slope'))
# Temperature
states@data$bio1 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'bio1'))
# Precipitation
states@data$bio12 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'bio12'))
# Share of cells in a coastal area
states@data$coastal <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coastal'))
## Mean distance to historical port
states@data$dist_port_1777 <- sapply(rownames(states@data),
function(x) access_by_state(x, 'dist_port_1777'))
## Mean population
states@data$pop <- sapply(rownames(states@data),
function(x) access_by_state(x, 'pop'))
## Mean elevation
states@data$elev <- sapply(rownames(states@data),
function(x) access_by_state(x, 'elev'))
## Mean distance to coastline
states@data$coast_ds <- sapply(rownames(states@data),
function(x) access_by_state(x, 'coast_ds'))
## Finding the audiencia and viceroyalty
access_by_state <- function(x, var){
state <- states[rownames(states@data) == x,]
return(over(state, south_america)[[var]])
}
states@data$audiencia <- sapply(rownames(states@data),
function(x) access_by_state(x, 'audiencia'))
states@data$viceroyalty <- sapply(rownames(states@data),
function(x) access_by_state(x, 'viceroyalty'))
states_level <- states@data %>%
mutate(ma1 = ifelse(ma>1.032, 1, 0)) %>%
dplyr::rename(country = CNTRY_NAME,
state = ADMIN_NAME) %>%
filter(!is.na(ma))
## Merging with data from Maloney and Caicedo
# id <- read_csv('states/data.csv') %>%
# dplyr::full_join(data, by = c('country', 'state')) %>%
# mutate(id = as.character(id)) %>%
# dplyr::full_join(df, by = c('country', 'state')) %>%
# filter(!is.na(id))
#
# rownames(id) <- id$id
#
# states1 <- states[rownames(states@data) %in% id$id, ]
# states1 <- SpatialPolygonsDataFrame(states1, id, match.ID = TRUE)
# m1 <- felm(data = df,
# coast_night_lights~ma1 + coastal +dist_port_1777|
# country|
# 0|
# state)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lock5withR-package.R
\docType{data}
\name{RandomP50N200}
\alias{RandomP50N200}
\title{Simulated proportions}
\format{A data frame with 5000 observations on the following 2 variables.
\itemize{
\item{\code{Count}} {Number of simulated "yes" responses in 200
trials}
\item{\code{Phat}} {Sample proportion (Count/200)} }}
\source{
Computer simulation
}
\description{
Counts and proportions for 5000 simulated samples with n=200 and p=0.50
}
\details{
Results from 5000 simulations of samples of size n=200 from a population
with proportoin of "yes" responses at p=0.50.
}
\examples{
data(RandomP50N200)
}
\keyword{datasets}
| /man/RandomP50N200.Rd | no_license | klaassenj/Lock5withR | R | false | true | 710 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lock5withR-package.R
\docType{data}
\name{RandomP50N200}
\alias{RandomP50N200}
\title{Simulated proportions}
\format{A data frame with 5000 observations on the following 2 variables.
\itemize{
\item{\code{Count}} {Number of simulated "yes" responses in 200
trials}
\item{\code{Phat}} {Sample proportion (Count/200)} }}
\source{
Computer simulation
}
\description{
Counts and proportions for 5000 simulated samples with n=200 and p=0.50
}
\details{
Results from 5000 simulations of samples of size n=200 from a population
with proportoin of "yes" responses at p=0.50.
}
\examples{
data(RandomP50N200)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{human_equilibrium_no_het}
\alias{human_equilibrium_no_het}
\title{Equilibrium solution without biting heterogeneity}
\usage{
human_equilibrium_no_het(EIR, ft, p, age)
}
\arguments{
\item{EIR}{EIR for adults, in units of infectious bites per person per year}
\item{ft}{proportion of clinical cases effectively treated}
\item{p}{vector of model parameters}
\item{age}{vector of age groups, in units of years}
}
\description{
Returns the equilibrium states for the model of Griffin et al.
(2014). A derivation of the equilibrium solutions can be found in Griffin
(2016).
This function does not account for biting heterogeneity - see
\code{human_equilibrium()} for function that takes this into account.
}
\references{
Griffin et. al. (2014). Estimates of the changing age-burden of
Plasmodium falciparum malaria disease in sub-Saharan Africa.
doi:10.1038/ncomms4136
Griffin (2016). Is a reproduction number of one a threshold for Plasmodium
falciparum malaria elimination? doi:10.1186/s12936-016-1437-9 (see
supplementary material)
}
| /man/human_equilibrium_no_het.Rd | permissive | mrc-ide/malariaEquilibrium | R | false | true | 1,145 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{human_equilibrium_no_het}
\alias{human_equilibrium_no_het}
\title{Equilibrium solution without biting heterogeneity}
\usage{
human_equilibrium_no_het(EIR, ft, p, age)
}
\arguments{
\item{EIR}{EIR for adults, in units of infectious bites per person per year}
\item{ft}{proportion of clinical cases effectively treated}
\item{p}{vector of model parameters}
\item{age}{vector of age groups, in units of years}
}
\description{
Returns the equilibrium states for the model of Griffin et al.
(2014). A derivation of the equilibrium solutions can be found in Griffin
(2016).
This function does not account for biting heterogeneity - see
\code{human_equilibrium()} for function that takes this into account.
}
\references{
Griffin et. al. (2014). Estimates of the changing age-burden of
Plasmodium falciparum malaria disease in sub-Saharan Africa.
doi:10.1038/ncomms4136
Griffin (2016). Is a reproduction number of one a threshold for Plasmodium
falciparum malaria elimination? doi:10.1186/s12936-016-1437-9 (see
supplementary material)
}
|
## These are some of the core functions used in the analyses
## Some initial setup
library(RColorBrewer)
palette(brewer.pal(8,"Dark2"))
library("rootSolve")
library("deSolve")
library(xtable)
library(fields)
############################
## The model ###
###########################
##' Single age class model for adult TB
##' This model has an explicit Tx compartment and a presymptomatic compartment
##' @param t
##' @param y
##' @param parms
##' @return
##' @author Andrew Azman
dxdt.TBHIV3 <- function(t,y,parms){
with(as.list(c(parms,y)),{
ac <- 1
hivc <- 4
tbc <- 9
inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #indices for state arrays below
S <- array(y[1:(inds[2]-1)],dim=c(ac,4))
Lf <- array(y[inds[2]:(inds[3]-1)],dim=c(ac,4))
Ls <- array(y[inds[3]:(inds[4]-1)],dim=c(ac,4))
Ps <- array(y[inds[4]:(inds[5]-1)],dim=c(ac,4))
Asp <- array(y[inds[5]:(inds[6]-1)],dim=c(ac,4))
Asn <- array(y[inds[6]:(inds[7]-1)],dim=c(ac,4))
Aep <- array(y[inds[7]:(inds[8]-1)],dim=c(ac,4))
Tx <- array(y[inds[8]:(inds[9]-1)],dim=c(ac,4))
Rtx <- array(y[inds[9]:(inds[10]-1)],dim=c(ac,4))
N <- sum(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx)
## may want to add a real hiv force of infection here later
foi <- as.numeric(Asp %*% c(beta.sp/N*rep(1,4)) +
Asn %*% c((beta.sp/N)*phi.sn) +
Ps %*% c((beta.sp/N)*phi.ps))
theta.sp.c <- theta.sp + theta.spI
theta.sn.c <- theta.sn + theta.snI
theta.ep.c <- theta.ep + theta.epI
dS <- dLf <- dLs <- dPs <- dAsp <- dAsn <- dAep <- dTx <- dRtx <- array(0,dim=c(ac,hivc))
##hiv uninfected susceptibles
dS <- S*(nu - foi - foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
c(0,(S*foi.hiv)[-hivc]) +
c(0,(S*chi.elg)[-hivc]) +
c(0,(S*chi.tx)[-hivc])
## keeping population size constant
dS[1,1] <- dS[1,1] +
Asp %*% mu.sp + Asn %*% mu.sn + Aep %*% mu.ep + ## TB Deaths
(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx) %*% delta + ## Old Age
(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx) %*% mu.hiv ## HIV Deaths
## Latent fast
dLf <-Lf*(nu - gamma.lf.ls - rho.lf -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
foi*(Ls*phi.l + Rtx*phi.l + S) +
c(0,(Lf*foi.hiv)[-hivc]) +
c(0,(Lf*chi.elg)[-hivc]) +
c(0,(Lf*chi.tx)[-hivc])
## Latent slow (remote infection)
dLs <- Ls*(nu - foi*phi.l - rho.ls -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Lf * gamma.lf.ls +
Rtx*gamma.rtx.ls +
c(0,(Ls*foi.hiv)[-hivc]) +
c(0,(Ls*chi.elg)[-hivc]) +
c(0,(Ls*chi.tx)[-hivc])
## Pre-symptomatic period
dPs <- Ps*(nu - rho.ps - zeta.sn -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Lf*rho.lf + Ls*rho.ls +
c(0,(Ps*foi.hiv)[-hivc]) +
c(0,(Ps*chi.elg)[-hivc]) +
c(0,(Ps*chi.tx)[-hivc])
## Smear Positive
dAsp <- Asp*(nu - mu.sp - theta.sp.c - zeta.sp -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*pi.sp*(1-pi.ep) +
c(0,(Asp*foi.hiv)[-hivc]) +
c(0,(Asp*chi.elg)[-hivc]) +
c(0,(Asp*chi.tx)[-hivc])
dAsn <- Asn*(nu - mu.sn - theta.sn.c - zeta.sn -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*(1-pi.sp)*(1-pi.ep) +
c(0,(Asn*foi.hiv)[-hivc]) +
c(0,(Asn*chi.elg)[-hivc]) +
c(0,(Asn*chi.tx)[-hivc])
dAep <- Aep*(nu - mu.ep - theta.ep.c - zeta.ep -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*pi.ep+
c(0,(Aep*foi.hiv)[-hivc]) +
c(0,(Aep*chi.elg)[-hivc]) +
c(0,(Aep*chi.tx)[-hivc])
dTx <- Tx*(nu - gamma.tx.rtx -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Asp*theta.sp.c +
Asn*theta.sn.c +
Aep*theta.ep.c +
c(0,(Tx*foi.hiv)[-hivc]) +
c(0,(Tx*chi.elg)[-hivc]) +
c(0,(Tx*chi.tx)[-hivc])
dRtx <- Rtx*(nu - gamma.rtx.ls - rho.rel - foi*phi.l -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Asp*zeta.sp + (Asn + Ps)*zeta.sn + Aep*zeta.ep +
Tx*(gamma.tx.rtx) +
c(0,(Rtx*foi.hiv)[-hivc]) +
c(0,(Rtx*chi.elg)[-hivc]) +
c(0,(Rtx*chi.tx)[-hivc])
list(c(dS,dLf,dLs,dPs,dAsp,dAsn,dAep,dTx,dRtx))
})}
##' take dxdt.TBHIV3 odes and appends some summary statistics to each time step
##' @param t
##' @param state
##' @param params
##' @return vector of state changes
##' @author Andrew Azman
dxdt.TBHIV.CI <- function(t,state,params){
## a little pre-processing
ac <- 1 ## number of age classes
hivc <- 4
tbc <- 9
inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #indices for state arrays
S <- array(state[1:(inds[2]-1)],dim=c(ac,4))
Lf <- array(state[inds[2]:(inds[3]-1)],dim=c(ac,4))
Ls <- array(state[inds[3]:(inds[4]-1)],dim=c(ac,4))
Ps <- array(state[inds[4]:(inds[5]-1)],dim=c(ac,4))
Asp <- array(state[inds[5]:(inds[6]-1)],dim=c(ac,4))
Asn <- array(state[inds[6]:(inds[7]-1)],dim=c(ac,4))
Aep <- array(state[inds[7]:(inds[8]-1)],dim=c(ac,4))
Tx <- array(state[inds[8]:(inds[9]-1)],dim=c(ac,4))
Rtx <- array(state[inds[9]:(inds[10]-1)],dim=c(ac,4))
with(as.list(c(state,params)),{
## rho.lf <- array(rho.lf,dim=c(ac,length(rho.lf)/2))
## rho.ls <- array(rho.ls,dim=c(ac,length(rho.ls)/2))
## rho.rel <- array(rho.rel,dim=c(ac,length(rho.rel)/2))
dCI <- c((Lf * rho.lf) + (Ls * rho.ls) + (Rtx * rho.rel)) #1x4 number of new cases of each type
#dCI <- c((Ps * rho.ps) + (Rtx * rho.rel)) #1x4 number of new cases of each type
dCIall <- sum(dCI) #1x1 sum of all incident tb types
## tb deaths in each age class and hiv status 1x8
dMtb <- c((Asp * mu.sp) + (Asn * mu.sn) + (Aep * mu.sn)) # number of new TB deaths
## cases detected of each type (this is what we will fit to)
dN.Asp <- c(Asp * (theta.sp + theta.spI)) #1x4
dN.Asn <- c(Asn * (theta.sn + theta.snI)) #1x4
dN.Aep <- c(Aep * (theta.ep + theta.epI)) #1x4
dReTx <- c(Rtx * rho.rel) #1x4
c(dCI,dCIall,dMtb,dN.Asp,dN.Asn,dN.Aep,dReTx)
}) -> dInds
##run TB model
TBout <- dxdt.TBHIV3(t,state,params)
##Return the results
rc <- list(c(TBout[[1]],dInds))
return(rc)
}
##' take dxdt.TBHIV3 odes and appends some summary statistics to each time step
##' and allows beta to vary by a fixed amount per year
##' @param t
##' @param state
##' @param params
##' @return vector of state changes
##' @author Andrew Azman
dxdt.TBHIV.CI.var.beta <- function(t,state,params){
## a little pre-processing
ac <- 1 ## number of age classes
hivc <- 4
tbc <- 9
inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #indices for state arrays
S <- array(state[1:(inds[2]-1)],dim=c(ac,4))
Lf <- array(state[inds[2]:(inds[3]-1)],dim=c(ac,4))
Ls <- array(state[inds[3]:(inds[4]-1)],dim=c(ac,4))
Ps <- array(state[inds[4]:(inds[5]-1)],dim=c(ac,4))
Asp <- array(state[inds[5]:(inds[6]-1)],dim=c(ac,4))
Asn <- array(state[inds[6]:(inds[7]-1)],dim=c(ac,4))
Aep <- array(state[inds[7]:(inds[8]-1)],dim=c(ac,4))
Tx <- array(state[inds[8]:(inds[9]-1)],dim=c(ac,4))
Rtx <- array(state[inds[9]:(inds[10]-1)],dim=c(ac,4))
params$beta.sp <- params$beta.sp*exp(params$beta.delta*t)
#cat(sprintf("beta.sp = %.2f, and beta.delta = %.3f \n",params$beta.sp[1],params$beta.delta[1]))
with(as.list(c(state,params)),{
## rho.lf <- array(rho.lf,dim=c(ac,length(rho.lf)/2))
## rho.ls <- array(rho.ls,dim=c(ac,length(rho.ls)/2))
## rho.rel <- array(rho.rel,dim=c(ac,length(rho.rel)/2))
dCI <- c((Lf * rho.lf) + (Ls * rho.ls) + (Rtx * rho.rel)) #1x4
#dCI <- c((Ps * rho.ps) + (Rtx * rho.rel)) #1x4
dCIall <- sum(dCI) #1x1
## tb deaths in each age class and hiv status 1x8
dMtb <- c((Asp * mu.sp) + (Asn * mu.sn) + (Aep * mu.sn))
## cases detected of each type in formal sector (this is what we will fit to)
dN.Asp <- c(Asp * (theta.sp + theta.spI)) #1x4
dN.Asn <- c(Asn * (theta.sn + theta.snI)) #1x4
dN.Aep <- c(Aep * (theta.ep + theta.epI)) #1x4
dReTx <- c(Rtx * rho.rel) #1x4
c(dCI,dCIall,dMtb,dN.Asp,dN.Asn,dN.Aep,dReTx)
}) -> dInds
##run TB model
TBout <- dxdt.TBHIV3(t,state,params)
##Return the results
rc <- list(c(TBout[[1]],dInds))
return(rc)
}
######################
## Helper functions ##
######################
##' Adds column names to output from ode
##' @param mod
##' @param time
##' @param ext
##' @param ac
##' @return
##' @author Andrew Azman
addColNames <- function(mod,time=T,ext=F,ac=1){
ts <- c()
if (time) ts <- "time"
tmp <- c(ts,paste0("S",1:ac),
paste0("hS",1:ac),
paste0("aS",1:ac),
paste0("nS",1:ac),
paste0("Lf",1:ac),
paste0("hLf",1:ac),
paste0("aLf",1:ac),
paste0("nLf",1:ac),
paste0("Ls",1:ac),
paste0("hLs",1:ac),
paste0("aLs",1:ac),
paste0("nLs",1:ac),
paste0("Ps",1:ac),
paste0("hPs",1:ac),
paste0("aPs",1:ac),
paste0("nPs",1:ac),
paste0("Asp",1:ac),
paste0("hAsp",1:ac),
paste0("aAsp",1:ac),
paste0("nAsp",1:ac),
paste0("Asn",1:ac),
paste0("hAsn",1:ac),
paste0("aAsn",1:ac),
paste0("nAsn",1:ac),
paste0("Aep",1:ac),
paste0("hAep",1:ac),
paste0("aAep",1:ac),
paste0("nAep",1:ac),
paste0("Tx",1:ac),
paste0("hTx",1:ac),
paste0("aTx",1:ac),
paste0("nTx",1:ac),
paste0("Rtx",1:ac),
paste0("hRtx",1:ac),
paste0("aRtx",1:ac),
paste0("nRtx",1:ac))
if (ext) {
tmp <- c(tmp,paste0("CI",1:ac),paste0("hCI",1:ac),paste0("aCI",1:ac),
paste0("nCI",1:ac),"CIall",paste0("Mtb",1:ac),
paste0("hMtb",1:ac),paste0("aMtb",1:ac),paste0("nMtb",1:ac),
paste0("N.Asp",1:ac),paste0("hN.Asp",1:ac),paste0("aN.Asp",1:ac),
paste0("nN.Asp",1:ac),paste0("N.Asn",1:ac),paste0("hN.Asn",1:ac),
paste0("aN.Asn",1:ac),paste0("nN.Asn",1:ac),
paste0("N.Aep",1:ac),paste0("hN.Aep",1:ac),paste0("aN.Aep",1:ac),paste0("nN.Aep",1:ac),
paste0("ReTx",1:ac),paste0("hReTx",1:ac),paste0("aReTx",1:ac),paste0("nReTx",1:ac))
}
if (!is.null(nrow(mod))){
colnames(mod) <- tmp
} else {
names(mod) <- tmp
}
return(mod)
}
##' takes parameters from csv file
##' @param country name of country whose parameters we want (assumes in common form)
##' @param cols column numbers for data
##' @return list with each entry being a vector for that parameter
##' @author Andrew Azman
make.params <- function(country,cols=2:5){
filename <- sprintf("Data/%s_params.csv",country)
tmp <- read.csv(filename)
params.block <- tmp[cols]
rownames(params.block) <- tmp[,1]
params.list <- do.call("list",as.data.frame(t(params.block)))
return(params.list)
}
## runs TBHIV.CI model
##' @param params
##' @param initial.state
##' @param max.time
##' @param var.beta
##' @return output of lsoda or other ode solver
runTBHIVMod <- function(params,
initial.state,
max.time=1,
var.beta = FALSE
){
library(deSolve)
times <- seq(0,max.time,by=0.1)
##print(params)
if (var.beta){
mod.out <- ode(initial.state,times,dxdt.TBHIV.CI.var.beta,params)
} else {
mod.out <- ode(initial.state,times,dxdt.TBHIV.CI,params)
}
return(mod.out)
}
##' takes a matrix with column names of model
##' and outputs just the columns needed for prevalence
##' @param run.mat
##' @param hiv.only
##' @return matrix of only columns of prev cases
##' @author Andrew Azman
getPrevCols <- function(run.mat,hiv.only=F){
## in case it is a vector
if (is.null(nrow(run.mat))) run.mat <- t(as.matrix(run.mat))
if (hiv.only){
run.mat[,grep("(n|a|h)(A(sp|sn|ep)|Tx|Ps)1$",colnames(run.mat))]
} else {
run.mat[,grep("(n|a|h|^)(A(sp|sn|ep)|Tx|Ps)1$",colnames(run.mat))]
}}
##' Objective function for fitting Incidence and CDR
##' @param params.fit
##' @param params
##' @param state
##' @param target.ci
##' @param target.cdr
##' @param target.prev.tb
##' @param plot.it
##' @param beta.or.theta - if we want to only fit one param ("beta" if we only want to fit beta, "theta" if we want to fit theta only)
##' @param weight.ci
##' @param weight.other
##' @return
##' @author Andrew Azman
incObFunc <- function(params.fit,
params,
state,
target.ci,
target.cdr,
target.prev.tb,
plot.it=FALSE,
beta.or.theta="",
weight.ci = 3,
weight.other=1
){
if (length(params.fit) == 1 && !missing(beta.or.theta)){
if (beta.or.theta == "beta") params$beta.sp <- rep(params.fit,4)
else if (beta.or.theta == "theta") params$theta.sp <- rep(params.fit,4)
else stop("beta.or.theta is mispecficified")
} else {
params$beta.sp <- rep(params.fit[1],4)
params$theta.sp <- rep(params.fit[2],4)
}
## cat(sprintf("fit.pars (post optim) = %f, %f \n",exp(params.fit)[1],exp(params.fit)[2]))
## assuming that the case detection rate of ep is same as sp
## and that sn is 0.75* sp
ep.sn.mult <- 1
params$theta.ep <- params$theta.sp*ep.sn.mult
params$theta.sn <- params$theta.sp*ep.sn.mult
tryCatch(
RS <- runsteady(y=state[1:36],
fun=dxdt.TBHIV3,
parms=params,
verbose=F)
,
error = function(e){
ss.vals <- state
cat(sprintf(e$message))
}
)
if (attr(RS,"steady")){
ss.vals <- c(RS$y,state[37:length(state)])
} else {
print("Couldn't reach steady state but proceeding to next set of paramters in optimization")
ss.vals <- state
}
run <- runTBHIVMod(params,initial.state=ss.vals,max.time=1,var.beta=FALSE)
run <- addColNames(run,ext=T,time=T)
ci <- run[11,"CIall"] - run[1,"CIall"]
if (!missing(target.prev.tb)){
## calc prevalance stats
prev <- sum(getPrevCols(run)[11,])
if (!missing(beta.or.theta) && beta.or.theta == "theta"){
obj <- ((prev/target.prev.tb) - 1)^2
obj.no.trans <- 1 # value if there is no tranmission
} else if (!missing(beta.or.theta) && beta.or.theta == "beta"){
obj <- ((ci/target.ci) - 1)^2
obj.no.trans <- 1
} else {
obj <- weight.ci*((ci/target.ci) - 1)^2 + weight.other*((prev/target.prev.tb) - 1)^2
obj.no.trans <- 2
}
print(c(ci,target.ci=target.ci,prev=prev,target.prev=target.prev.tb))
} else {
cd <- (run[11,grep("N.Asp",colnames(run))] +
run[11,grep("N.Asn",colnames(run))] +
run[11,grep("N.Aep",colnames(run))]) -
(run[1,grep("N.Asp",colnames(run))] +
run[1,grep("N.Asn",colnames(run))] +
run[1,grep("N.Aep",colnames(run))])
## but we really want to fit to cases detected which is not implicitly a function of ci
cd.num <- sum(cd)
cdr <- (sum(cd)/ci)*100
cd.num.target <- target.cdr*target.ci
print(c(ci,target.ci=target.ci,cdr=cdr,target.cdr=100*target.cdr))
if (!missing(beta.or.theta) && beta.or.theta == "theta"){
obj <- (cdr - target.cdr*100)^2
obj.no.trans <- 1000000 # value if there is no tranmission
} else if (!missing(beta.or.theta) && beta.or.theta == "beta"){
print("beta")
obj <- (ci - target.ci)^2
obj.no.trans <- 1000000
} else {
obj <- weight.ci*((ci/target.ci) - 1)^2 + weight.other*((cd.num/cd.num.target) - 1)^2
obj.no.trans <- 2
}
}
print(c(params$beta.sp[1],params$theta.sp[1]))
if (is.nan(obj) || obj == obj.no.trans) obj <- Inf #when we get no tranmission the ob func = 2
cat(sprintf("objective func = %f \n",obj))
if (plot.it){
points(params$theta.sp[1],obj,col=2)
}
return(obj) # may think about scaling the objective function
}
##' Fits beta.sp and theta.sp so the model reproduces a target cumulative
##' incidence (CI) and case detection ratio (CDR).
##'
##' Alternates two 1-D Brent optimizations -- beta against incidence, then
##' theta against the CDR (selected via incObFunc's beta.or.theta switch) --
##' until the relative change in both fitted parameters drops below
##' epsilon.cdr.inc.target.  Finally runs the model to steady state under the
##' fitted parameters.
##' @param initial.state starting state vector for the ODE model
##' @param params model parameter list; beta.sp/theta.* are length-4 vectors
##'     (presumably one entry per HIV class -- TODO confirm)
##' @param target.ci target cumulative incidence
##' @param target.cdr target case detection ratio (proportion)
##' @param epsilon.cdr.inc.target relative-change convergence tolerance
##' @return list(final.pars = fitted parameter list, ss = steady-state values)
##' @author Andrew Azman
fitIncCDR <- function(initial.state,
                      params,
                      target.ci,
                      target.cdr,
                      epsilon.cdr.inc.target=0.1
                      ){
    require("rootSolve")
    ## starting values: current beta.sp and theta.sp for the first class
    fit.pars <- c(params$beta.sp[1],params$theta.sp[1])
    print(fit.pars)
    ## fit each parameter separately and iterate between them
    epsilon.cdr.inc <- Inf
    while (epsilon.cdr.inc >= epsilon.cdr.inc.target){
        cur.beta <- params$beta.sp[1]
        cur.theta <- params$theta.sp[1]
        ## NOTE(review): fit.pars is never updated inside the loop, so each
        ## Brent search restarts from the original values -- confirm intended.
        out.beta <- optim(fit.pars[1],
                          fn=incObFunc,
                          params=params,
                          state=initial.state,
                          target.ci=target.ci,
                          target.cdr=target.cdr,
                          beta.or.theta = "beta",
                          method="Brent",
                          lower=2,upper=100, #optimization is finicky! adjust lower bound
                          control=list(trace=T,abstol=1))
        ## update beta for all four classes before refitting theta
        params$beta.sp <- rep(out.beta$par,4)
        out.theta <- optim(fit.pars[2],
                           fn=incObFunc,
                           params=params,
                           state=initial.state,
                           target.ci=target.ci,
                           target.cdr=target.cdr,
                           beta.or.theta = "theta",
                           method="Brent",
                           lower=0.1,
                           upper=2.5, #optimization is finicky! adjust bounds if the fit sticks to them
                           control=list(trace=T,abstol=1))
        ## update thetas (sp drives sn and ep through a common multiplier)
        ep.sn.mult <- 1 ## Assuming equal impact on all TB types
        params$theta.sp <- rep(out.theta$par,4)
        params$theta.sn <- ep.sn.mult*rep(out.theta$par,4)
        params$theta.ep <- ep.sn.mult*rep(out.theta$par,4)
        ## convergence: largest relative change across the two parameters
        epsilon.cdr.inc <- max(c(abs(cur.theta - out.theta$par)/cur.theta,abs(cur.beta - out.beta$par)/cur.beta))
    }
    ## run to steady state under the fitted parameters
    tryCatch(RS <- runsteady(y=initial.state,fun=dxdt.TBHIV.CI,parms=params,times=c(0,10000),verbose=F),
             error = function(e){
                 stop("Sorry can't reach steady state from optimized params")
             })
    ss.vals <- RS$y
    return(list(final.pars=params,ss=ss.vals))
}
##' Fits theta.sp and beta.sp jointly to TB prevalence and incidence targets.
##'
##' A single 2-D L-BFGS-B optimization of incObFunc (called with
##' target.prev.tb rather than target.cdr), followed by a steady-state run
##' under the fitted parameters.
##' @param initial.state starting state vector for the ODE model
##' @param params model parameter list; beta.sp/theta.* are length-4 vectors
##' @param target.ci target cumulative incidence
##' @param target.prev.tb target TB prevalence
##' @param lowers lower bounds for c(beta.sp, theta.sp)
##' @param uppers upper bounds for c(beta.sp, theta.sp)
##' @return list(final.pars = fitted parameter list, ss = steady-state values)
##' @author Andrew Azman
fitIncPrev <- function(initial.state,
                       params,
                       target.ci,
                       target.prev.tb,
                       lowers=c(4,.1),
                       uppers=c(20,7)
                       ){
    require("rootSolve")
    ## starting values: current beta.sp and theta.sp for the first class
    fit.pars <- c(params$beta.sp[1],params$theta.sp[1])
    print(fit.pars)
    out <- optim(fit.pars,
                 fn=incObFunc,
                 params=params,
                 state=initial.state,
                 target.ci=target.ci,
                 target.prev.tb=target.prev.tb,
                 method="L-BFGS-B",
                 lower=lowers,upper=uppers, #optimization is finicky! adjust bounds if needed
                 control=list(trace=T,parscale=c(10,1),maxit=1000))
    ## broadcast the two fitted scalars to all four HIV classes
    final.pars <- params
    final.pars$beta.sp <- rep(out$par[1],4)
    final.pars$theta.sp <- rep(out$par[2],4)
    ## sn/ep detection rates tied to sp via a common multiplier (currently 1)
    ep.sn.mult <- 1
    final.pars$theta.ep <- final.pars$theta.sp*ep.sn.mult
    final.pars$theta.sn <- final.pars$theta.sp*ep.sn.mult
    ## run to steady state under the fitted parameters
    tryCatch(RS <- runsteady(y=initial.state,fun=dxdt.TBHIV.CI,parms=final.pars,times=c(0,10000),verbose=F),
             error = function(e){
                 stop("Sorry can't reach steady state from optimized params")
             })
    ss.vals <- RS$y
    return(list(final.pars=final.pars,ss=ss.vals))
}
##' Runs paired intervention and control simulations with increased
##' detection rates during the intervention.
##' @param ss starting state for both runs (main states plus calculated ones)
##' @param params parameter list used for the control run (and after the
##'     intervention ends)
##' @param time total simulation length in years
##' @param int.theta.sp increased detection rate for smear-positive TB
##' @param int.theta.sn increased detection rate for smear-negative TB
##' @param int.theta.ep increased detection rate for extra-pulmonary TB
##' @param var.beta passed through to runTBHIVMod
##' @param intervention.duration years the increased rates stay active
##'     (defaults to the whole horizon)
##' @return list(int = intervention run, cont = control run), both with
##'     extended column names attached
runIntCont <- function(ss,
                       params,
                       time,
                       int.theta.sp,
                       int.theta.sn,
                       int.theta.ep,
                       var.beta=FALSE,
                       intervention.duration=time){
    ## counterfactual: no change in detection rates
    cont <- runTBHIVMod(params, initial.state = ss, max.time = time, var.beta = var.beta)
    cont <- addColNames(cont, ext = T)
    ## intervention: boosted detection rates, replicated over the 4 HIV classes
    int.params <- params
    int.params$theta.spI <- rep(int.theta.sp, 4)
    int.params$theta.snI <- rep(int.theta.sn, 4)
    int.params$theta.epI <- rep(int.theta.ep, 4)
    int <- runTBHIVMod(int.params, initial.state = ss,
                       max.time = intervention.duration, var.beta = var.beta)
    ## if the intervention stops early, finish the horizon with baseline params
    if (intervention.duration < time){
        rest <- runTBHIVMod(params, initial.state = tail(int, 1)[-1],
                            max.time = time - intervention.duration, var.beta = var.beta)
        int <- rbind(int, rest[-1, ])
        ## rebuild a continuous time axis over the stitched runs
        int[, 1] <- seq(0, time, by = 0.1)
    }
    int <- addColNames(int, ext = T)
    return(list(int = int, cont = cont))
}
# Takes a single model run and plots prevalence, incidence, and cases
# detected over time (rates per 100k per year when pop.adj=TRUE).
# out: model output matrix with a leading time column and extended names.
# pop.adj: divide flows by population/100k (assumes state columns lie between
#          column 2 and the first "CI" column -- TODO confirm).
# overlay: add dashed lines to an existing plot instead of starting one.
# legend: draw a legend.
plotOut <- function(out,pop.adj=T,overlay=FALSE,legend=TRUE){
    if (pop.adj){
        limit <- grep("CI",colnames(out)) ##which is the first col of stats
        ## NOTE(review): limit may be a vector here; `:` silently uses its
        ## first element -- confirm only limit[1] is intended.
        pa <- rowSums(out[,2:(limit-1)])/100000 ## pop.size / 100k
        pa <- pa[-1] #since we are starting after 2008.0
    } else {
        pa <- rep(1,nrow(out)-1)
    }
    ## columns holding cumulative detected-case counters
    cd <- grep("N.",colnames(out))
    ## cases detected per 100k (if adjusted); *10 converts 0.1-year steps to
    ## an annual rate -- TODO confirm step size
    cases.detected <- (diff(rowSums(out[,cd]))/pa)*10
    times <- out[,1]
    ## prevalence; c(1,pa) pads the adjuster so lengths match (prev has one
    ## more element than the diffed series)
    prev <- rowSums(getPrevCols(out))/c(1,pa)
    ## incidence from the cumulative-incidence counter
    inc <- (diff(out[,"CI"])/pa)*10
    if (!overlay){
        plot(times,prev,col=1,type="l",ylim=c(0,700),lty=1,xlab="",ylab="Rate per 100k per year")
        lines(times[-1],inc,col=2,type="l",lty=1)
        lines(times[-1],cases.detected,col=3,type="l",lty=1)
    } else {
        ## dashed lines distinguish the overlaid (second) run
        lty <- 2
        lines(times,prev,col=1,type="l",lty=lty)
        lines(times[-1],inc,col=2,type="l",lty=lty)
        lines(times[-1],cases.detected,col=3,type="l",lty=lty)
    }
    if(legend & overlay){
        legend("topright",c("Prevalence, Intervention","Incidence, Intervention","Cases Detected, Intervention","Prevalence, No Intervention","Incidence, No Intervention","Cases Detected, No Intervention"),col=c(1:3,1:3),lty=c(rep(1,3),rep(2,3)),bty="n")
    } else if (legend){
        legend("topright",c("Prev","Inc","Detected"),col=1:3,lty=1,bty="n")
    }
}
##' Calculates HIV-related summary statistics given model state.
##'
##' Accepts either a full trajectory (matrix with named columns, optionally
##' with a leading "time" column) or a single state (named numeric vector).
##' Names beginning with "h", "a" or "n" are HIV-positive classes; names
##' ending in "1" belong to the first age class; everything from the first
##' name containing "CI" onward is a cumulative statistic and is excluded.
##' @param mod model trajectory (matrix) or single model state (named vector)
##' @param full if TRUE (matrix input only), return the full time series of
##'     each statistic instead of just the final value
##' @return c(hiv.prev.1, prop.art.1) -- HIV prevalence and proportion of
##'     ART-eligible on ART for age class 1 -- or a list of the two full
##'     series when full=TRUE
##' @author Andrew Azman
hIVStats <- function(mod,full=F){
    ## drop the leading time column/element if present
    if(!is.null(nrow(mod)) && colnames(mod)[1] == "time") mod <- mod[,-1]
    if(is.null(nrow(mod)) && names(mod)[1] == "time") mod <- mod[-1]
    if(!is.null(nrow(mod))){
        ## trajectory (matrix) input
        ## assuming the first "CI" column starts the cumulative statistics;
        ## drop them so they do not pollute the denominators
        first.column.of.cum.stats <- grep("CI",colnames(mod))
        if (length(first.column.of.cum.stats) > 0){
            mod <- mod[,-c(first.column.of.cum.stats[1]:ncol(mod))]
        }
        prev.1 <-
            apply(mod[,grep("^[han]",colnames(mod))],1,sum)/
                rowSums(mod[,grep(".+1$",colnames(mod))])
        ## note that the labels for n and a are actually reversed
        prop.on.art.1 <-
            rowSums(mod[,grep("^n.+1$",colnames(mod))])/
                rowSums(mod[,grep("(^a.+1$)|(^n.+1$)",colnames(mod))])
        ## only considering those eligible
        if (full) {
            return(list(prev.1,prop.on.art.1))
        } else {
            return(c(hiv.prev.1=tail(prev.1,1),prop.art.1=tail(prop.on.art.1,1)))
        }
    } else {
        ## single state (vector) input
        first.column.of.cum.stats <- grep("CI",names(mod))
        if (length(first.column.of.cum.stats) > 0){
            ## BUG FIX: previously this KEPT only the cumulative-statistic
            ## elements (missing minus sign) and called ncol() on a vector
            ## (NULL, so the subscript errored).  Now mirror the matrix
            ## branch and drop them instead.
            mod <- mod[-(first.column.of.cum.stats[1]:length(mod))]
        }
        prev.1 <- sum(mod[grep("^[han]",names(mod))])/sum(mod[grep(".+1$",names(mod))])
        ## note that the labels for n and a are actually reversed
        prop.on.art.1 <- sum(mod[grep("^n.+1$",names(mod))])/
            sum(mod[grep("(^a.+1$)|(^n.+1$)",names(mod))])
        return(c(hiv.prev.1=prev.1,prop.art.1=prop.on.art.1))
    }
}
##' Objective for the HIV sub-model fit: runs the model to steady state
##' under candidate parameters and scores the HIV summary statistics
##' against their targets.
##' @param fit.params numeric vector: [1] chi.tx[3] (ART-eligible -> ART
##'     flow), [2] foi.hiv[1] (constant force of new HIV infections)
##' @param full.params complete model parameter list (copied and modified)
##' @param state starting state for the steady-state run
##' @param prev.1 target HIV prevalence for the 1st age class
##' @param prop.art.1 target proportion of HIV-eligible on ART (<15)
##' @return sum of squared RELATIVE errors of (prevalence, ART proportion)
##'     against their targets, equally weighted and not otherwise scaled
##' @author Andrew Azman
hIVObjective <- function(fit.params,
                         full.params,
                         state,
                         prev.1,
                         prop.art.1){
    ## inject the candidate values into the parameter list
    full.params$chi.tx[3] <- fit.params[1]
    # full.params$chi.tx[2] <- fit.params[2]
    full.params$foi.hiv[1] <- fit.params[2]
    ## full.params$foi.hiv[2] <- fit.params[4]
    #print(fit.params)
    RS <- runsteady(y=state,fun=dxdt.TBHIV3,parms=full.params,verbose=F)
    tmp <- addColNames(RS$y,time=F)
    (stats <- hIVStats(tmp))
    # print(matrix(c(stats,prev.1,prop.art.1),nrow=2,byrow=T))
    # recover()
    ## squared relative error against both targets
    sum((stats/c(prev.1,prop.art.1) - 1)^2)
}
##' Fits chi.tx[3] (flow from ART-eligible to ART) and foi.hiv[1] (constant
##' rate of new HIV infections) to target HIV prevalence and ART coverage.
##' @param params full model parameter list; supplies the starting values
##' @param start.state state used as the start of each steady-state run
##' @param prev.1 target HIV prevalence for the first age class
##' @param prop.art.1 target proportion of ART-eligible individuals on ART
##' @return the optim() result (fitted parameters in $par)
##' @author Andrew Azman
fitHIV <- function(params,
                   start.state,
                   prev.1,
                   prop.art.1){
    ## starting guesses come straight from the current parameter list
    init <- c(params$chi.tx[3], params$foi.hiv[1])
    optim(init,
          fn = hIVObjective,
          full.params = params,
          state = start.state,
          prev.1 = prev.1,
          prop.art.1 = prop.art.1,
          method = "L-BFGS-B",
          lower = c(1e-5, 1e-10),
          upper = c(365, 1),
          control = list(parscale = c(1, .1)))
}
##' Gets the proportion of the population in each age class at the final
##' time point of a model run.
##' @param mod.out model output matrix with age-suffixed column names
##'     (columns for age class i end in the digit i)
##' @param classes number of age classes in the model
##' @return numeric vector of length `classes` summing to 1
getAgeDistribution <- function(mod.out,classes=2){
    ## totals are taken from the last row only
    final.row <- mod.out[nrow(mod.out), ]
    totals <- vapply(seq_len(classes), function(age) {
        sum(final.row[grep(paste0(age, "$"), colnames(mod.out))])
    }, numeric(1))
    totals/sum(totals)
}
##' Extracts key TB statistics from a model run: mortality, prevalence and
##' incidence, each overall and for the HIV-positive classes.
##'
##' Cumulative counters are differenced between row.init and row.final; the
##' default window is the final 10 rows, i.e. one year at the model's
##' 0.1-year step -- TODO confirm step size.
##' @param mod model output matrix
##' @param add.names if TRUE, attach extended column names first
##' @param row.final final row of the evaluation window (defaults to the
##'     last row)
##' @param row.init initial row (defaults to row.final - 10)
##' @return named, rounded vector: TB mortality excluding/among HIV+,
##'     TB/HIV and overall prevalence, overall and HIV+ incidence
##' @author Andrew Azman
getTBStats <- function(mod,add.names=T,row.final,row.init){
    if (add.names) mod <- addColNames(mod,time=T,ext=T)
    if(missing(row.final) || missing(row.init)){
        row.final <- nrow(mod)
        row.init <- row.final - 10
    }
    ## overall TB mortality: difference of the cumulative Mtb counters
    tb.mort <- sum(mod[row.final,grep("Mtb",colnames(mod))] -
                   mod[row.init,grep("Mtb",colnames(mod))])
    ## mortality among HIV-positive classes (a/h/n-prefixed counters)
    tb.hiv.mort <- sum(mod[row.final,grep("(a|h|n)Mtb",colnames(mod))] -
                       mod[row.init,grep("(a|h|n)Mtb",colnames(mod))])
    ## prevalence is a point estimate at row.final
    tb.prev <- sum(getPrevCols(mod)[row.final,])
    tb.hiv.prev <- sum(getPrevCols(mod,hiv.only=T)[row.final,])
    ## incidence from the cumulative-incidence counters
    tb.inc <- mod[row.final,"CIall"] - mod[row.init,"CIall"]
    tb.hiv.inc <- sum(mod[row.final,grep("(a|h|n)CI",colnames(mod))] -
                      mod[row.init,grep("(a|h|n)CI",colnames(mod))])
    return(round(c(tb.mort.nohiv=tb.mort-tb.hiv.mort,
                   tb.hiv.mort=tb.hiv.mort,
                   tb.hiv.prev=tb.hiv.prev,
                   tb.prev=tb.prev,
                   tb.inc=tb.inc,tb.hiv.inc=tb.hiv.inc),1))
}
##' Iteratively fits the HIV sub-model (chi.tx, foi.hiv) and the TB
##' sub-model (beta.sp, theta.sp) until all four parameters stabilize.
##'
##' Each pass fits HIV targets with fitHIV, then TB targets with either
##' fitIncPrev (when target.prev.tb is supplied) or fitIncCDR; the state is
##' advanced to the TB fit's steady state between passes.
##' @param start.state full starting state; the first 36 entries are passed
##'     to the HIV fit (presumably the core compartments -- TODO confirm)
##' @param params.start starting parameter list
##' @param target.ci target cumulative TB incidence
##' @param target.cdr target case detection ratio (used if no prevalence target)
##' @param target.prev.tb target TB prevalence
##' @param target.prev.hiv target HIV prevalence
##' @param target.art target proportion of ART-eligible on ART
##' @param epsilon.target relative-change convergence tolerance
##' @param uppers.tb upper bounds for the TB fit c(beta, theta)
##' @param lowers.tb lower bounds for the TB fit c(beta, theta)
##' @return list(params = fitted parameters, state = final steady state,
##'     epsilon = final relative change)
iterativeHIVTBFit <- function(start.state,
                              params.start,
                              target.ci=993,
                              target.cdr=0.69,
                              target.prev.tb = 768,
                              target.prev.hiv = 0.178,
                              target.art = 0.55,
                              epsilon.target=1e-2,
                              uppers.tb=c(20,4),
                              lowers.tb=c(5,.1)){
    ## initialize parameters
    epsilon <- Inf
    tmp.state <- start.state
    params.tmp <- params.start
    ## current parameter vector: chi.tx[3], foi.hiv[1], beta.sp, theta.sp
    par.cur <- c(params.tmp$chi.tx[3],
                 params.tmp$foi.hiv[1],
                 params.tmp$beta.sp[1],
                 params.tmp$theta.sp[1])
    par.new <- rep(NA,4)
    while(epsilon > epsilon.target){
        ## (1) fit the HIV sub-model on the current state
        hiv.fit.sa <- fitHIV(params.tmp,
                             tmp.state[1:36],
                             prev.1=target.prev.hiv,
                             prop.art.1=target.art)
        par.new[1] <- params.tmp$chi.tx[3] <- hiv.fit.sa$par[1]
        par.new[2] <- params.tmp$foi.hiv[1] <- hiv.fit.sa$par[2]
        ## (2) fit the TB sub-model; prevalence target takes precedence
        ## NOTE(review): target.prev.tb has a default, so missing() is FALSE
        ## unless the caller explicitly omits it -- confirm intended.
        if(!missing(target.prev.tb)){
            tb.fit.tmp <- fitIncPrev(initial.state=tmp.state,
                                     params=params.tmp,
                                     target.ci=target.ci,
                                     target.prev.tb=target.prev.tb,
                                     uppers=uppers.tb,lowers=lowers.tb)
        } else {
            tb.fit.tmp <- fitIncCDR(initial.state=tmp.state,
                                    params=params.tmp,
                                    target.ci=target.ci,
                                    target.cdr=target.cdr )
        }
        params.tmp$beta.sp <- tb.fit.tmp$final.pars$beta.sp
        params.tmp$theta.sp <- tb.fit.tmp$final.pars$theta.sp
        par.new[3] <- tb.fit.tmp$final.pars$beta.sp[1]
        par.new[4] <- tb.fit.tmp$final.pars$theta.sp[1]
        ## change this if the relationship between theta.sp and the other
        ## detection rates is ever altered (multiplier currently 1)
        params.tmp$theta.sn <- tb.fit.tmp$final.pars$theta.sp*1
        params.tmp$theta.ep <- tb.fit.tmp$final.pars$theta.sp*1
        ## convergence: largest relative change across all four parameters
        epsilon <- max(abs(par.new - par.cur)/par.cur)
        par.cur <- par.new
        ## continue next pass from the TB fit's steady state
        tmp.state <- tb.fit.tmp$ss
        cat(sprintf("Pct change in params from last optim is %f \n",epsilon))
    }
    list(params=params.tmp,
         state=tmp.state,
         epsilon=epsilon)
}
##' Plots ICER-vs-cost curves, one per analytic time horizon, for output
##' from runIntCont.
##' @param out output of runIntCont (list with int and cont runs)
##' @param times vector of time horizons (years; 10 model steps per year)
##' @param costs diagnosis costs to evaluate on the x axis
##' @param params model parameter list passed through to calcStats
##' @param ... further arguments for calcStats
##' @return value of sapply (ignored); called for its plot side effect
##' @author Andrew Azman
makeHorizonICERPlot <- function(out,times,costs,params,...){
    cols <- brewer.pal(6, name="Greens")
    cols <-colorRampPalette(cols, space = "Lab")
    colors<-cols(length(times)+3)
    ## NOTE(review): `colors` is built but never used -- lines below are
    ## drawn with col=horiz and the palette lookup is commented out.
    plot(-100,-100,xlim=range(costs),ylim=c(0,600),xlab="",ylab="")
    ## one curve per horizon: ICER evaluated at each cost
    sapply(1:length(times),function(horiz){
        lines(costs,sapply(1:length(costs),function(cost)
            calcStats(out,eval.times=1:((horiz*10)+1),dtx.cost=cost,params=params,...)["ICER"]),col=horiz)
        #colors[horiz+2])
    })
}
##' Makes an image/level plot of ICERs over a grid of analytic time
##' horizons and diagnosis costs.
##' @param out output of runIntCont
##' @param times time horizons (years)
##' @param costs diagnosis costs to evaluate
##' @param params parameter list passed through to getICER
##' @param xlabs axis labels for the horizon axis
##' @param ylabs axis labels for the cost axis
##' @param ... further arguments for getICER
##' @return value of image.plot (ignored); called for its plot side effect
##' @author Andrew Azman
makeLevelPlotICER <- function(out,times,costs,params,xlabs,ylabs,...){
    require(fields)
    cols <- brewer.pal(9, name="Greens")
    cols <-colorRampPalette(cols[-1], space = "Lab")
    ## evaluate the ICER at every (horizon, cost) combination
    grid <- expand.grid(times,costs)
    ICERS <- mapply(getICER,horiz=grid[,1],cost=grid[,2],MoreArgs= list(params=params,out=out,...))
    ## expand.grid varies `times` fastest, so times index the rows here
    mat <- matrix(ICERS,nrow=length(times),ncol=length(costs))
    # layout(matrix(c(1,2),nrow=1),widths = c(.9,.1))
    # par(mar=c(2,2,2,2))
    par(mar=c(5,4.5,4,7))
    image(mat,col=cols(15),axes=F,xlab="Time Horizon (years)",ylab="Diagnosis Cost (USD)")
    ## image() places the matrix on a [0,1]x[0,1] canvas, hence these axes
    axis(1,at=seq(0,1,length=length(xlabs)),labels=xlabs)
    axis(2,at=seq(0,1,length=length(ylabs)),labels=ylabs)
    image.plot(col=cols(15),zlim=range(ICERS),legend.only=T,horizontal=F,width=5)
}
##' Helper: evaluates the ICER for one (horizon, cost) pair, dispatching
##' to the fixed-cost or standard calculator.
##' @param horiz analytic time horizon in years (model uses 0.1-year steps)
##' @param cost diagnosis cost passed through as dtx.cost
##' @param params model parameter list
##' @param out output of runIntCont
##' @param fixed TRUE to use calcICERFixedCosts, FALSE for calcICER
##' @param ... further arguments for the chosen calculator
##' @return the named "ICER" element of the calculator's result
##' @author Andrew Azman
getICER <- function(horiz,cost,params,out,fixed,...){
    calc <- if (fixed) calcICERFixedCosts else calcICER
    calc(out, eval.times = 1:((horiz*10)+1), dtx.cost = cost, params = params, ...)["ICER"]
}
##' Objective for fitting the annual percent change in beta to a target
##' cumulative incidence: squared error between the target and the model's
##' incidence after `years` of simulation.
##' @param beta.delta candidate annual change, applied to all 4 HIV classes
##' @param params model parameter list
##' @param ss starting state
##' @param target.ci target cumulative incidence (compared against element 5
##'     of getTBStats, tb.inc)
##' @param years simulation length
##' @return squared error (scalar)
##' @author Andrew Azman
fitAnnualBetaDeltaObjFunc <- function(beta.delta,params,ss,target.ci,years){
    params$beta.delta <- rep(beta.delta,4)
    sim <- runTBHIVMod(params,ss,years,T)
    current.ci <- getTBStats(sim)[5]
    cat(sprintf("Target = %f, Current = %f \n",target.ci,current.ci))
    (target.ci - current.ci)^2
}
##' Fits the annual percent change in beta to a cumulative-incidence target
##' via a 1-D Brent search over fitAnnualBetaDeltaObjFunc.
##' @param params model parameter list (supplies the starting value)
##' @param ss starting state
##' @param target.ci target cumulative incidence
##' @param years simulation length per objective evaluation
##' @return the optim() result (fitted beta.delta in $par)
##' @author Andrew Azman
fitAnnualBetaDelta <- function(params,
                               ss,
                               target.ci,
                               years){
    optim(par = params$beta.delta[1],
          fn = fitAnnualBetaDeltaObjFunc,
          ss = ss,
          params = params,
          target.ci = target.ci,
          years = years,
          method = "Brent",
          lower = -10,
          upper = 10,
          control = list(trace = T))
}
##' Returns WHO TB-burden rows for one country over a set of years.
##' @param target.country country name exactly as it appears in the csv
##' @param years numeric vector of years to keep
##' @return data.frame of matching rows
##' @author Andrew Azman
getWHOStats <- function(target.country,years){
    ## NB: path is relative to the project working directory
    who.data <- read.csv("Data/TB_burden_countries_2012-12-10.csv")
    subset(who.data, country == target.country & year %in% years)
}
##' Crude steady-state finder by repeated forward simulation (written as a
##' sanity check on rootSolve's runsteady).
##' @param state starting state vector
##' @param fun unused; kept for interface compatibility with runsteady-style
##'     callers
##' @param params model parameter list
##' @param check.every years to simulate between steadiness checks
##' @param var.beta passed through to runTBHIVMod
##' @return final model state (last row of the last run, time dropped)
##' @author Andrew Azman
runSteady <- function(state,fun,params,check.every=500,var.beta=FALSE){
    steady <- F
    while(!steady){
        tmp <- runTBHIVMod(params,state,check.every,var.beta=var.beta)
        ## NOTE(review): tail(tmp,10)[10] linearly indexes the last-10-row
        ## matrix, so only the first column (time) is compared -- confirm
        ## this is the intended steadiness criterion.
        if (abs(tail(tmp,10)[10] - tail(tmp,10)[1]) < 1){
            steady <- TRUE
        } else {
            ## BUG FIX: advance the state to the end of the last run.
            ## Previously `state` was never updated, so the identical
            ## simulation was repeated forever whenever the first run was
            ## not already steady.
            state <- tail(tmp,1)[-1]
        }
    }
    tail(tmp,1)[-1]
}
##' Fits the increased detection rate (theta.spI) that yields a target
##' number of additional cases detected in the first year.
##' @param target.detection.increase additional cases detected (per 100k)
##' @param duration years over which the increase is evaluated
##' @param params model parameter list
##' @param starting.state starting state for the comparison runs
##' @param ep.sn.multiplier fraction of the sp increase applied to sn/ep TB
##' @param var.beta passed through to the model runs
##' @return the optim() result (fitted theta.spI in $par)
##' @author Andrew Azman
fitIncreasedDetectionRate <- function(target.detection.increase,
                                      duration,
                                      params,
                                      starting.state,
                                      ep.sn.multiplier,
                                      var.beta){
    ## start the Brent search just above the current intervention rate
    start.val <- params$theta.spI[1] + .1
    optim(start.val,
          fn = fitIncreasedDetectionRateObjFunc,
          params = params,
          state = starting.state,
          duration = duration,
          ep.sn.multiplier = ep.sn.multiplier,
          target.detection.increase = target.detection.increase,
          var.beta = var.beta,
          method = "Brent",
          lower = 0,
          upper = 10)
}
##' Objective for fitting an increased detection rate: squared difference
##' between the extra cases detected (candidate vs. baseline run) and the
##' targeted increase.
##' @param theta.spI candidate increased smear-positive detection rate
##' @param params model parameter list
##' @param state starting state for both runs
##' @param duration years each run is simulated
##' @param ep.sn.multiplier fraction of the sp increase applied to sn/ep TB
##' @param var.beta passed through to the model runs
##' @param target.detection.increase targeted number of additional detected
##'     cases over `duration`
##' @return squared error (scalar)
##' @author Andrew Azman
fitIncreasedDetectionRateObjFunc <- function(theta.spI,
                                             params,
                                             state,
                                             duration,
                                             ep.sn.multiplier,
                                             var.beta,
                                             target.detection.increase){
    ## cumulative cases detected across all TB forms between the first and
    ## last rows of a run (sums of the N.Asp/N.Asn/N.Aep counters)
    totalDetected <- function(run){
        rowTotal <- function(r){
            sum(run[r,grep("N.Asp",colnames(run))]) +
                sum(run[r,grep("N.Asn",colnames(run))]) +
                sum(run[r,grep("N.Aep",colnames(run))])
        }
        rowTotal(nrow(run)) - rowTotal(1)
    }
    ## baseline: current detection rates
    run.pre <- addColNames(runTBHIVMod(params,state,duration,var.beta=var.beta),ext=T)
    ## candidate: increased rates for all 4 HIV classes; sn/ep scaled by the
    ## multiplier
    params$theta.spI <- rep(theta.spI,4)
    params$theta.snI <- rep(theta.spI,4)*ep.sn.multiplier
    params$theta.epI <- rep(theta.spI,4)*ep.sn.multiplier
    run.post <- addColNames(runTBHIVMod(params,state,duration,var.beta=var.beta),ext=T)
    ## squared distance from the targeted increase in detected cases
    ((totalDetected(run.post) - totalDetected(run.pre)) - target.detection.increase)^2
}
##' Calculates the ICER (incremental cost-effectiveness ratio) for a paired
##' intervention/control run, with the diagnosis cost treated as a FIXED
##' total per unit time rather than per case.
##'
##' Costs (diagnosis + treatment) and DALYs (YLL from deaths plus YLD from
##' untreated and treated disease time) are accumulated over the requested
##' evaluation window and discounted continuously at `discount`.
##' @param out output from runIntCont (list: int, cont)
##' @param eval.times rows to analyze (units of 1/10 year)
##' @param dtx.cost total case-finding cost per year (NOT per case)
##' @param tx.cost cost of a full course of DS-TB treatment
##' @param tx.cost.mdr cost of a full course of MDR-TB treatment
##' @param tx.suc probability of completing treatment
##' @param tx.cost.partial cost of a partial DS-TB course
##' @param tx.cost.partial.mdr cost of a partial MDR-TB course
##' @param discount annual discount rate (continuous)
##' @param dis.wt.tx disability weights on treatment, by HIV class
##' @param dis.wt.tb disability weights with untreated TB, by HIV class
##' @param pct.mdr proportion of cases that are MDR
##' @param params model parameter list (used for disease-duration terms)
##' @return named vector: txcost.int, ICER, DALYs.averted, DALYS.int,
##'     DALYs.cont
calcICERFixedCosts <- function(out,
                               eval.times=1:11,
                               dtx.cost=20*100, #full cost in year 1
                               tx.cost=120,
                               tx.cost.mdr=120,
                               tx.suc=c(1),
                               tx.cost.partial=80,
                               tx.cost.partial.mdr=80,
                               discount=.03,
                               dis.wt.tx = c((0.331+0)/2,(0.399+0.221)/2,0.547,(0.399+0.053)/2), ## Weighted averages from Salomon et al 2013
                               dis.wt.tb = c(0.331,0.399,0.547,0.399), ##using DW for AIDS only for HIV/TB from Salomon et al 2013
                               pct.mdr = 0.023,
                               params){
    require(plyr)
    ## number of age classes (can put this as a param later)
    ac <- 1
    ## helper vectors: TB forms and HIV-status column prefixes
    types <- c("Asp","Asn","Aep")
    hivstatus <- c("","h","a","n")
    ## restrict both runs to the evaluation window
    out <- lapply(out,function(x) x[eval.times,])
    ## time axis for the diffed series (drops the first point)
    times <- out[[1]][,1][-1]
    ## per-step increments of the cumulative statistics
    ## NOTE(review): column 14 is assumed to be the first stats column --
    ## confirm against the model's column layout.
    diffs <- lapply(out,function(x) {
        diff(x)[,14:ncol(x)]
    })
    ## discounted diagnosis cost per step
    ## NB: dtx.costs are total costs through time NOT per case
    ## Riemann integral with step size 0.1
    dtx.costs <- dtx.cost*exp(-times*discount)*0.1
    ## per-step detected cases (all forms, all HIV classes)
    dtxs.int <- diffs$int[,grep("N.A(sp|sn|ep)",colnames(diffs$int))]
    dtxs.cont <- diffs$cont[,grep("N.A(sp|sn|ep)",colnames(diffs$cont))]
    ## discounted unit treatment costs for DS and MDR TB, full and partial
    tx.unitcosts <- tx.cost*exp(-times*discount)
    tx.part.unitcosts <- tx.cost.partial*exp(-times*discount)
    tx.unitcosts.mdr <- tx.cost.mdr*exp(-times*discount)
    tx.part.unitcosts.mdr <- tx.cost.partial.mdr*exp(-times*discount)
    ## incremental treatment cost: (extra cases detected) x (expected cost
    ## of a full or partial course, DS/MDR mix)
    txcost.int <- sum((rowSums(dtxs.int) - rowSums(dtxs.cont))*
                      (tx.suc*(tx.unitcosts*(1-pct.mdr) + tx.unitcosts.mdr*pct.mdr) +
                       (1-tx.suc)*(tx.part.unitcosts*(1-pct.mdr) + tx.part.unitcosts.mdr*pct.mdr)))
    ## per-step TB deaths by HIV class
    death.cont <- diffs$cont[,grep("Mtb",colnames(diffs$cont))]
    death.int <- diffs$int[,grep("Mtb",colnames(diffs$int))]
    ## Years of life lost: conservative -- each death contributes at most
    ## (horizon - time of death) years, discounted
    YLL.cont <- apply(death.cont,2,function(hiv.class) {
        hiv.class * (max(times) - times) * exp(-discount *times)
        ## hiv.class * which.max(times) - 1:length(times) * exp(-discount *times)
    })
    YLL.int <- apply(death.int,2,function(hiv.class) {
        hiv.class * (max(times) - times) * exp(-discount *times)
        # hiv.class * which.max(times) - 1:length(times) * exp(-discount *times)
    })
    YLL.cont.minus.int <- YLL.cont - YLL.int
    ## symptomatic disease durations (with/without the intervention's extra
    ## detection), by TB form and HIV class
    ## NOTE: dis.dur.tx is not actually used anywhere anymore
    with(params,{
        dur.sp <- (theta.sp+theta.spI)*eta.sp+zeta.sp
        dur.sn <- (theta.sn+theta.snI)*eta.sn+zeta.sn
        dur.ep <- (theta.ep+theta.epI)*eta.ep+zeta.sn
        tmp <- 1/rbind(sp=dur.sp,sn=dur.sn,ep=dur.ep)
        colnames(tmp) <- c("","h","n","a")
        tmp
    }) -> dis.dur.tx
    with(params,{
        dur.sp <- theta.sp+zeta.sp
        dur.sn <- theta.sn+zeta.sn
        dur.ep <- theta.ep+zeta.sn
        tmp <- 1/rbind(sp=dur.sp,sn=dur.sn,ep=dur.ep)
        colnames(tmp) <- c("","h","n","a")
        tmp
    }) -> dis.dur.notx
    ## mean treatment duration; all TB types assumed to share it
    tx.dur <- 1/params$gamma.tx.rtx[1]
    ## proportion of incident cases of each TB form, by HIV class
    prop.each.TB.type <- sapply(1:4,function(x) {
        with(params, c(pi.sp[x]*(1-pi.ep[x]),(1-pi.sp[x])*(1-pi.ep[x]),
                       pi.ep[x]))
    })
    colnames(prop.each.TB.type) <- c("","h","n","a")
    ## prevalent (active, undetected) cases contribute untreated YLD
    hiv.types <- c("^","h","a","n")
    ## lists by HIV class; matrices rows = time, columns = Asp, Asn, Aep
    prev.cases.cont <- sapply(1:4,function(x)
        (out[[2]])[,grep(paste0(hiv.types[x],"(Asp|Aep|Asn)"),
                         colnames(out[[2]]))],simplify=F)
    prev.cases.int <- sapply(1:4,function(x)
        (out[[1]])[,grep(paste0(hiv.types[x],"(Asp|Aep|Asn)"),
                         colnames(out[[1]]))],simplify=F)
    prev.cases.cont.minus.int <- llply(1:4,function(x) prev.cases.cont[[x]] - prev.cases.int[[x]])
    ## YLD (untreated) = prevalent person-time x disability weight
    time.step <- 0.1
    YLD.notx.cont <- sapply(1:4,function(hiv){
        sum(sapply(1:3,function(tb) {
            prev.cases.cont[[hiv]][,tb] *
                time.step * dis.wt.tb[hiv]
        }))})
    YLD.notx.int <- sapply(1:4,function(hiv){
        sum(sapply(1:3,function(tb) {
            prev.cases.int[[hiv]][,tb] *
                time.step * dis.wt.tb[hiv]
        }))})
    YLD.notx.cont.minus.int <- YLD.notx.cont - YLD.notx.int
    ## detected cases reshaped into per-HIV-class lists
    det.cases.int <- sapply(1:4,function(x){
        dtxs.int[,grep(paste0(hiv.types[x],"(N.Asp|N.Aep|N.Asn)"),
                       colnames(dtxs.int))]}
       ,simplify=F)
    det.cases.cont <- sapply(1:4,function(x){
        dtxs.cont[,grep(paste0(hiv.types[x],"(N.Asp|N.Aep|N.Asn)"),
                        colnames(dtxs.cont))]}
       ,simplify=F)
    ## YLD (on treatment): at most 0.5 years per case, capped by the time
    ## remaining in the horizon
    ## NB: not discounting for time on treatment since it is SO short
    YLD.tx.int <- sapply(1:4,function(hiv){
        sum(sapply(1:3,function(tb){
            det.cases.int[[hiv]][,tb] * pmin(0.5,max(times) - times) * dis.wt.tb[hiv]
        }))})
    YLD.tx.cont <- sapply(1:4,function(hiv){
        sum(sapply(1:3,function(tb){
            det.cases.cont[[hiv]][,tb] * pmin(0.5,max(times) - times) * dis.wt.tb[hiv]
        }))})
    YLD.tx.cont.minus.int <- YLD.tx.cont - YLD.tx.int
    ## DALYs = YLL + YLD (untreated) + YLD (on treatment)
    DALYs.int <- sum(YLL.int) + sum(YLD.notx.int) + sum(YLD.tx.int)
    DALYs.cont <- sum(YLL.cont) + sum(YLD.notx.cont) + sum(YLD.tx.cont)
    DALYs.averted <- sum(YLL.cont.minus.int) + sum(YLD.notx.cont.minus.int) + sum(YLD.tx.cont.minus.int)
    ret <- c(txcost.int=txcost.int,
             ICER=(txcost.int+sum(dtx.costs))/sum(DALYs.averted),
             DALYs.averted = DALYs.averted,
             DALYS.int = DALYs.int,
             DALYs.cont = DALYs.cont)
    return(ret)
}
##' Manual beta-tuning helper: sets a candidate beta.sp, settles the model
##' to steady state, runs one further year and reports TB statistics so the
##' user can compare against target.ci by eye.
##' @param start.beta candidate beta.sp value(s)
##' @param state model state; only the first 61 entries are used for the
##'     steady-state run
##' @param params model parameter list
##' @param target.ci target cumulative incidence (for the caller's
##'     reference; not used in the computation)
##' @return getTBStats() summary of the one-year run
##' @author Andrew Azman
ManualTuneBeta <- function(start.beta,state,params,target.ci){
    params$beta.sp <- start.beta
    ## settle to steady state under the candidate beta
    eq <- runsteady(y=state[1:61],fun=dxdt.TBHIV.CI,parms=params,times=c(0,10000),verbose=F)
    ## then simulate one year and summarize
    one.year <- runTBHIVMod(params,initial.state=eq$y,max.time=1,var.beta=FALSE)
    getTBStats(one.year,add.names=T)
}
##' Makes an image plot of (log10) fixed-cost ICERs over a grid of time
##' horizons and diagnosis costs, optionally with cost-effectiveness
##' contours at GDP thresholds.
##' @param icer.min floor for ICER values when truncating colors
##' @param icer.max ceiling for ICER values when truncating colors
##' @param case.dt.dif fit the detection-rate increase to this many extra
##'     cases detected in the first year
##' @param plot.params model parameters (default india2011_params -- a
##'     global defined elsewhere, TODO confirm it is in scope)
##' @param start.state starting state (default start.state.2011, a global)
##' @param tx.cost full DS-TB treatment cost
##' @param tx.cost.partial partial DS-TB treatment cost
##' @param tx.cost.mdr full MDR-TB treatment cost
##' @param pct.mdr proportion of cases that are MDR (default for India)
##' @param tx.cost.partial.mdr partial MDR-TB treatment cost
##' @param my.title plot title
##' @param intcont.run optional precomputed runIntCont output; if supplied,
##'     the detection-rate fit and simulation are skipped
##' @param gdp per-capita GDP; draws cost-effectiveness threshold contours
##' @param ICERS optional precomputed ICER grid (computed here otherwise)
##' @param contours optional extra contour levels (in ICER units)
##' @param xlab x axis label
##' @param ylab y axis label
##' @param leg draw the color legend?
##' @param ep.sn.multiplier fraction of the sp rate increase given to sn/ep
##' @param truncate.color clamp ICERs to [icer.min, icer.max] before coloring
##' @param ... further arguments for getICER
##' @return list(ICERS = ICER grid, intcont.run = simulation output)
##' @author Andrew Azman
makeICERPlotFixed <- function(icer.min=0.00001,
                              icer.max=6000,
                              case.dt.dif,
                              plot.params = india2011_params,
                              start.state = start.state.2011,
                              tx.cost = 81,
                              tx.cost.partial = tx.cost*.75,
                              tx.cost.mdr = 350,
                              pct.mdr = 0.023, # default for india
                              tx.cost.partial.mdr = tx.cost.mdr*.75,
                              my.title = "",
                              intcont.run,
                              gdp,
                              ICERS,
                              contours,
                              xlab="",
                              ylab="",
                              leg=FALSE,
                              ep.sn.multiplier=1,
                              truncate.color=TRUE,
                              ...
                              ){
    ## fit the increased detection rate that yields case.dt.dif additional
    ## cases in the first year, then simulate intervention vs control
    if (missing(intcont.run)){
        cat(sprintf("Fitting increased detection rate for %d case increase in year 1 \n",case.dt.dif))
        fit.tmp <- fitIncreasedDetectionRate(target.detection.increase = case.dt.dif,
                                             duration = 1,
                                             params = plot.params,
                                             starting.state = start.state,
                                             ep.sn.multiplier = ep.sn.multiplier,
                                             var.beta=FALSE)
        theta.reduction <- fit.tmp$par
        tmp <- runIntCont(start.state,plot.params,10,
                          int.theta.sp= theta.reduction,
                          int.theta.sn = theta.reduction*ep.sn.multiplier,
                          int.theta.ep = theta.reduction*ep.sn.multiplier)
        ## record the fitted rates so downstream cost calcs can see them
        plot.params$theta.spI <- rep(theta.reduction,4)
        plot.params$theta.snI <- rep(theta.reduction,4)*ep.sn.multiplier
        plot.params$theta.epI <- rep(theta.reduction,4)*ep.sn.multiplier
    } else {
        tmp <- intcont.run
    }
    ## evaluation grid: horizons in years, total costs scaled per case found
    times <- seq(1,10,by=.1)
    costs <- seq(50,5000,by=5)*case.dt.dif
    xlabs <- 1:10
    ylabs <- seq(50,5000,by=350)
    ## colors operate on log10(ICER)
    zlims <- c(0,log10(icer.max))
    # zlims <- c(icer.min,icer.max)
    # breaks <- seq(icer.min,icer.max,by=50)
    breaks <- seq(0,log10(icer.max),length=50)
    cols <- colorRampPalette(brewer.pal(9, name="Greens"))
    grid <- expand.grid(times,costs)
    ## shared arguments for the grid-wide getICER evaluation
    args.for.mapply <- list(params=plot.params,
                            out=tmp,
                            tx.cost=tx.cost,
                            tx.cost.partial=tx.cost.partial,
                            tx.cost.mdr=tx.cost.mdr,
                            tx.cost.partial.mdr=tx.cost.partial.mdr,
                            pct.mdr=pct.mdr,
                            fixed=TRUE)
    ## only estimate if the caller didn't supply a precomputed grid
    if (missing(ICERS)) ICERS <- mapply(getICER,horiz=grid[,1],cost=grid[,2],MoreArgs=args.for.mapply)
    mat <- matrix(ICERS,nrow=length(times),ncol=length(costs))
    ## clamp out-of-range ICERs so extremes don't wash out the color scale
    if (truncate.color) {
        mat[which(mat > icer.max)] <- icer.max
        mat[which(mat < icer.min)] <- icer.min
    }
    ## par(mar=c(5,4.5,4,7))
    image(log10(mat),col=cols(length(breaks)-1),axes=F,xlab=xlab,ylab=ylab,zlim=zlims,breaks=breaks)
    ## WHO-style thresholds: cost saving / <1x GDP / <3x GDP per DALY
    if (!missing(gdp)){
        contour(log10(mat),levels=c(log10(0.0001),log10(gdp),log10(3*gdp)),col=addAlpha("black",.5),labcex=.5,lwd=1,lty=2,add=T,drawlabels=TRUE,method="edge",labels=c("cost saving","highly cost effective","cost effective"))
    }
    if (!missing(contours)){
        contour(log10(mat),
                levels=log10(contours), #[[1]]
                col=addAlpha("black",.5),
                labcex=.5,
                lwd=1,
                lty=2,
                labels=contours,
                add=TRUE)
        #,method="edge")
    }
    ## image() draws on a [0,1]x[0,1] canvas; map axis labels back onto it
    time.labs <- cbind(seq(0,1,length=length(times)),seq(1,10,length=length(times)))[seq(1,length(times),by=5),]
    axis(1,at=time.labs[,1],labels=time.labs[,2])
    # axis(1,at=seq(0,1,length=length(xlabs)),labels=xlabs)
    costs.labs <- cbind(seq(0,1,length=length(costs)),costs/case.dt.dif)[seq(1,991,by=50),]
    axis(2,at=costs.labs[,1],labels=costs.labs[,2])
    #axis(2,at=seq(0,1,length=length(ylabs)),labels=ylabs)
    if (leg){
        ## legend ticks shown on the original (unlogged) ICER scale
        legend.seq <- round(seq(min(zlims),max(zlims),length=5),0)
        image.plot(col=cols(length(breaks)-1),zlim=zlims,
                   ## breaks=seq(min(zlims),max(zlims),length=length(breaks)),
                   ## lab.breaks=round(10^seq(min(zlims),max(zlims),length=length(breaks)),0),
                   legend.only=T,horizontal=F,width=7,smallplot = c(.95,1,.05,.9),
                   axis.args=list(at=legend.seq, labels=10^legend.seq))
    }
    title(my.title)
    list("ICERS"=ICERS,"intcont.run"=tmp)
}
##' Plots TB incidence and mortality (and optionally cases detected) over
##' time for intervention (solid) vs baseline (dashed) runs.
##' @param intcont output of runIntCont: [[1]] intervention, [[2]] control
##' @param legend draw a legend (covers CI and mortality only)
##' @param col1 color for incidence lines
##' @param col2 color for mortality lines
##' @param cd also plot cases detected (drawn in color 5)
##' @param ... further arguments for plot()
##' @return NULL (called for its plot side effect)
##' @author Andrew Azman
plotTBIncMort <- function(intcont,
                          legend=TRUE,
                          col1=1,
                          col2=2,
                          cd=FALSE,...){
    #CI, Prev # mortality, retx
    times <- intcont[[1]][,1]
    ## annual rates: diff of cumulative counters x 10 (0.1-year steps)
    ci1 <- diff(intcont[[1]][,"CIall"])*10
    ci2 <- diff(intcont[[2]][,"CIall"])*10
    ## ##now prevalence
    ## prev1 <- rowSums(getPrevCols(intcont[[1]]))
    ## prev2 <- rowSums(getPrevCols(intcont[[2]]))
    ## mortality, summed over HIV classes (age class 1 counters)
    mort1 <- diff(rowSums(intcont[[1]][,grep("(n|a|h|^)(Mtb1)",colnames(intcont[[1]]))]))*10
    mort2 <- diff(rowSums(intcont[[2]][,grep("(n|a|h|^)(Mtb1)",colnames(intcont[[2]]))]))*10
    ## ##retreatment
    ## retx1 <- diff(rowSums(intcont[[1]][,grep("(n|a|h|^)(ReTx1)",colnames(intcont[[1]]))]))*10
    ## retx2 <- diff(rowSums(intcont[[2]][,grep("(n|a|h|^)(ReTx1)",colnames(intcont[[2]]))]))*10
    ## ## cases detected
    if (cd){
        cases.detected.1 <- diff(rowSums(intcont[[1]][,grep("N.A(sp|sn|ep)",colnames(intcont[[1]]))]))*10
        cases.detected.2 <- diff(rowSums(intcont[[2]][,grep("N.A(sp|sn|ep)",colnames(intcont[[2]]))]))*10
    }
    ## size the canvas to whatever series will be drawn
    all.data.points <- c(ci1,ci2,mort1,mort2)
    if (cd) all.data.points <- c(all.data.points,cases.detected.1,cases.detected.2)
    plot(-100,-100,xlim=range(times),ylim=c(min(all.data.points),max(all.data.points)),xlab="",...)
    ## solid = intervention, dashed = baseline
    lines(times[-1],ci1,col=col1)
    lines(times[-1],ci2,col=col1,lty=2)
    ## lines(times,prev1,col=2)
    ## lines(times,prev2,col=2,lty=2)
    lines(times[-1],mort1,col=col2)
    lines(times[-1],mort2,col=col2,lty=2)
    ## lines(times[-1],retx1,col=4)
    ## lines(times[-1],retx2,col=4,lty=2)
    if (cd){
        lines(times[-1],cases.detected.1,col=5)
        lines(times[-1],cases.detected.2,col=5,lty=2)
    }
    if (legend){
        ## NOTE(review): legend hard-codes cols 1:2, not col1/col2, and
        ## omits the cases-detected series -- confirm intended.
        legend("topright",paste0(rep(c("CI","mort"),each=2),c(" - Interv."," - Baseline")),
               lty=rep(1:2,2),
               col=rep(1:2,each=2),
               bty="n")
    }
}
##' Applies a transparency multiplier to a set of colors.
##' @param COLORS vector of R color names or hex strings
##' @param ALPHA multiplier in [0,1] applied to each color's alpha channel
##' @return character vector of hex colors with the adjusted alpha
addAlpha <- function(COLORS, ALPHA){
    if(missing(ALPHA)) stop("provide a value for alpha between 0 and 1")
    ## decompose to red/green/blue/alpha channels (0-255)
    channels <- col2rgb(COLORS, alpha=TRUE)
    channels["alpha", ] <- round(channels["alpha", ]*ALPHA)
    ## reassemble as hex strings with the scaled alpha
    rgb(channels["red", ], channels["green", ], channels["blue", ],
        channels["alpha", ], maxColorValue = 255)
}
##' Plots cumulative TB incidence for intervention (solid) vs control
##' (dashed), optionally shading the area between the curves, or plots
##' the control-minus-intervention difference when diffs=TRUE.
##' @param intcont output of runIntCont: [[1]] intervention, [[2]] control
##' @param legend unused (kept for interface compatibility)
##' @param col1 color for the incidence curves
##' @param col2 unused (kept for interface compatibility)
##' @param diffs plot only the difference between the two runs
##' @param poly shade the area between the two curves
##' @param ... further arguments for plot()
plotCumTBIncMort <- function(intcont,
                             legend=TRUE,
                             col1=1,
                             col2=2,
                             diffs=FALSE,
                             poly=TRUE,
                             ...){
    times <- intcont[[1]][,1]
    ## cumulative incidence, scaled x10 (0.1-year step counters)
    cum.inc.int <- intcont[[1]][,"CIall"]*10
    cum.inc.cont <- intcont[[2]][,"CIall"]*10
    if (diffs){
        ## just the gap between control and intervention
        plot(cum.inc.cont - cum.inc.int,col=col1,lty=6,...)
    } else {
        ## empty canvas sized to the data, then both trajectories on top
        plot(-100,-100,xlim=range(times),ylim=c(0,max(c(cum.inc.int,cum.inc.cont))),xlab="",...)
        lines(times,cum.inc.int,col=col1,lty=1)
        lines(times,cum.inc.cont,col=col1,lty=2)
        if (poly){
            ## shade the area between the two curves
            polygon(x=c(times,rev(times)),y=c(cum.inc.int,rev(cum.inc.cont)),col=addAlpha(col1,.2),border=FALSE)
        }
    }
}
##' Compares model-derived statistics to WHO-reported estimates for a
##' country/year.
##' @param run model output matrix
##' @param year year(s) to pull from the WHO data
##' @param country country name as it appears in the WHO csv
##' @return data.frame of the WHO estimate columns (mortality, prevalence,
##'     incidence, TB/HIV incidence, each with lo/hi bounds)
compareStats <- function(run,year,country){
    ## NOTE(review): the model statistics are computed here but are NOT
    ## included in the return value -- only the WHO columns are returned.
    ## Confirm whether tb.hiv.stats was meant to be bound into the result.
    tb.hiv.stats <- c(getTBStats(run),hIVStats(addColNames(run,ext=T)))
    who.stats <- getWHOStats(country,year)
    ## WHO estimate columns of interest (point estimate, low, high)
    who.stat.colnames <- c("e_mort_exc_tbhiv_100k","e_mort_exc_tbhiv_100k_lo","e_mort_exc_tbhiv_100k_hi",
                           "e_prev_100k","e_prev_100k_lo","e_prev_100k_hi",
                           "e_inc_100k","e_inc_100k_lo","e_inc_100k_hi",
                           "e_inc_tbhiv_100k","e_inc_tbhiv_100k_lo","e_inc_tbhiv_100k_hi")
    cbind(who.stats[who.stat.colnames])
}
##' Runs a short-term ACF intervention then continues the simulation on for
##' some years.
##' @param country string with "india", "sa", or "china"
##' @param pct.incidence extra cases found in year one should be
##'   pct.incidence x incidence (only used when case.dt.dif is missing)
##' @param case.dt.dif optional vector of additional cases to detect, ordered
##'   (china, india, sa); computed from WHO 2011 incidence when missing
##' @param int.dur total number of years we want to run the intervention
##' @param total.dur total number of years we want to run the simulation
##' @param fits named (by country) list of fitted objects
##' @return intcont list for simulation
##' @author Andrew Azman
runNYearACF <- function(country,
                        pct.incidence,
                        case.dt.dif,
                        int.dur=2,
                        total.dur=10,
                        fits){
    ## default target: detect an extra pct.incidence x (WHO 2011 incidence)
    ## cases in year one, for each of the three countries
    if (missing(case.dt.dif)) {
        case.dt.dif <- c(round(getWHOStats("China",2011)[,"e_inc_100k"]*pct.incidence,0),
                         round(getWHOStats("India",2011)[,"e_inc_100k"]*pct.incidence,0),
                         round(getWHOStats("South Africa",2011)[,"e_inc_100k"]*pct.incidence,0))
    }
    ## pick this country's entry (vector is ordered china, india, sa)
    target.cases <- case.dt.dif[switch(country,
                                       "china" = 1,
                                       "india" = 2,
                                       "sa" = 3)]
    country.fit <- fits[[country]]
    ## solve for the detection-rate increase that yields the target number of
    ## extra detections over one year
    detection.fit <- fitIncreasedDetectionRate(target.detection.increase = target.cases,
                                               duration = 1,
                                               params = country.fit$params,
                                               starting.state = country.fit$state,
                                               ep.sn.multiplier = 1,
                                               var.beta = FALSE)
    extra.theta <- detection.fit$par
    ## run intervention + baseline with the fitted detection increase applied
    ## equally to smear-positive, smear-negative and extrapulmonary cases
    runIntCont(ss = country.fit$state,
               params = country.fit$params,
               time = total.dur,
               int.theta.sp = extra.theta,
               int.theta.sn = extra.theta,
               int.theta.ep = extra.theta,
               intervention.duration = int.dur)
}
## Sens/Uncertainty Analyses Functions
##' Makes a list of update functions, one per parameter slot in the params
##' list (counting 4 HIV-class slots per named parameter), used for running
##' sensitivity analyses while keeping dependent parameters consistent.
##'
##' Most slots are not varied directly, so their update function is a no-op
##' that returns the params list unchanged; the active slots are overwritten
##' below. Convention used by the setters (matching the original hand-written
##' closures): index 1 is the HIV-negative entry, index 3 the HIV-positive
##' (no ART) entry, and entries 2 and 4 are the ART-dependent mixture
##' x[c(2,4)] = x[1]*m + (1-m)*x[3] with m = `ART mulitplier`[1].
##' (The "mulitplier" spelling is intentional: it must match the key used in
##' the fitted params lists.)
##' @return list of 148 functions(para, new.value) -> updated params list
##' @author Andrew Azman
makeUpFuncs <- function(){
    art.key <- "ART mulitplier"  # (sic) key name as used in the params lists
    ## ART/no-ART mixture applied to slots 2 and 4
    art.mix <- function(para, hiv.neg, hiv.pos) {
        m <- para[[art.key]][1]
        hiv.neg*m + (1 - m)*hiv.pos
    }
    ## no-op updater: this parameter slot is not varied
    no.op <- function(para, new.value) para
    ## replace the whole 4-vector with the new value
    set.rep <- function(name) {
        force(name)
        function(para, new.value) {
            para[[name]] <- rep(new.value, 4)
            para
        }
    }
    ## set all four HIV-class entries in place
    set.all <- function(name) {
        force(name)
        function(para, new.value) {
            para[[name]][1:4] <- new.value
            para
        }
    }
    ## set the HIV-negative entry [1] and refresh the ART-dependent entries
    set.hiv.neg <- function(name) {
        force(name)
        function(para, new.value) {
            para[[name]][1] <- new.value
            para[[name]][c(2,4)] <- art.mix(para, para[[name]][1], para[[name]][3])
            para
        }
    }
    ## set the HIV-positive (no ART) entry [3] and refresh entries 2 and 4
    set.hiv.pos <- function(name) {
        force(name)
        function(para, new.value) {
            para[[name]][3] <- new.value
            para[[name]][c(2,4)] <- art.mix(para, para[[name]][1], para[[name]][3])
            para
        }
    }
    ## set a single entry of a parameter vector
    set.idx <- function(name, idx) {
        force(name); force(idx)
        function(para, new.value) {
            para[[name]][idx] <- new.value
            para
        }
    }
    ## zeta[1] setters also shift the matching mu so that zeta + mu = 1/3 in
    ## the HIV-negative class, then refresh the ART-dependent entries of both
    set.zeta.neg <- function(zeta.name, mu.name) {
        force(zeta.name); force(mu.name)
        function(para, new.value) {
            para[[zeta.name]][1] <- new.value
            para[[zeta.name]][c(2,4)] <- art.mix(para, para[[zeta.name]][1], para[[zeta.name]][3])
            para[[mu.name]][1] <- 1/3 - new.value
            para[[mu.name]][c(2,4)] <- art.mix(para, para[[mu.name]][1], para[[mu.name]][3])
            para
        }
    }
    ## default every slot to the no-op, then fill in the active slots
    up.funcs <- rep(list(no.op), 148)
    up.funcs[[1]]   <- set.rep("beta.sp")
    up.funcs[[5]]   <- set.rep("phi.sn")
    up.funcs[[9]]   <- set.hiv.neg("phi.l")
    up.funcs[[11]]  <- set.hiv.pos("phi.l")
    up.funcs[[13]]  <- set.all("phi.ps")
    up.funcs[[17]]  <- set.all("gamma.lf.ls")
    up.funcs[[21]]  <- set.all("gamma.rtx.ls")
    up.funcs[[25]]  <- set.all("gamma.tx.rtx")
    up.funcs[[29]]  <- set.hiv.neg("rho.lf")
    up.funcs[[31]]  <- set.hiv.pos("rho.lf")
    up.funcs[[33]]  <- set.hiv.neg("rho.ls")
    up.funcs[[35]]  <- set.hiv.pos("rho.ls")
    up.funcs[[37]]  <- set.all("rho.rel")
    up.funcs[[41]]  <- set.hiv.neg("rho.ps")
    up.funcs[[43]]  <- set.hiv.pos("rho.ps")
    up.funcs[[45]]  <- set.hiv.neg("pi.sp")
    up.funcs[[47]]  <- set.hiv.pos("pi.sp")
    up.funcs[[49]]  <- set.hiv.neg("pi.ep")
    up.funcs[[51]]  <- set.hiv.pos("pi.ep")
    up.funcs[[55]]  <- set.hiv.pos("mu.sp")
    up.funcs[[59]]  <- set.hiv.pos("mu.sn")
    up.funcs[[63]]  <- set.hiv.pos("mu.ep")
    up.funcs[[65]]  <- set.zeta.neg("zeta.sp","mu.sp")
    up.funcs[[67]]  <- set.hiv.pos("zeta.sp")
    up.funcs[[69]]  <- set.zeta.neg("zeta.sn","mu.sn")
    up.funcs[[71]]  <- set.hiv.pos("zeta.sn")
    up.funcs[[73]]  <- set.zeta.neg("zeta.ep","mu.ep")
    up.funcs[[75]]  <- set.hiv.pos("zeta.ep")
    up.funcs[[77]]  <- set.all("theta.sp")
    up.funcs[[81]]  <- set.all("theta.sn")
    up.funcs[[85]]  <- set.all("theta.ep")
    up.funcs[[113]] <- set.idx("foi.hiv", 1)
    up.funcs[[118]] <- set.idx("chi.elg", 2)
    up.funcs[[123]] <- set.idx("chi.tx", 3)
    up.funcs[[126]] <- set.idx("mu.hiv", 2)
    up.funcs[[127]] <- set.idx("mu.hiv", 3)
    up.funcs[[128]] <- set.idx("mu.hiv", 4)
    up.funcs[[145]] <- set.all(art.key)
    up.funcs
}
##' Helper function to generate an array of parameter values for the one-way
##' sensitivity analyses: one column per varied parameter, each a sequence
##' around the fitted value.
##' @param fits named (by country) list of fitted objects
##' @param country string of country
##' @param p maximum proportional change. NOTE(review): the lower end of each
##'   sequence is orig*p rather than orig*(1-p) -- confirm this asymmetry is
##'   intended. The default references the global max.pct.change.
##' @param seq.lengths number of points per parameter sequence (default
##'   references the global num.points)
##' @param true.param.index indices of the parameter slots actually varied.
##'   NOTE(review): the default `true.param.index=true.param.index` is
##'   self-referential and errors if the argument is not supplied.
##' @return matrix [seq.lengths x length(true.param.index)] of parameter values
genParamSeqs <- function(fits,country,
p=max.pct.change,
seq.lengths=num.points,
true.param.index=true.param.index){
param.seq.array <- array(dim=c(seq.lengths,length(true.param.index)))
for (i in seq_along(true.param.index)){
## fitted value of this slot (params flattened row-wise, 4 HIV classes per
## named parameter)
orig.value <- c(t(do.call(rbind,fits[[country]]$params)))[true.param.index[i]]
if (i %in% c(16:19,38)){ ## 38 is the ART multiplier
## probability-like parameters: cap the upper end of the range at 1
param.seq.array[,i] <- seq(orig.value*p,min(orig.value*(1+p),1),length=seq.lengths)
} else {
param.seq.array[,i] <- seq(orig.value*p,orig.value*(1+p),length=seq.lengths)
}
}
param.seq.array
}
##' For running one-way sensitivity analyses: each varied parameter (plus the
##' two treatment-cost inputs) is swept over a range while everything else is
##' held at its fitted value, and the resulting ICER is recorded.
##' @param country string of country ("india", "china" or "sa")
##' @param fits named (by country) list of fitted objects
##' @param max.pct.change maximum proportional change applied to each parameter
##' @param num.points number of values per parameter sweep
##' @param cost.per.case cost (USD) per extra case detected
##' @param analytic.horizon horizon (years) for the ICER calculation
##' @param min.tx.costs lower bound of the treatment-cost sweep
##' @param max.tx.costs upper bound of the treatment-cost sweep
##' @param min.mdr.tx.costs lower bound of the MDR treatment-cost sweep
##' @param max.mdr.tx.costs upper bound of the MDR treatment-cost sweep
##' @return list(out, param.array): out[i,j] is the ICER at the i-th value of
##'   the j-th varied parameter; the last two columns are the tx-cost and
##'   MDR-tx-cost sweeps, with param.array holding the matching input values
##' @author Andrew Azman
runOneWaySens <- function(country,
fits,
max.pct.change,
num.points=5,
cost.per.case=2000,
analytic.horizon = 5,
min.tx.costs,
max.tx.costs,
min.mdr.tx.costs,
max.mdr.tx.costs
){
up.funcs <- makeUpFuncs()
## identify the parameter slots whose update function actually changes the
## params list (the no-op updaters leave it identical)
true.params <-1 - sapply(up.funcs,function(x) all.equal(c(do.call(rbind,x(fits[[country]]$params,-10))),c(do.call(rbind,fits[[country]]$params))) == TRUE)
true.param.index <- which(true.params == 1)
original.values <- c(t(do.call(rbind,fits[[country]]$params)))[true.param.index]
seq.lengths <- num.points
fits.orig <- fits
## +2 columns: the treatment-cost and MDR-treatment-cost sweeps appended below
out <- array(dim=c(seq.lengths,length(true.param.index)+2))
## 1. Let's first explore how the ICER for fixed cost per case detected in a single country varies by parameter
param.array <- genParamSeqs(fits.orig,country,
p=max.pct.change,
seq.lengths = num.points,
true.param.index=true.param.index)
## get number of cases that will be detected
pct.increase.in.yr1 <- 0.25
cases.detected <- getIncreasedCasesDetected(TRUE,pct.increase.in.yr1)
## sweep each varied parameter over its range
for (j in 1:ncol(param.array)){
param.seq <- param.array[,j]
for (i in seq_along(param.seq)){
## update param and any additional dependent params (e.g. HIV states)
new.params <- up.funcs[[true.param.index[j]]](fits.orig[[country]]$params,param.seq[i])
fits[[country]]$params <- new.params
## run 2 year ACF
## note: pct.incidence is not actually used here; it is overridden by case.dt.dif
run <- runNYearACF(country,pct.incidence = 0.15,case.dt.dif=cases.detected,int.dur = 2,total.dur = 10,fits=fits)
## Calculate and store ICER
## NOTE(review): tx.cost.pc, tx.cost.partial.pc, tx.cost.mdr.pc, pct.mdr.pc
## and tx.cost.partial.mdr are read from the global environment
out[i,j] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
}
## next parameter value
}
## now for the cost sweeps; note these deliberately reuse `j`
## (= ncol(param.array)) left over from the loop above to index the two
## extra columns of `out`
tx.costs <- seq(min.tx.costs,max.tx.costs,length=seq.lengths)
mdr.tx.costs <- seq(min.mdr.tx.costs,max.mdr.tx.costs,length=seq.lengths)
for (i in 1:seq.lengths){
run <- runNYearACF(country,pct.incidence = 0.15,case.dt.dif=cases.detected,int.dur = 2,total.dur = 10,fits=fits.orig)
## Calculate and store ICER
out[i,j+1] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.costs[i],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
out[i,j+2] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = mdr.tx.costs[i],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
}
param.array <- cbind(param.array,tx.costs,mdr.tx.costs)
list(out,param.array)
}
##' Builds a tornado plot (one-way sensitivity of the ICER) as a pdf.
##' @param sens.mat matrix of ICERs from runOneWaySens (rows = parameter
##'   values, cols = varied parameters plus the two cost columns)
##' @param param.array matrix of parameter input values matching sens.mat
##' @param country string of country
##' @param fits.orig named (by country) list of original fitted objects
##' @param analytic.horizon horizon (years) used in the ICER calculations
##' @param cost.per.case cost per extra case detected (USD)
##' @param lwd line width of the tornado bars
##' @param top.n.params number of most influential parameters to display
##' @return pdf of tornado plot (written to Figures/)
##' @author Andrew Azman
makeTornadoPlot <- function(sens.mat,
                            param.array,
                            country,
                            fits.orig,
                            analytic.horizon,
                            cost.per.case,
                            lwd=10,
                            top.n.params=10){
    param.names <- as.matrix(read.csv("Data/param_names.csv", as.is=TRUE, header=FALSE))
    param.names <- paste0(rep(param.names, each=4), " [", 0:3, "]")
    up.funcs <- makeUpFuncs() # functions that help update parameters
    ## identify the parameter slots that actually change the params list.
    ## BUG FIX: these expressions previously referenced the global `fits`
    ## instead of the fits.orig argument
    true.params <- 1 - sapply(up.funcs, function(x)
        all.equal(c(do.call(rbind, x(fits.orig[[country]]$params, -10))),
                  c(do.call(rbind, fits.orig[[country]]$params))) == TRUE)
    true.param.index <- which(true.params == 1)
    original.values <- c(c(t(do.call(rbind, fits.orig[[country]]$params)))[true.param.index],
                         tx.cost.pc[country], tx.cost.mdr.pc[country])
    pdf(sprintf("Figures/oneway_sens_%s_%.fyr_%.fusd.pdf",
                country, analytic.horizon, cost.per.case),
        width=5, height=4)
    out <- sens.mat
    ## baseline ICER at the original parameter values.
    ## NOTE(review): case.dt.dif, tx.cost.pc, tx.cost.partial.pc,
    ## tx.cost.mdr.pc, pct.mdr.pc and tx.cost.partial.mdr are read from the
    ## global environment
    run <- runNYearACF(country, pct.incidence = 0.5,
                       case.dt.dif=case.dt.dif, int.dur = 2, total.dur = 10, fits=fits.orig)
    icer.orig <- calcICERFixedCosts(out=run,
                                    eval.times = 1:(10*analytic.horizon + 1),
                                    dtx.cost=case.dt.dif[country]*cost.per.case,
                                    tx.suc=c(1),
                                    tx.cost = tx.cost.pc[country],
                                    tx.cost.partial = tx.cost.partial.pc[country],
                                    tx.cost.mdr = tx.cost.mdr.pc[country],
                                    pct.mdr= pct.mdr.pc[country],
                                    tx.cost.partial.mdr = tx.cost.partial.mdr[country],
                                    params=fits.orig[[country]]$params)[2]
    print(icer.orig)  # was cat(print(...)), which printed the value twice
    ## panel 1 (right, wide): the tornado bars; panel 2 (left): labels
    layout(matrix(c(1,1,1,2,2,2,2,2,1,1,1,2,2,2,2,2), nrow=2, byrow=TRUE))
    par(mar=c(4.5,1,0,0))
    xlims <- c(min(out), max(out))
    plot(-100,-100, xlim=xlims, ylim=c(0,1), bty="n", yaxt="n", ylab="",
         xlab="Cost per DALY Averted (USD)")
    abline(v=icer.orig, col="grey", lty=2)
    ## sort parameters by the width of their ICER range (most influential last)
    param.order <- order(apply(out, 2, function(x) range(x)[2] - range(x)[1]))
    sorted.out <- out[,param.order]
    y.increment <- 1/min(ncol(out), top.n.params)
    ## do we start the iterations below from the lowest-impact params?
    start.iter <- ifelse(top.n.params > ncol(out), 1, ncol(out) - top.n.params)
    for (param in start.iter:ncol(out)){
        tmp.out <- sorted.out[,param]
        greater.than.orig <- param.array[,param] > original.values[param]
        extremes <- range(tmp.out)
        print(range(tmp.out))
        ## color each side of the bar by whether it came from a high (red) or
        ## low (blue) parameter value
        max.col <- ifelse(greater.than.orig[which.max(tmp.out)], "red", "blue")
        min.col <- ifelse(max.col == "red", "blue", "red")
        lines(x=c(extremes[1], icer.orig),
              y=c((param-start.iter)*y.increment, (param-start.iter)*y.increment),
              lwd=lwd, lend="butt", col=min.col)
        lines(x=c(icer.orig, extremes[2]),
              y=c((param-start.iter)*y.increment, (param-start.iter)*y.increment),
              lwd=lwd, lend="butt", col=max.col)
    }
    text(par("usr")[2]-par("usr")[2]*.1, .1, "High Value", col="red", cex=1)
    text(par("usr")[2]-par("usr")[2]*.1, .14, "Low Value", col="blue", cex=1)
    ## parameter value ranges, formatted for the labels
    ranges <- apply(param.array, 2, range)
    ranges <- apply(ranges, 2, function(x) sprintf("(%.2f,%.2f)", x[1], x[2]))
    ## label panel: parameter names and their swept ranges
    par(mar=c(4.5,0,0,0))
    plot(-100,-100, axes=FALSE, bty="n", xlim=c(-1,1), ylim=c(0,1), xlab="", ylab="")
    for (param in 1:ncol(out)) text(1, (param-start.iter)*y.increment,
                                    sprintf("%s %s",
                                            param.names[true.param.index[param.order[param]]],
                                            ranges[param.order[param]]),
                                    cex=.9, pos=4, offset=-22)
    dev.off()
}
##' Runs the Latin Hypercube (uncertainty) analysis for one country.
##' @param nsims number of LHS parameter draws / simulations
##' @param country string of country ("sa", "india" or "china")
##' @param param_range_prefix prefix of the csv (in Data/) holding the
##'   mode/min/max of each varied parameter
##' @param output_file_prefix prefix for the .rda output files
##' @param case.dt.dif named vector of extra cases detected per country.
##'   NOTE(review): the default `case.dt.dif=case.dt.dif` is self-referential
##'   and errors if the argument is not supplied.
##' @param orig.fits named (by country) list of fitted objects (defaults to
##'   the global `fits`)
##' @param per.person.dx.cost vector of per-person detection costs to sweep
##' @return saves (1) list of run outputs, (2) list of parameter lists,
##'   (3) the LHS draws and (4) the ICER array, all as time-stamped .rda files
##' @author Andrew Azman
runLHS <- function(nsims=10,
                   country="sa",
                   param_range_prefix="uncer_ranges_",
                   output_file_prefix="uncer_out",
                   case.dt.dif=case.dt.dif,
                   orig.fits=fits,
                   per.person.dx.cost=seq(1000,35000,length=300)
                   ){
    require(tgp)
    ## load in transformation functions that deal with dependent params
    up.funcs <- makeUpFuncs()
    params.minmax <- as.matrix(read.csv(paste0("Data/",param_range_prefix,country,".csv"),row.names=1),ncol=4)
    ## identify the parameter slots whose updater actually changes anything
    true.params <- 1 - sapply(up.funcs,function(x) all.equal(
        unlist(x(orig.fits[[country]]$params,-10)),
        unlist(orig.fits[[country]]$params)) == TRUE)
    true.param.index <- which(true.params == 1)
    param.names <- paste0(rep(names(orig.fits[[country]]$params),each=4),
                          rep(c("_n","_h","_hArt","_hNoART"),
                              length(orig.fits[[country]]$params)))
    ## make the LHS draws (triangular marginals: mode, min, max from the csv)
    lhs.draws <- lhs(n=nsims,
                     params.minmax[,2:3],
                     shape=rep(3,nrow(params.minmax)),
                     mode=params.minmax[,1])
    ## BUG FIX: was list("vector",nsims), which created a 2-element list whose
    ## stray elements survived into the saved output when nsims < 2
    runs <- vector("list",nsims)
    new.params <- vector("list",nsims)
    ## Run a two year ACF for every draw and store the results
    ## (an earlier version also filtered on baseline incidence at year 10)
    for (i in 1:nrow(lhs.draws)){
        if (i %% 100 == 0) cat(".")
        ## make the parameter list for this draw
        ## (BUG FIX: was `countr=`, relying on partial argument matching)
        new.params[[i]] <- updateParams(new.values=lhs.draws[i,],
                                        param.indices=true.param.index,
                                        country=country,
                                        fits=orig.fits)
        tmp.fits <- orig.fits
        tmp.fits[[country]]$params <- new.params[[i]]
        runs[[i]] <- runNYearACF(country,
                                 pct.incidence=.15,
                                 case.dt.dif=case.dt.dif,
                                 int.dur = 2,
                                 total.dur = 10,
                                 fits=tmp.fits)
    }
    ## store as time-stamped lists of runs / params / draws
    unix.time.stamp <- sprintf("%.0f",as.numeric(Sys.time()))
    save(runs,file=paste0(output_file_prefix,"_",country,"_runs_",unix.time.stamp,".rda"))
    save(new.params,file=paste0(output_file_prefix,"_",country,"_params_",unix.time.stamp,".rda"))
    save(lhs.draws,file=paste0(output_file_prefix,"_",country,"_lhsdraws_",unix.time.stamp,".rda")) # matrix of the LHS samples (includes the cost)
    ## post-process: ICER for each (detection cost) x (horizon) x (draw)
    horizons <- c(2,5,10)
    ## generalized from the previous hard-coded dim=c(300,3,nsims)
    out <- array(dim=c(length(per.person.dx.cost),length(horizons),nsims))
    cat(" \n post-processing \n")  # was print(), which showed the escapes literally
    for (i in 1:nsims){
        cat("*")
        for (h in seq_along(horizons)){
            for (t in seq_along(per.person.dx.cost)){
                out[t,h,i] <-
                    calcICERFixedCosts(out=runs[[i]],
                                       eval.times = 1:(horizons[h]*10+1),
                                       ## BUG FIX: was `case.dt.df` (undefined object)
                                       dtx.cost=case.dt.dif[country]*per.person.dx.cost[t],
                                       tx.suc=c(1),
                                       tx.cost = tx.cost.pc[country],
                                       tx.cost.partial = tx.cost.partial.pc[country],
                                       tx.cost.mdr = tx.cost.mdr.pc[country],
                                       pct.mdr= pct.mdr.pc[country],
                                       tx.cost.partial.mdr = tx.cost.partial.mdr[country],
                                       params=new.params[[i]])[2]
            }
        }
    }
    save(out,file=paste0(output_file_prefix,"_",country,"_icers_",unix.time.stamp,".rda"))
}
##' Updates the parameter list with a set of new values (e.g. from LHS draws).
##' @param new.values numeric vector of new parameter values
##' @param param.indices integer indices (into the makeUpFuncs() list) that
##'   new.values correspond to, in the same order
##' @param country string naming the country entry of fits
##' @param fits named (by country) list of fitted objects
##' @return list of params suitable for model runs
##' @author Andrew Azman
updateParams <- function(new.values, param.indices, country, fits){
    updaters <- makeUpFuncs() # functions that update each parameter slot
    ## fold each (index, value) pair through its updater, starting from the
    ## country's fitted parameter list; applying them one at a time keeps any
    ## dependent parameters consistent (inefficient, but matches how the
    ## updaters were designed)
    Reduce(function(current.params, k) {
               updaters[[param.indices[k]]](current.params, new.values[k])
           },
           seq_along(param.indices),
           init = fits[[country]]$params)
}
##' Gets the number of extra cases that need to be detected: a number of cases
##' equal to pct.first.yr x either the projected cases detected in the first
##' year (case.det.based == TRUE) or WHO-estimated incidence
##' (case.det.based == FALSE).
##' @param case.det.based logical; base the target on the modeled first-year
##'   case detections (TRUE) or on WHO 2011 incidence (FALSE)
##' @param pct.first.yr fraction of the baseline quantity to target
##' @return named vector with number of cases for each country
##' @author Andrew Azman
getIncreasedCasesDetected <- function(case.det.based=TRUE,pct.first.yr=0.25){
    if (case.det.based){
        ## increase the number of cases detected by pct.first.yr of the
        ## modeled steady state / first year ("N." columns count detections).
        ## Uses the globals fit.{china,india,sa}.2011.
        first.yr.detected <- function(fit){
            trial <- runTBHIVMod(fit$params, fit$state, 1, var.beta=F)
            ## BUG FIX: the sa/china sums previously grepped on india's
            ## colnames; use each run's own colnames
            sum(tail(trial[, grep("N.", colnames(trial))], 1))
        }
        case.dt.dif <- c("china" = round(first.yr.detected(fit.china.2011)*pct.first.yr, 0),
                         "india" = round(first.yr.detected(fit.india.2011)*pct.first.yr, 0),
                         "sa"    = round(first.yr.detected(fit.sa.2011)*pct.first.yr, 0))
    } else {
        ## incidence based
        case.dt.dif <- c("china" = round(getWHOStats("China",2011)[,"e_inc_100k"]*pct.first.yr, 0),
                         "india" = round(getWHOStats("India",2011)[,"e_inc_100k"]*pct.first.yr, 0),
                         "sa"    = round(getWHOStats("South Africa",2011)[,"e_inc_100k"]*pct.first.yr, 0))
    }
    return(case.dt.dif)
}
| /Code/ACF-base.R | no_license | scottyaz/CostOfActiveCaseFinding | R | false | false | 91,397 | r | ## These are some of the core functions used in the analyses
## Some initial setup
library(RColorBrewer)
palette(brewer.pal(8,"Dark2"))
library("rootSolve")
library("deSolve")
library(xtable)
library(fields)
############################
## The model ###
###########################
##' Single age-class compartmental model for adult TB, stratified by HIV class.
##' TB compartments (9): S susceptible, Lf latent fast, Ls latent slow,
##' Ps presymptomatic, Asp smear-positive active, Asn smear-negative active,
##' Aep extrapulmonary active, Tx on treatment, Rtx post-treatment.
##' Each compartment is a 1 x 4 array over HIV classes; the
##' c(0, (X*rate)[-hivc]) terms move individuals up one HIV class (HIV
##' infection via foi.hiv, ART eligibility via chi.elg, ART initiation via
##' chi.tx) -- exact class semantics inferred from the parameter names;
##' TODO confirm. This model has an explicit Tx compartment and a
##' presymptomatic compartment.
##' @param t time (years); the system is autonomous so t is unused directly
##' @param y numeric state vector: 9 TB compartments x 4 HIV classes
##' @param parms named list of rate parameters, each a length-4 vector over
##'   the HIV classes
##' @return list containing the derivative vector (deSolve convention)
##' @author Andrew Azman
dxdt.TBHIV3 <- function(t,y,parms){
with(as.list(c(parms,y)),{
ac <- 1
hivc <- 4
tbc <- 9
inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #indices for state arrays below
S <- array(y[1:(inds[2]-1)],dim=c(ac,4))
Lf <- array(y[inds[2]:(inds[3]-1)],dim=c(ac,4))
Ls <- array(y[inds[3]:(inds[4]-1)],dim=c(ac,4))
Ps <- array(y[inds[4]:(inds[5]-1)],dim=c(ac,4))
Asp <- array(y[inds[5]:(inds[6]-1)],dim=c(ac,4))
Asn <- array(y[inds[6]:(inds[7]-1)],dim=c(ac,4))
Aep <- array(y[inds[7]:(inds[8]-1)],dim=c(ac,4))
Tx <- array(y[inds[8]:(inds[9]-1)],dim=c(ac,4))
Rtx <- array(y[inds[9]:(inds[10]-1)],dim=c(ac,4))
N <- sum(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx)
## TB force of infection: smear-positive cases transmit at beta.sp;
## smear-negative and presymptomatic cases at the relative rates phi.sn and
## phi.ps. (The HIV force of infection foi.hiv is a fixed parameter here.)
## may want to add a real hiv force of infection here later
foi <- as.numeric(Asp %*% c(beta.sp/N*rep(1,4)) +
Asn %*% c((beta.sp/N)*phi.sn) +
Ps %*% c((beta.sp/N)*phi.ps))
## combined case-detection rates: baseline theta.* plus intervention theta.*I
theta.sp.c <- theta.sp + theta.spI
theta.sn.c <- theta.sn + theta.snI
theta.ep.c <- theta.ep + theta.epI
dS <- dLf <- dLs <- dPs <- dAsp <- dAsn <- dAep <- dTx <- dRtx <- array(0,dim=c(ac,hivc))
## susceptibles (all HIV classes): per-capita growth nu minus TB infection,
## HIV progression, HIV mortality and ageing (delta), plus inflow from the
## previous HIV class
dS <- S*(nu - foi - foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
c(0,(S*foi.hiv)[-hivc]) +
c(0,(S*chi.elg)[-hivc]) +
c(0,(S*chi.tx)[-hivc])
## keeping population size constant: all deaths/ageing re-enter as
## HIV-negative susceptibles
dS[1,1] <- dS[1,1] +
Asp %*% mu.sp + Asn %*% mu.sn + Aep %*% mu.ep + ## TB Deaths
(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx) %*% delta + ## Old Age
(S + Lf + Ls + Ps + Asp + Asn + Aep + Tx + Rtx) %*% mu.hiv ## HIV Deaths
## Latent fast: new infections (including re-infection of Ls/Rtx at the
## partial-immunity multiplier phi.l); stabilizes to Ls at gamma.lf.ls and
## progresses toward disease at rho.lf
dLf <-Lf*(nu - gamma.lf.ls - rho.lf -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
foi*(Ls*phi.l + Rtx*phi.l + S) +
c(0,(Lf*foi.hiv)[-hivc]) +
c(0,(Lf*chi.elg)[-hivc]) +
c(0,(Lf*chi.tx)[-hivc])
## Latent slow (remote infection): reactivates at rho.ls; re-infection
## (foi*phi.l) moves individuals back to Lf
dLs <- Ls*(nu - foi*phi.l - rho.ls -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Lf * gamma.lf.ls +
Rtx*gamma.rtx.ls +
c(0,(Ls*foi.hiv)[-hivc]) +
c(0,(Ls*chi.elg)[-hivc]) +
c(0,(Ls*chi.tx)[-hivc])
## Pre-symptomatic period: progresses to active disease at rho.ps and
## self-clears at the smear-negative rate zeta.sn (matching the
## (Asn + Ps)*zeta.sn inflow into Rtx below)
dPs <- Ps*(nu - rho.ps - zeta.sn -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Lf*rho.lf + Ls*rho.ls +
c(0,(Ps*foi.hiv)[-hivc]) +
c(0,(Ps*chi.elg)[-hivc]) +
c(0,(Ps*chi.tx)[-hivc])
## Active disease: a fraction pi.ep of progressors/relapses becomes
## extrapulmonary; the remainder is smear-positive with probability pi.sp.
## Active cases leave via TB death (mu.*), detection (theta.*.c) or
## self-cure (zeta.*).
## Smear Positive
dAsp <- Asp*(nu - mu.sp - theta.sp.c - zeta.sp -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*pi.sp*(1-pi.ep) +
c(0,(Asp*foi.hiv)[-hivc]) +
c(0,(Asp*chi.elg)[-hivc]) +
c(0,(Asp*chi.tx)[-hivc])
## Smear Negative
dAsn <- Asn*(nu - mu.sn - theta.sn.c - zeta.sn -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*(1-pi.sp)*(1-pi.ep) +
c(0,(Asn*foi.hiv)[-hivc]) +
c(0,(Asn*chi.elg)[-hivc]) +
c(0,(Asn*chi.tx)[-hivc])
## Extrapulmonary
dAep <- Aep*(nu - mu.ep - theta.ep.c - zeta.ep -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
(Ps*rho.ps + Rtx*rho.rel)*pi.ep+
c(0,(Aep*foi.hiv)[-hivc]) +
c(0,(Aep*chi.elg)[-hivc]) +
c(0,(Aep*chi.tx)[-hivc])
## On treatment: detected cases enter; completion moves to Rtx at gamma.tx.rtx
dTx <- Tx*(nu - gamma.tx.rtx -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Asp*theta.sp.c +
Asn*theta.sn.c +
Aep*theta.ep.c +
c(0,(Tx*foi.hiv)[-hivc]) +
c(0,(Tx*chi.elg)[-hivc]) +
c(0,(Tx*chi.tx)[-hivc])
## Post-treatment: self-cures and treatment completers; may relapse
## (rho.rel), revert to slow latency (gamma.rtx.ls) or be re-infected
## (foi*phi.l, back to Lf)
dRtx <- Rtx*(nu - gamma.rtx.ls - rho.rel - foi*phi.l -
foi.hiv - mu.hiv - chi.elg - chi.tx - delta) +
Asp*zeta.sp + (Asn + Ps)*zeta.sn + Aep*zeta.ep +
Tx*(gamma.tx.rtx) +
c(0,(Rtx*foi.hiv)[-hivc]) +
c(0,(Rtx*chi.elg)[-hivc]) +
c(0,(Rtx*chi.tx)[-hivc])
list(c(dS,dLf,dLs,dPs,dAsp,dAsn,dAep,dTx,dRtx))
})}
##' Wraps the dxdt.TBHIV3 ODEs and appends summary statistics (cumulative
##' incidence, TB deaths, detected cases and re-treatment entries) to each
##' time step so they are integrated alongside the compartments.
##' @param t time (years)
##' @param state state vector: the 36 compartments of dxdt.TBHIV3 followed by
##'   the running summary statistics
##' @param params named list of rate parameters
##' @return list with the vector of derivatives: compartment derivatives
##'   first, then the summary-statistic increments
##' @author Andrew Azman
dxdt.TBHIV.CI <- function(t,state,params){
    ## a little pre-processing: unpack the 9 TB compartments x 4 HIV classes
    ac <- 1 ## number of age classes
    hivc <- 4
    tbc <- 9
    inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #start indices for the state arrays
    S <- array(state[1:(inds[2]-1)],dim=c(ac,4))
    Lf <- array(state[inds[2]:(inds[3]-1)],dim=c(ac,4))
    Ls <- array(state[inds[3]:(inds[4]-1)],dim=c(ac,4))
    Ps <- array(state[inds[4]:(inds[5]-1)],dim=c(ac,4))
    Asp <- array(state[inds[5]:(inds[6]-1)],dim=c(ac,4))
    Asn <- array(state[inds[6]:(inds[7]-1)],dim=c(ac,4))
    Aep <- array(state[inds[7]:(inds[8]-1)],dim=c(ac,4))
    Tx <- array(state[inds[8]:(inds[9]-1)],dim=c(ac,4))
    Rtx <- array(state[inds[9]:(inds[10]-1)],dim=c(ac,4))
    with(as.list(c(state,params)),{
        ## incident TB by HIV class: fast + slow progression + relapse
        dCI <- c((Lf * rho.lf) + (Ls * rho.ls) + (Rtx * rho.rel)) #1x4
        dCIall <- sum(dCI) #1x1 sum of all incident tb types
        ## TB deaths by HIV class.
        ## BUG FIX: extrapulmonary deaths previously used mu.sn; use mu.ep
        ## for consistency with the death terms in dxdt.TBHIV3
        dMtb <- c((Asp * mu.sp) + (Asn * mu.sn) + (Aep * mu.ep))
        ## cases detected of each type (this is what we fit to)
        dN.Asp <- c(Asp * (theta.sp + theta.spI)) #1x4
        dN.Asn <- c(Asn * (theta.sn + theta.snI)) #1x4
        dN.Aep <- c(Aep * (theta.ep + theta.epI)) #1x4
        dReTx <- c(Rtx * rho.rel) #1x4 relapses entering re-treatment
        c(dCI,dCIall,dMtb,dN.Asp,dN.Asn,dN.Aep,dReTx)
    }) -> dInds
    ## run the TB compartment model itself
    TBout <- dxdt.TBHIV3(t,state,params)
    ## return compartment derivatives followed by the summary increments
    rc <- list(c(TBout[[1]],dInds))
    return(rc)
}
##' Wraps the dxdt.TBHIV3 ODEs, appends the same summary statistics as
##' dxdt.TBHIV.CI, and lets transmission vary over time: beta.sp is scaled by
##' exp(beta.delta * t) before the model is evaluated.
##' @param t time (years)
##' @param state state vector: the 36 compartments of dxdt.TBHIV3 followed by
##'   the running summary statistics
##' @param params named list of rate parameters; must include beta.delta
##' @return list with the vector of derivatives: compartment derivatives
##'   first, then the summary-statistic increments
##' @author Andrew Azman
dxdt.TBHIV.CI.var.beta <- function(t,state,params){
    ## a little pre-processing: unpack the 9 TB compartments x 4 HIV classes
    ac <- 1 ## number of age classes
    hivc <- 4
    tbc <- 9
    inds <- seq(1,tbc*hivc*ac+1,by=hivc*ac) #start indices for the state arrays
    S <- array(state[1:(inds[2]-1)],dim=c(ac,4))
    Lf <- array(state[inds[2]:(inds[3]-1)],dim=c(ac,4))
    Ls <- array(state[inds[3]:(inds[4]-1)],dim=c(ac,4))
    Ps <- array(state[inds[4]:(inds[5]-1)],dim=c(ac,4))
    Asp <- array(state[inds[5]:(inds[6]-1)],dim=c(ac,4))
    Asn <- array(state[inds[6]:(inds[7]-1)],dim=c(ac,4))
    Aep <- array(state[inds[7]:(inds[8]-1)],dim=c(ac,4))
    Tx <- array(state[inds[8]:(inds[9]-1)],dim=c(ac,4))
    Rtx <- array(state[inds[9]:(inds[10]-1)],dim=c(ac,4))
    ## secular trend in transmission: scale beta.sp exponentially in time
    params$beta.sp <- params$beta.sp*exp(params$beta.delta*t)
    with(as.list(c(state,params)),{
        ## incident TB by HIV class: fast + slow progression + relapse
        dCI <- c((Lf * rho.lf) + (Ls * rho.ls) + (Rtx * rho.rel)) #1x4
        dCIall <- sum(dCI) #1x1
        ## TB deaths by HIV class.
        ## BUG FIX: extrapulmonary deaths previously used mu.sn; use mu.ep
        ## for consistency with the death terms in dxdt.TBHIV3
        dMtb <- c((Asp * mu.sp) + (Asn * mu.sn) + (Aep * mu.ep))
        ## cases detected of each type in the formal sector (fitted to)
        dN.Asp <- c(Asp * (theta.sp + theta.spI)) #1x4
        dN.Asn <- c(Asn * (theta.sn + theta.snI)) #1x4
        dN.Aep <- c(Aep * (theta.ep + theta.epI)) #1x4
        dReTx <- c(Rtx * rho.rel) #1x4
        c(dCI,dCIall,dMtb,dN.Asp,dN.Asn,dN.Aep,dReTx)
    }) -> dInds
    ## run the TB model with the time-adjusted beta
    TBout <- dxdt.TBHIV3(t,state,params)
    ## return compartment derivatives followed by the summary increments
    rc <- list(c(TBout[[1]],dInds))
    return(rc)
}
######################
## Helper functions ##
######################
##' Adds column (or element) names to raw output from deSolve::ode.
##'
##' Names follow the compartment order used by the model state vector: for
##' each TB compartment (S, Lf, Ls, Ps, Asp, Asn, Aep, Tx, Rtx) the four HIV
##' classes ("", "h", "a", "n") each contribute one entry per age class.
##' @param mod matrix of model output (or a single state vector)
##' @param time if TRUE, the first column/element is labeled "time"
##' @param ext if TRUE, also name the appended cumulative-statistic columns
##'   (CI, CIall, Mtb, N.Asp, N.Asn, N.Aep, ReTx)
##' @param ac number of age classes
##' @return `mod` with column names (matrix input) or names (vector input) set
##' @author Andrew Azman
addColNames <- function(mod,time=TRUE,ext=FALSE,ac=1){
    ## HIV classes: negative ("") and the h/a/n prefixed classes
    prefixes <- c("","h","a","n")
    ## expand one compartment label into all HIV-class/age-class names,
    ## HIV class varying fastest within a compartment, age class innermost
    expand <- function(comp) {
        unlist(lapply(prefixes, function(p) paste0(p, comp, seq_len(ac))))
    }
    compartments <- c("S","Lf","Ls","Ps","Asp","Asn","Aep","Tx","Rtx")
    nms <- c(if (time) "time", unlist(lapply(compartments, expand)))
    if (ext) {
        ## cumulative statistics appended by the *.CI derivative functions:
        ## incidence by HIV class, total incidence, TB deaths, detected
        ## cases by TB type, and re-treatments
        nms <- c(nms, expand("CI"), "CIall",
                 unlist(lapply(c("Mtb","N.Asp","N.Asn","N.Aep","ReTx"), expand)))
    }
    if (!is.null(nrow(mod))){
        colnames(mod) <- nms
    } else {
        names(mod) <- nms
    }
    return(mod)
}
##' Reads a country's parameter table from Data/<country>_params.csv and
##' returns it as a named list with one vector per parameter.
##' @param country country name used to build the file name (assumes the
##'   common "Data/<country>_params.csv" layout)
##' @param cols columns of the csv holding the parameter values
##' @return named list; each element is the value vector for one parameter
##' @author Andrew Azman
make.params <- function(country,cols=2:5){
    path <- sprintf("Data/%s_params.csv",country)
    raw <- read.csv(path)
    values <- raw[cols]
    ## first column holds the parameter names
    rownames(values) <- raw[,1]
    ## transpose so that each parameter (row) becomes one list element
    as.list(as.data.frame(t(values)))
}
##' Runs the TB/HIV model ODEs from a given starting state.
##' @param params list of model parameters
##' @param initial.state starting state vector
##' @param max.time number of years to simulate
##' @param var.beta if TRUE, use the derivative function with time-varying beta
##' @return output of lsoda or other ode solver (rows = 0.1-year time steps)
runTBHIVMod <- function(params,
                        initial.state,
                        max.time=1,
                        var.beta = FALSE
                        ){
    library(deSolve)
    ## evaluate every tenth of a year
    eval.times <- seq(0, max.time, by = 0.1)
    ## pick the derivative function: fixed beta or exponentially varying beta
    deriv.fun <- if (var.beta) dxdt.TBHIV.CI.var.beta else dxdt.TBHIV.CI
    ode(initial.state, eval.times, deriv.fun, params)
}
##' Extracts the columns of a model-output matrix that correspond to
##' prevalent TB cases (active disease Asp/Asn/Aep, on treatment Tx, or
##' pre-symptomatic Ps), optionally restricted to the HIV-positive classes.
##' @param run.mat matrix (or a single named row vector) with model column names
##' @param hiv.only if TRUE, keep only the HIV-positive classes (h/a/n prefixes)
##' @return matrix (or vector, if only one row/column survives) of the
##'   prevalent-case columns
##' @author Andrew Azman
getPrevCols <- function(run.mat,hiv.only=FALSE){
    ## in case it is a vector, promote to a 1-row matrix so column ops work
    if (is.null(nrow(run.mat))) run.mat <- t(as.matrix(run.mat))
    if (hiv.only){
        ## HIV-positive classes only: names prefixed with n, a or h
        run.mat[,grep("(n|a|h)(A(sp|sn|ep)|Tx|Ps)1$",colnames(run.mat))]
    } else {
        ## all classes: the "^" alternative also matches the unprefixed
        ## (HIV-negative) compartments
        run.mat[,grep("(n|a|h|^)(A(sp|sn|ep)|Tx|Ps)1$",colnames(run.mat))]
    }
}
##' Objective function for fitting beta.sp and/or theta.sp to a target
##' incidence plus either a target case detection rate or TB prevalence.
##'
##' Runs the model to steady state with the proposed parameters, then one
##' year forward, and scores the fit against the targets.
##' @param params.fit proposed value(s): c(beta.sp, theta.sp), or a single
##'   value when beta.or.theta selects one parameter
##' @param params full model parameter list
##' @param state model state to start from (first 36 entries = compartments)
##' @param target.ci target annual TB incidence
##' @param target.cdr target case detection rate (proportion)
##' @param target.prev.tb target TB prevalence; when supplied by the caller,
##'   prevalence is used in place of the CDR in the objective
##' @param plot.it if TRUE, add the current objective value to an open plot
##' @param beta.or.theta "beta" or "theta" to fit only that parameter
##' @param weight.ci weight on the incidence term of the objective
##' @param weight.other weight on the CDR/prevalence term
##' @return the (weighted) objective value; Inf when the objective is NaN or
##'   sits exactly at its degenerate no-transmission value
##' @author Andrew Azman
incObFunc <- function(params.fit,
                      params,
                      state,
                      target.ci,
                      target.cdr,
                      target.prev.tb,
                      plot.it=FALSE,
                      beta.or.theta="",
                      weight.ci = 3,
                      weight.other=1
                      ){
    ## apply the proposed parameter value(s)
    if (length(params.fit) == 1 && !missing(beta.or.theta)){
        if (beta.or.theta == "beta") params$beta.sp <- rep(params.fit,4)
        else if (beta.or.theta == "theta") params$theta.sp <- rep(params.fit,4)
        else stop("beta.or.theta is misspecified")
    } else {
        params$beta.sp <- rep(params.fit[1],4)
        params$theta.sp <- rep(params.fit[2],4)
    }
    ## assuming that the case detection rate of ep is same as sp
    ## and sn likewise (multiplier currently 1)
    ep.sn.mult <- 1
    params$theta.ep <- params$theta.sp*ep.sn.mult
    params$theta.sn <- params$theta.sp*ep.sn.mult
    ## Run to steady state; fall back to the supplied state on failure.
    ## BUGFIX: the original assigned ss.vals inside the error handler (which
    ## never propagates out) and then crashed on the never-assigned RS; it
    ## also passed e$message through sprintf, which fails on "%" characters.
    RS <- tryCatch(
        runsteady(y=state[1:36],
                  fun=dxdt.TBHIV3,
                  parms=params,
                  verbose=FALSE),
        error = function(e){
            cat(conditionMessage(e))
            NULL
        }
    )
    if (!is.null(RS) && attr(RS,"steady")){
        ss.vals <- c(RS$y,state[37:length(state)])
    } else {
        print("Couldn't reach steady state but proceeding to next set of parameters in optimization")
        ss.vals <- state
    }
    ## one-year run from (approximate) steady state;
    ## row 1 = year 0.0, row 11 = year 1.0 at 0.1-year steps
    run <- runTBHIVMod(params,initial.state=ss.vals,max.time=1,var.beta=FALSE)
    run <- addColNames(run,ext=TRUE,time=TRUE)
    ci <- run[11,"CIall"] - run[1,"CIall"]
    if (!missing(target.prev.tb)){
        ## fit to prevalence (and possibly incidence)
        prev <- sum(getPrevCols(run)[11,])
        if (!missing(beta.or.theta) && beta.or.theta == "theta"){
            obj <- ((prev/target.prev.tb) - 1)^2
            obj.no.trans <- 1 # objective value when there is no transmission
        } else if (!missing(beta.or.theta) && beta.or.theta == "beta"){
            obj <- ((ci/target.ci) - 1)^2
            obj.no.trans <- 1
        } else {
            obj <- weight.ci*((ci/target.ci) - 1)^2 + weight.other*((prev/target.prev.tb) - 1)^2
            obj.no.trans <- 2
        }
        print(c(ci,target.ci=target.ci,prev=prev,target.prev=target.prev.tb))
    } else {
        ## fit to cases detected / CDR: change in cumulative detections
        ## (all TB types, all HIV classes) over the year
        cd <- (run[11,grep("N.Asp",colnames(run))] +
               run[11,grep("N.Asn",colnames(run))] +
               run[11,grep("N.Aep",colnames(run))]) -
            (run[1,grep("N.Asp",colnames(run))] +
             run[1,grep("N.Asn",colnames(run))] +
             run[1,grep("N.Aep",colnames(run))])
        ## but we really want to fit to cases detected which is not implicitly a function of ci
        cd.num <- sum(cd)
        cdr <- (sum(cd)/ci)*100
        cd.num.target <- target.cdr*target.ci
        print(c(ci,target.ci=target.ci,cdr=cdr,target.cdr=100*target.cdr))
        if (!missing(beta.or.theta) && beta.or.theta == "theta"){
            obj <- (cdr - target.cdr*100)^2
            obj.no.trans <- 1000000 # objective value when there is no transmission
        } else if (!missing(beta.or.theta) && beta.or.theta == "beta"){
            print("beta")
            obj <- (ci - target.ci)^2
            obj.no.trans <- 1000000
        } else {
            obj <- weight.ci*((ci/target.ci) - 1)^2 + weight.other*((cd.num/cd.num.target) - 1)^2
            obj.no.trans <- 2
        }
    }
    print(c(params$beta.sp[1],params$theta.sp[1]))
    ## a degenerate (no-transmission) run leaves the objective exactly at
    ## obj.no.trans; treat that and NaN as infinitely bad
    if (is.nan(obj) || obj == obj.no.trans) obj <- Inf
    cat(sprintf("objective func = %f \n",obj))
    if (plot.it){
        points(params$theta.sp[1],obj,col=2)
    }
    return(obj) # may think about scaling the objective function
}
##' Fits beta.sp (transmission) and theta.sp (detection) to target incidence
##' and case detection rate by alternating one-dimensional (Brent)
##' optimizations of each parameter until neither changes by more than
##' epsilon.cdr.inc.target (relative change) between rounds.
##' @param initial.state model state to start each fitting run from
##' @param params full model parameter list
##' @param target.ci target annual TB incidence
##' @param target.cdr target case detection rate (proportion)
##' @param epsilon.cdr.inc.target relative-change convergence tolerance
##' @return list(final.pars = fitted parameter list, ss = steady state under
##'   the fitted parameters)
##' @author Andrew Azman
fitIncCDR <- function(initial.state,
params,
target.ci,
target.cdr,
epsilon.cdr.inc.target=0.1
){
require("rootSolve")
## set all theta's to theta sp
fit.pars <- c(params$beta.sp[1],params$theta.sp[1])
print(fit.pars)
##fit each separately and iterate between them
epsilon.cdr.inc <- Inf
while (epsilon.cdr.inc >= epsilon.cdr.inc.target){
cur.beta <- params$beta.sp[1]
cur.theta <- params$theta.sp[1]
## 1-D fit of beta with theta held fixed
## NOTE(review): fit.pars is never updated inside the loop, so every
## iteration restarts the optimizations from the original values — confirm
## this is intended.
out.beta <- optim(fit.pars[1],
fn=incObFunc,
params=params,
state=initial.state,
target.ci=target.ci,
target.cdr=target.cdr,
beta.or.theta = "beta",
method="Brent",
lower=2,upper=100, #optimization is finicky! adjust lower bound
control=list(trace=T,abstol=1))
#update beta
params$beta.sp <- rep(out.beta$par,4)
## 1-D fit of theta with the updated beta held fixed
out.theta <- optim(fit.pars[2],
fn=incObFunc,
params=params,
state=initial.state,
target.ci=target.ci,
target.cdr=target.cdr,
beta.or.theta = "theta",
method="Brent",
lower=0.1,
upper=2.5, #optimization is finicky! adjust lower bound
control=list(trace=T,abstol=1))
## update thetas
ep.sn.mult <- 1 ## Assuming equal impact on all tb types
params$theta.sp <- rep(out.theta$par,4)
params$theta.sn <- ep.sn.mult*rep(out.theta$par,4)
params$theta.ep <- ep.sn.mult*rep(out.theta$par,4)
## now calculate the maximum relative change across both parameters
epsilon.cdr.inc <- max(c(abs(cur.theta - out.theta$par)/cur.theta,abs(cur.beta - out.beta$par)/cur.beta))
}
## run to steady state under the fitted parameters
tryCatch(RS <- runsteady(y=initial.state,fun=dxdt.TBHIV.CI,parms=params,times=c(0,10000),verbose=F),
error = function(e){
stop("Sorry can't reach steady state from optimized params")
})
ss.vals <- RS$y
return(list(final.pars=params,ss=ss.vals))
}
##' Fits theta.sp and beta.sp jointly (L-BFGS-B) to target TB prevalence
##' and incidence.
##' @param initial.state model state to start the fitting runs from
##' @param params full model parameter list
##' @param target.ci target annual TB incidence
##' @param target.prev.tb target TB prevalence
##' @param lowers lower bounds c(beta, theta) for the optimizer
##' @param uppers upper bounds c(beta, theta) for the optimizer
##' @return list(final.pars = fitted parameter list, ss = steady state under
##'   the fitted parameters)
##' @author Andrew Azman
fitIncPrev <- function(initial.state,
params,
target.ci,
target.prev.tb,
lowers=c(4,.1),
uppers=c(20,7)
){
require("rootSolve")
## set all theta's to theta sp
fit.pars <- c(params$beta.sp[1],params$theta.sp[1])
print(fit.pars)
## joint 2-D optimization of beta.sp and theta.sp
out <- optim(fit.pars,
fn=incObFunc,
params=params,
state=initial.state,
target.ci=target.ci,
target.prev.tb=target.prev.tb,
method="L-BFGS-B",
lower=lowers,upper=uppers, #optimization is finicky! adjust lower bound
control=list(trace=T,parscale=c(10,1),maxit=1000))
## copy the fitted values into a full parameter list
final.pars <- params
final.pars$beta.sp <- rep(out$par[1],4)
final.pars$theta.sp <- rep(out$par[2],4)
## assume sn/ep detection scales 1:1 with sp
ep.sn.mult <- 1
final.pars$theta.ep <- final.pars$theta.sp*ep.sn.mult
final.pars$theta.sn <- final.pars$theta.sp*ep.sn.mult
## steady state under the fitted parameters
tryCatch(RS <- runsteady(y=initial.state,fun=dxdt.TBHIV.CI,parms=final.pars,times=c(0,10000),verbose=F),
error = function(e){
stop("Sorry can't reach steady state from optimized params")
})
ss.vals <- RS$y
return(list(final.pars=final.pars,ss=ss.vals))
}
##' Runs the model twice from the same starting state: once unchanged
##' (control) and once with the case detection rates increased by the given
##' amounts (intervention). The increased rates can be switched off after
##' intervention.duration years, after which the run continues with the
##' original parameters out to `time`.
##' @param ss starting state for runs, should include the main states and calculated ones
##' @param params list of parameters to use in the simulations
##' @param time how long to run the models (years)
##' @param int.theta.sp - increased rate of detection of sp TB
##' @param int.theta.sn - increased rate of detection of sn TB
##' @param int.theta.ep - increased rate of detection of ep TB
##' @param var.beta if TRUE, use the time-varying-beta derivative function
##' @param intervention.duration years the increased detection lasts
##' @return list(int = intervention run matrix, cont = control run matrix)
runIntCont <- function(ss,
params,
time,
int.theta.sp,
int.theta.sn,
int.theta.ep,
var.beta=FALSE,
intervention.duration=time){
## make sure all the stats for the ss are set to zero
#ss[37:length(ss)] <- 0
## control (no intervention) run over the full horizon
cont <- runTBHIVMod(params,initial.state=ss,max.time=time,var.beta=var.beta)
cont <- addColNames(cont,ext=T)
## intervention parameters: add the extra detection rates (all HIV classes)
params.int <- params
params.int$theta.snI <- rep(int.theta.sn,4)
params.int$theta.spI <- rep(int.theta.sp,4)
params.int$theta.epI <- rep(int.theta.ep,4)
## first we will run the intervention
int <- runTBHIVMod(params.int,initial.state=ss,max.time=intervention.duration,var.beta=var.beta)
if (intervention.duration < time){
## finish the horizon with the original (no-intervention) parameters,
## starting from the state at the end of the intervention period
int.part2 <- runTBHIVMod(params,initial.state=tail(int,1)[-1],max.time=time-intervention.duration,var.beta=var.beta)
int <- rbind(int,int.part2[-1,])
int[,1] <- seq(0,time,by=0.1) # restore a continuous time axis
}
int <- addColNames(int,ext=T)
return(list(int=int,cont=cont))
}
##' Takes a model run and plots prevalence, incidence and cases detected
##' over time (rates per 100k per year when pop.adj is TRUE).
##' @param out model output matrix with columns named by addColNames(ext=TRUE)
##' @param pop.adj if TRUE, scale counts by population size / 100k
##' @param overlay if TRUE, add dashed lines to an existing plot
##' @param legend if TRUE, draw a legend
plotOut <- function(out,pop.adj=T,overlay=FALSE,legend=TRUE){
if (pop.adj){
## NOTE(review): grep returns ALL columns matching "CI"; 2:(limit-1) then
## uses only the first element (with a warning) — confirm intended.
limit <- grep("CI",colnames(out)) ##which is the first col of stats
pa <- rowSums(out[,2:(limit-1)])/100000 ## pop.size / 100k
pa <- pa[-1] #since we are starting after 2008.0
} else {
pa <- rep(1,nrow(out)-1)
}
cd <- grep("N.",colnames(out))
## get cases detected per 100k (if adjusted); x10 converts the 0.1-year
## step differences to an annual rate
cases.detected <- (diff(rowSums(out[,cd]))/pa)*10
times <- out[,1]
## get prevalence
prev <- rowSums(getPrevCols(out))/c(1,pa)
##get incidence
## NOTE(review): addColNames produces "CI1"/"CIall" but not "CI" — this
## exact-name lookup likely fails; should it be "CIall"? confirm.
inc <- (diff(out[,"CI"])/pa)*10
if (!overlay){
plot(times,prev,col=1,type="l",ylim=c(0,700),lty=1,xlab="",ylab="Rate per 100k per year")
lines(times[-1],inc,col=2,type="l",lty=1)
lines(times[-1],cases.detected,col=3,type="l",lty=1)
} else {
lty <- 2 # dashed lines distinguish the overlaid run
lines(times,prev,col=1,type="l",lty=lty)
lines(times[-1],inc,col=2,type="l",lty=lty)
lines(times[-1],cases.detected,col=3,type="l",lty=lty)
}
if(legend & overlay){
legend("topright",c("Prevalence, Intervention","Incidence, Intervention","Cases Detected, Intervention","Prevalence, No Intervention","Incidence, No Intervention","Cases Detected, No Intervention"),col=c(1:3,1:3),lty=c(rep(1,3),rep(2,3)),bty="n")
} else if (legend){
legend("topright",c("Prev","Inc","Detected"),col=1:3,lty=1,bty="n")
}
}
##' Calculates HIV-related summary statistics from model state.
##' @param mod model state: either a matrix of model output rows or a single
##'   named state vector; a leading "time" column/element is dropped
##' @param full if TRUE (matrix input only) return the whole time series of
##'   statistics rather than just the final values
##' @return c(hiv.prev.1, prop.art.1) — HIV prevalence and proportion of the
##'   ART-eligible who are on ART — or a list of the two series when full=TRUE
##' @author Andrew Azman
hIVStats <- function(mod,full=FALSE){
    ## strip the time column/element if present
    if(!is.null(nrow(mod)) && colnames(mod)[1] == "time") mod <- mod[,-1]
    if(is.null(nrow(mod)) && names(mod)[1] == "time") mod <- mod[-1]
    if(!is.null(nrow(mod))){
        ## ---- matrix input ----
        ## assuming that the first CI column is the first one of the
        ## cumulative statistics; drop them all
        first.column.of.cum.stats <- grep("CI",colnames(mod))
        if (length(first.column.of.cum.stats) > 0){
            mod <- mod[,-c(first.column.of.cum.stats[1]:ncol(mod))]
        }
        ## HIV prevalence: all h/a/n-prefixed compartments over everyone
        prev.1 <-
            apply(mod[,grep("^[han]",colnames(mod))],1,sum)/
                rowSums(mod[,grep(".+1$",colnames(mod))])
        ## note the labels for n and a are actually reversed
        prop.on.art.1 <-
            rowSums(mod[,grep("^n.+1$",colnames(mod))])/
                rowSums(mod[,grep("(^a.+1$)|(^n.+1$)",colnames(mod))])
        ## only considering those eligible
        if (full) {
            return(list(prev.1,prop.on.art.1))
        } else {
            return(c(hiv.prev.1=tail(prev.1,1),prop.art.1=tail(prop.on.art.1,1)))
        }
    } else {
        ## ---- vector input ----
        first.column.of.cum.stats <- grep("CI",names(mod))
        if (length(first.column.of.cum.stats) > 0){
            ## BUGFIX: drop the cumulative-statistic entries. The original
            ## KEPT them (missing minus sign) and called ncol() on a vector,
            ## which errors.
            mod <- mod[-(first.column.of.cum.stats[1]:length(mod))]
        }
        prev.1 <- sum(mod[grep("^[han]",names(mod))])/sum(mod[grep(".+1$",names(mod))])
        ## note the labels for n and a are actually reversed
        prop.on.art.1 <- sum(mod[grep("^n.+1$",names(mod))])/
            sum(mod[grep("(^a.+1$)|(^n.+1$)",names(mod))])
        return(c(hiv.prev.1=prev.1,prop.art.1=prop.on.art.1))
    }
}
##' Runs the model to steady state with proposed ART-uptake and HIV
##' force-of-infection values and scores the resulting HIV statistics
##' against the targets.
##' @param fit.params proposed values: c(chi.tx[3], foi.hiv[1])
##' @param full.params full model parameter list
##' @param state model state to start the steady-state run from
##' @param prev.1 true HIV prevalence for 1st age class
##' @param prop.art.1 true proportion of hiv eligible that are on ARTs (<15)
##' @return sum of squared relative errors for hiv.prev and prop.on.art
##'   (equally weighted and not scaled)
##' @author Andrew Azman
hIVObjective <- function(fit.params,
full.params,
state,
prev.1,
prop.art.1){
## apply the proposed parameter values
full.params$chi.tx[3] <- fit.params[1]
# full.params$chi.tx[2] <- fit.params[2]
full.params$foi.hiv[1] <- fit.params[2]
## full.params$foi.hiv[2] <- fit.params[4]
#print(fit.params)
RS <- runsteady(y=state,fun=dxdt.TBHIV3,parms=full.params,verbose=F)
tmp <- addColNames(RS$y,time=F)
(stats <- hIVStats(tmp)) # parentheses print the stats while assigning
# print(matrix(c(stats,prev.1,prop.art.1),nrow=2,byrow=T))
# recover()
sum((stats/c(prev.1,prop.art.1) - 1)^2)
}
##' Fits chi.tx[3] (rate of flow from ART-eligible to on-ART) and foi.hiv[1]
##' (the constant rate of new HIV infections) to observed HIV prevalence and
##' ART coverage.
##' @param params full model parameter list (starting values taken from it)
##' @param start.state model state to start the steady-state runs from
##' @param prev.1 target HIV prevalence
##' @param prop.art.1 target proportion of ART-eligible on ART
##' @return final parameters of the optimization routine (optim object)
##' @author Andrew Azman
fitHIV <- function(params,
                   start.state,
                   prev.1,
                   prop.art.1){
    ## initial guesses come straight from the current parameter values
    init <- c(params$chi.tx[3], params$foi.hiv[1])
    optim(init,
          fn = hIVObjective,
          full.params = params,
          state = start.state,
          prev.1 = prev.1,
          prop.art.1 = prop.art.1,
          method = "L-BFGS-B",
          lower = c(1e-5, 1e-10),
          upper = c(365, 1),
          control = list(parscale = c(1, .1)))
}
##' Gets the fraction of the population in each age class at the final time
##' step of a model run.
##' @param mod.out model output matrix whose compartment column names end in
##'   the age-class index
##' @param classes number of age classes in the model
##' @return numeric vector of length `classes` that sums to 1
getAgeDistribution <- function(mod.out,classes=2){
    ## hoist the final row out of the loop (the original re-indexed it each
    ## iteration and grew `ages` with c(), one element at a time)
    last.row <- mod.out[nrow(mod.out), ]
    ages <- vapply(seq_len(classes),
                   function(i) sum(last.row[grep(paste0(i, "$"), colnames(mod.out))]),
                   numeric(1))
    ages/sum(ages)
}
##' Returns key TB statistics from one year of model output: mortality,
##' prevalence, and incidence — overall and among the HIV-positive classes.
##' @param mod model output matrix
##' @param add.names if TRUE, apply addColNames to mod first
##' @param row.final final row of the one-year window (defaults to last row)
##' @param row.init initial row of the window (defaults to row.final - 10,
##'   i.e. one year earlier at 0.1-year time steps)
##' @return rounded named vector of TB statistics
##' @author Andrew Azman
getTBStats <- function(mod,add.names=TRUE,row.final,row.init){
    if (add.names) mod <- addColNames(mod,time=TRUE,ext=TRUE)
    if(missing(row.final) || missing(row.init)){
        row.final <- nrow(mod)
        row.init <- row.final - 10
    }
    ## overall TB mortality over the window: differences of the cumulative
    ## Mtb columns ("Mtb" also matches the h/a/n-prefixed columns)
    tb.mort <- sum(mod[row.final,grep("Mtb",colnames(mod))] -
                   mod[row.init,grep("Mtb",colnames(mod))])
    ## mortality among the HIV-positive classes only
    tb.hiv.mort <- sum(mod[row.final,grep("(a|h|n)Mtb",colnames(mod))] -
                       mod[row.init,grep("(a|h|n)Mtb",colnames(mod))])
    ## point prevalence at the end of the window
    tb.prev <- sum(getPrevCols(mod)[row.final,])
    tb.hiv.prev <- sum(getPrevCols(mod,hiv.only=TRUE)[row.final,])
    ## incidence over the window, overall and among HIV-positives
    tb.inc <- mod[row.final,"CIall"] - mod[row.init,"CIall"]
    tb.hiv.inc <- sum(mod[row.final,grep("(a|h|n)CI",colnames(mod))] -
                      mod[row.init,grep("(a|h|n)CI",colnames(mod))])
    return(round(c(tb.mort.nohiv=tb.mort-tb.hiv.mort,
                   tb.hiv.mort=tb.hiv.mort,
                   tb.hiv.prev=tb.hiv.prev,
                   tb.prev=tb.prev,
                   tb.inc=tb.inc,tb.hiv.inc=tb.hiv.inc),1))
}
##' Alternates between fitting the HIV parameters (ART uptake chi.tx[3],
##' HIV force of infection foi.hiv[1]) and the TB parameters (beta.sp,
##' theta.sp) until all four change by less than epsilon.target (relative)
##' between rounds. TB fitting uses incidence+prevalence when the CALLER
##' supplies target.prev.tb, otherwise incidence+CDR.
##' @param start.state starting model state
##' @param params.start starting parameter list
##' @param target.ci target TB incidence
##' @param target.cdr target case detection rate (proportion)
##' @param target.prev.tb target TB prevalence. NOTE(review): despite the
##'   default value, missing(target.prev.tb) is TRUE unless the caller
##'   passes a value, so the default path is the fitIncCDR branch — confirm
##'   this is intended.
##' @param target.prev.hiv target HIV prevalence
##' @param target.art target proportion of ART-eligible on ART
##' @param epsilon.target relative-change convergence tolerance
##' @param uppers.tb upper bounds c(beta, theta) for the TB fit
##' @param lowers.tb lower bounds c(beta, theta) for the TB fit
##' @return list(params, state, epsilon) at convergence
iterativeHIVTBFit <- function(start.state,
params.start,
target.ci=993,
target.cdr=0.69,
target.prev.tb = 768,
target.prev.hiv = 0.178,
target.art = 0.55,
epsilon.target=1e-2,
uppers.tb=c(20,4),
lowers.tb=c(5,.1)){
## initialize parameters
epsilon <- Inf
tmp.state <- start.state
params.tmp <- params.start
## params.hiv.tmp <- params.hiv.start
## params.tb.tmp <- params.tb.start
## set up proposed parameter vector: chi.tx[3], foi.hiv[1], beta.sp, theta.sp
par.cur <- c(params.tmp$chi.tx[3],
params.tmp$foi.hiv[1],
params.tmp$beta.sp[1],
params.tmp$theta.sp[1])
par.new <- rep(NA,4)
while(epsilon > epsilon.target){
## fit the HIV side first (first 36 state entries = model compartments)
hiv.fit.sa <- fitHIV(params.tmp,
tmp.state[1:36],
prev.1=target.prev.hiv,
prop.art.1=target.art)
par.new[1] <- params.tmp$chi.tx[3] <- hiv.fit.sa$par[1]
par.new[2] <- params.tmp$foi.hiv[1] <- hiv.fit.sa$par[2]
## then fit the TB side with the updated HIV parameters
if(!missing(target.prev.tb)){
tb.fit.tmp <- fitIncPrev(initial.state=tmp.state,
params=params.tmp,
target.ci=target.ci,
target.prev.tb=target.prev.tb,
uppers=uppers.tb,lowers=lowers.tb)
} else {
tb.fit.tmp <- fitIncCDR(initial.state=tmp.state,
params=params.tmp,
target.ci=target.ci,
target.cdr=target.cdr )
}
params.tmp$beta.sp <- tb.fit.tmp$final.pars$beta.sp
params.tmp$theta.sp <- tb.fit.tmp$final.pars$theta.sp
par.new[3] <- tb.fit.tmp$final.pars$beta.sp[1]
par.new[4] <- tb.fit.tmp$final.pars$theta.sp[1]
## change if we alter relationship between theta.sp and the others
params.tmp$theta.sn <- tb.fit.tmp$final.pars$theta.sp*1
params.tmp$theta.ep <- tb.fit.tmp$final.pars$theta.sp*1
## convergence: max relative change across all four fitted parameters
epsilon <- max(abs(par.new - par.cur)/par.cur)
par.cur <- par.new
tmp.state <- tb.fit.tmp$ss
cat(sprintf("Pct change in params from last optim is %f \n",epsilon))
}
list(params=params.tmp,
state=tmp.state,
epsilon=epsilon)
}
##' Takes output from runIntCont and plots ICER-vs-cost curves, one per
##' analytic time horizon.
##' @param out output from runIntCont
##' @param times time horizons (years), one curve per horizon
##' @param costs diagnosis costs to evaluate (x-axis)
##' @param params model parameter list, passed to calcStats
##' @param ... passed through to calcStats
##' @return called for its plotting side effect
##' @author Andrew Azman
makeHorizonICERPlot <- function(out,times,costs,params,...){
## NOTE(review): brewer.pal comes from RColorBrewer and calcStats is not
## defined in this file — confirm both are available when this is called.
cols <- brewer.pal(6, name="Greens")
cols <-colorRampPalette(cols, space = "Lab")
colors<-cols(length(times)+3)
## empty frame; the single point is drawn off-screen
plot(-100,-100,xlim=range(costs),ylim=c(0,600),xlab="",ylab="")
sapply(1:length(times),function(horiz){
lines(costs,sapply(1:length(costs),function(cost)
calcStats(out,eval.times=1:((horiz*10)+1),dtx.cost=cost,params=params,...)["ICER"]),col=horiz)
#colors[horiz+2]) # NOTE(review): the Greens palette above is computed but unused
})
}
##' Makes a levelplot (image) of ICERs by diagnosis cost and analytic time horizon
##' @param out output of runIntCont
##' @param times time horizons (years)
##' @param costs costs we want to evaluate it at
##' @param params parameters vector
##' @param xlabs axis labels for the time-horizon axis
##' @param ylabs axis labels for the cost axis
##' @param ... passed through to getICER
##' @return plot (drawn as a side effect)
##' @author Andrew Azman
makeLevelPlotICER <- function(out,times,costs,params,xlabs,ylabs,...){
require(fields)
## NOTE(review): brewer.pal comes from RColorBrewer — confirm it is loaded.
cols <- brewer.pal(9, name="Greens")
cols <-colorRampPalette(cols[-1], space = "Lab")
## evaluate the ICER over the full horizon x cost grid
grid <- expand.grid(times,costs)
ICERS <- mapply(getICER,horiz=grid[,1],cost=grid[,2],MoreArgs= list(params=params,out=out,...))
mat <- matrix(ICERS,nrow=length(times),ncol=length(costs))
# layout(matrix(c(1,2),nrow=1),widths = c(.9,.1))
# par(mar=c(2,2,2,2))
par(mar=c(5,4.5,4,7)) # extra right margin for the color key
image(mat,col=cols(15),axes=F,xlab="Time Horizon (years)",ylab="Diagnosis Cost (USD)")
axis(1,at=seq(0,1,length=length(xlabs)),labels=xlabs)
axis(2,at=seq(0,1,length=length(ylabs)),labels=ylabs)
image.plot(col=cols(15),zlim=range(ICERS),legend.only=T,horizontal=F,width=5)
}
##' Helper: computes the ICER for one time horizon and diagnosis cost.
##' @param horiz analytic time horizon in years
##' @param cost diagnosis cost
##' @param params model parameter list
##' @param out output from runIntCont
##' @param fixed true if we are fixing the case-finding costs
##' @param ... passed through to the ICER calculation
##' @return the ICER value
##' @author Andrew Azman
getICER <- function(horiz,cost,params,out,fixed,...){
    ## pick the costing routine, then evaluate over the first `horiz` years
    ## (model output is in 0.1-year steps, hence horiz*10 + 1 rows)
    icer.fun <- if (fixed) calcICERFixedCosts else calcICER
    icer.fun(out, eval.times = 1:((horiz * 10) + 1), dtx.cost = cost,
             params = params, ...)["ICER"]
}
##' Objective function for fitting the annual exponential change in beta
##' (beta.delta) to a target cumulative incidence.
##' @param beta.delta proposed annual rate of change of beta.sp
##' @param params model parameter list
##' @param ss starting state
##' @param target.ci target TB incidence
##' @param years number of years to simulate
##' @return squared difference between target and simulated incidence
##' @author Andrew Azman
fitAnnualBetaDeltaObjFunc <- function(beta.delta,params,ss,target.ci,years){
    params$beta.delta <- rep(beta.delta,4)
    out <- runTBHIVMod(params,ss,years,var.beta=TRUE)
    ## compute the TB stats once (element 5 = tb.inc); the original called
    ## getTBStats twice, once for the objective and once just for printing
    current.ci <- getTBStats(out)[5]
    cat(sprintf("Target = %f, Current = %f \n",target.ci,current.ci))
    (target.ci - current.ci)^2
}
##' Fits the annual percent change in beta to a target incidence using
##' one-dimensional (Brent) optimization.
##' @param params model parameter list (starting value taken from beta.delta)
##' @param ss starting state
##' @param target.ci target TB incidence
##' @param years number of years to simulate
##' @return the object returned by optim
##' @author Andrew Azman
fitAnnualBetaDelta <- function(params,
                               ss,
                               target.ci,
                               years){
    ## start Brent's method from the current beta.delta
    start.val <- params$beta.delta[1]
    optim(start.val,
          fn = fitAnnualBetaDeltaObjFunc,
          ss = ss,
          params = params,
          target.ci = target.ci,
          years = years,
          method = "Brent",
          lower = -10,
          upper = 10,
          control = list(trace = TRUE))
}
##' Returns WHO TB burden data for a specific country and set of years.
##' @param target.country country name as it appears in the WHO csv
##' @param years vector of years to keep
##' @return data frame of the matching rows
##' @author Andrew Azman
getWHOStats <- function(target.country,years){
    who.data <- read.csv("Data/TB_burden_countries_2012-12-10.csv")
    keep <- who.data$country == target.country & who.data$year %in% years
    ## which() drops NA rows, matching subset()'s NA handling
    who.data[which(keep), ]
}
##' Manual steady-state check (sanity check on rootSolve::runsteady):
##' repeatedly integrates the model in chunks of `check.every` years until
##' the state stops changing between the chunk's last rows.
##' @param state starting state vector
##' @param fun unused; kept for backward compatibility with existing callers
##' @param params model parameter list
##' @param check.every chunk length in years between convergence checks
##' @param var.beta passed through to runTBHIVMod
##' @return the final state (time column dropped)
##' @author Andrew Azman
runSteady <- function(state,fun,params,check.every=500,var.beta=FALSE){
    steady <- FALSE
    while(!steady){
        tmp <- runTBHIVMod(params,state,check.every,var.beta=var.beta)
        last.rows <- tail(tmp, 10)
        ## BUGFIX: compare full state rows (excluding the time column). The
        ## original linear indexing tail(tmp,10)[10] - tail(tmp,10)[1] only
        ## compared entries of the first (time) column, which always differ
        ## by < 1 and so declared convergence immediately.
        if (max(abs(last.rows[10, -1] - last.rows[1, -1])) < 1){
            steady <- TRUE
        } else {
            ## BUGFIX: continue integrating from where this chunk ended;
            ## the original re-ran the identical chunk forever.
            state <- tail(tmp, 1)[-1]
        }
    }
    tail(tmp,1)[-1]
}
##' Fits the additional sp detection rate (theta.spI) so that the
##' intervention detects a target number of extra cases over the fitting run.
##' @param target.detection.increase target extra cases detected (per 100k)
##' @param duration length of the fitting run in years
##' @param params model parameter list
##' @param starting.state model state to run from
##' @param ep.sn.multiplier fraction of the sp rate increase applied to ep/sn
##' @param var.beta passed through to the model runs
##' @return the object returned by optim
##' @author Andrew Azman
fitIncreasedDetectionRate <- function(target.detection.increase,
                                      duration,
                                      params,
                                      starting.state,
                                      ep.sn.multiplier,
                                      var.beta){
    ## start Brent's method just above the current added detection rate
    initial.guess <- params$theta.spI[1] + .1
    optim(initial.guess,
          fn = fitIncreasedDetectionRateObjFunc,
          params = params,
          state = starting.state,
          duration = duration,
          ep.sn.multiplier = ep.sn.multiplier,
          target.detection.increase = target.detection.increase,
          var.beta = var.beta,
          method = "Brent",
          lower = 0,
          upper = 10)
}
##' Objective function: squared difference between the extra cases detected
##' under an increased detection rate and a target increase.
##' @param theta.spI proposed additional detection rate for sp TB
##' @param params model parameter list
##' @param state starting state
##' @param duration years to run each scenario
##' @param ep.sn.multiplier what fraction of the sp rate increase should be
##'   assigned to the ep and sn detection rates
##' @param var.beta passed through to the model runs
##' @param target.detection.increase target number of extra cases detected
##' @return squared deviation from the target
##' @author Andrew Azman
fitIncreasedDetectionRateObjFunc <- function(theta.spI,
                                             params,
                                             state,
                                             duration,
                                             ep.sn.multiplier,
                                             var.beta,
                                             target.detection.increase){
    ## cases detected over a run: cumulative detections (N.Asp/N.Asn/N.Aep
    ## columns, all HIV classes) at the last row minus the first row.
    ## (Extracted: the original duplicated this computation for both runs.)
    casesDetected <- function(run){
        detect.cols <- grep("N.A(sp|sn|ep)", colnames(run))
        sum(run[nrow(run), detect.cols] - run[1, detect.cols])
    }
    ## first run the model without an increased detection rate
    run.pre <- runTBHIVMod(params,state,duration,var.beta=var.beta)
    run.pre <- addColNames(run.pre,ext=TRUE)
    ## now update the rates and rerun
    params$theta.spI <- rep(theta.spI,4)
    params$theta.snI <- rep(theta.spI,4)*ep.sn.multiplier
    params$theta.epI <- rep(theta.spI,4)*ep.sn.multiplier
    run.post <- runTBHIVMod(params,state,duration,var.beta=var.beta)
    run.post <- addColNames(run.post,ext=TRUE)
    ## squared distance between the extra detections and the target
    ((casesDetected(run.post) - casesDetected(run.pre)) - target.detection.increase)^2
}
##' Calculates the incremental cost-effectiveness ratio (ICER) for the
##' output of an intervention and counterfactual run, treating the
##' case-finding budget as FIXED (dtx.cost is a total cost, not per case).
##' @param out output from runIntCont
##' @param eval.times - times to extract (in units of 1/10 year) and to analysze
##' @param dtx.cost - cost of finding cases in the first year (total - NOT per case)
##' @param tx.cost cost of a full treatment course, drug-susceptible TB
##' @param tx.cost.mdr cost of a full treatment course, MDR TB
##' @param tx.suc probability treatment is completed successfully
##' @param tx.cost.partial cost of a partial treatment course, DS TB
##' @param tx.cost.partial.mdr cost of a partial treatment course, MDR TB
##' @param discount annual discount rate applied to costs and life years
##' @param dis.wt.tx disability weights while on treatment, by HIV class
##' @param dis.wt.tb disability weights with active TB, by HIV class
##' @param pct.mdr proportion of TB that is multi-drug resistant
##' @param params model parameter list (used for disease durations)
##' @return named vector: intervention treatment cost, ICER, DALYs averted,
##'   and total DALYs in the intervention and control arms
calcICERFixedCosts <- function(out,
eval.times=1:11,
dtx.cost=20*100, #full cost in year 1
tx.cost=120,
tx.cost.mdr=120,
tx.suc=c(1),
tx.cost.partial=80,
tx.cost.partial.mdr=80,
discount=.03,
dis.wt.tx = c((0.331+0)/2,(0.399+0.221)/2,0.547,(0.399+0.053)/2), ## Weighted averages from solomon et al 2013
dis.wt.tb = c(0.331,0.399,0.547,0.399), ##using DB for AIDs only for HIV/TB from salomon et al 2013
pct.mdr = 0.023,
params){
require(plyr)
## number of age classes (can put this as a param later)
ac <- 1
## reduce to output for times of interest
## helper vectors
types <- c("Asp","Asn","Aep")
hivstatus <- c("","h","a","n")
## extract only the relavent evaluation times
out <- lapply(out,function(x) x[eval.times,])
## get the times vector
times <- out[[1]][,1][-1]
## get differences in stats over time
## NOTE(review): column 14 is assumed to be the first cumulative-statistic
## column — confirm against the column layout produced by addColNames.
diffs <- lapply(out,function(x) {
diff(x)[,14:ncol(x)]
})
## get unit costs through time
## NB: dtx.costs are total costs through time NOT per case
## taking Reimann integral here assuming step size of 0.1
dtx.costs <- dtx.cost*exp(-times*discount)*0.1
## how many did we detect
dtxs.int <- diffs$int[,grep("N.A(sp|sn|ep)",colnames(diffs$int))]
dtxs.cont <- diffs$cont[,grep("N.A(sp|sn|ep)",colnames(diffs$cont))]
## get our costs discounted through time for DS and MDR TB
tx.unitcosts <- tx.cost*exp(-times*discount)
tx.part.unitcosts <- tx.cost.partial*exp(-times*discount)
tx.unitcosts.mdr <- tx.cost.mdr*exp(-times*discount)
tx.part.unitcosts.mdr <- tx.cost.partial.mdr*exp(-times*discount)
## Now we get the cost of full and partial treatment over time
## subtracting the control costs from the intervetion costs for case finding and then adding the diagnosis costs.
## for the treatment group
txcost.int <- sum((rowSums(dtxs.int) - rowSums(dtxs.cont))*
(tx.suc*(tx.unitcosts*(1-pct.mdr) + tx.unitcosts.mdr*pct.mdr) +
(1-tx.suc)*(tx.part.unitcosts*(1-pct.mdr) + tx.part.unitcosts.mdr*pct.mdr)))
## Deaths
death.cont <- diffs$cont[,grep("Mtb",colnames(diffs$cont))]
death.int <- diffs$int[,grep("Mtb",colnames(diffs$int))]
## Years of life lost by hiv class
## taking a conservative approach where people
## can at most contribute horizon - time.step years
YLL.cont <- apply(death.cont,2,function(hiv.class) {
hiv.class * (max(times) - times) * exp(-discount *times)
## hiv.class * which.max(times) - 1:length(times) * exp(-discount *times)
})
YLL.int <- apply(death.int,2,function(hiv.class) {
hiv.class * (max(times) - times) * exp(-discount *times)
# hiv.class * which.max(times) - 1:length(times) * exp(-discount *times)
})
YLL.cont.minus.int <- YLL.cont - YLL.int
## from the model not accounting for deaths
## only considering symtomatic time not PS period
## NOTE: dis.dur.tx is not actually used anywhere anymore
with(params,{
dur.sp <- (theta.sp+theta.spI)*eta.sp+zeta.sp
dur.sn <- (theta.sn+theta.snI)*eta.sn+zeta.sn
dur.ep <- (theta.ep+theta.epI)*eta.ep+zeta.sn
tmp <- 1/rbind(sp=dur.sp,sn=dur.sn,ep=dur.ep)
colnames(tmp) <- c("","h","n","a")
tmp
}) -> dis.dur.tx
with(params,{
dur.sp <- theta.sp+zeta.sp
dur.sn <- theta.sn+zeta.sn
dur.ep <- theta.ep+zeta.sn
tmp <- 1/rbind(sp=dur.sp,sn=dur.sn,ep=dur.ep)
colnames(tmp) <- c("","h","n","a")
tmp
}) -> dis.dur.notx
# taking mean treatment duration
# assuming that all TB types have same duration of TX
tx.dur <- 1/params$gamma.tx.rtx[1]
## Disability Years YLD = I * D * DW
## may need to split this by TB type
prop.each.TB.type <- sapply(1:4,function(x) {
with(params, c(pi.sp[x]*(1-pi.ep[x]),(1-pi.sp[x])*(1-pi.ep[x]),
pi.ep[x]))
})
colnames(prop.each.TB.type) <- c("","h","n","a")
## We consider prevalent cases are those contributing to YLD
## NOTE(review): hiv.types ordering here is ("","h","a","n") while the
## duration tables above use ("","h","n","a") — confirm consistency.
hiv.types <- c("^","h","a","n")
## list with each element as hiv type. matrices rows = time, columns = Asp, Asn, Aep
prev.cases.cont <- sapply(1:4,function(x)
(out[[2]])[,grep(paste0(hiv.types[x],"(Asp|Aep|Asn)"),
colnames(out[[2]]))],simplify=F)
prev.cases.int <- sapply(1:4,function(x)
(out[[1]])[,grep(paste0(hiv.types[x],"(Asp|Aep|Asn)"),
colnames(out[[1]]))],simplify=F)
prev.cases.cont.minus.int <- llply(1:4,function(x) prev.cases.cont[[x]] - prev.cases.int[[x]])
## these output lists of matrices. list by HIV, matrix columns by TB
time.step <- 0.1
## YLD among undetected (untreated) prevalent cases
YLD.notx.cont <- sapply(1:4,function(hiv){
sum(sapply(1:3,function(tb) {
prev.cases.cont[[hiv]][,tb] *
time.step * dis.wt.tb[hiv]
}))})
YLD.notx.int <- sapply(1:4,function(hiv){
sum(sapply(1:3,function(tb) {
prev.cases.int[[hiv]][,tb] *
time.step * dis.wt.tb[hiv]
}))})
YLD.notx.cont.minus.int <- YLD.notx.cont - YLD.notx.int
#just getting them into a different form
det.cases.int <- sapply(1:4,function(x){
dtxs.int[,grep(paste0(hiv.types[x],"(N.Asp|N.Aep|N.Asn)"),
colnames(dtxs.int))]}
,simplify=F)
det.cases.cont <- sapply(1:4,function(x){
dtxs.cont[,grep(paste0(hiv.types[x],"(N.Asp|N.Aep|N.Asn)"),
colnames(dtxs.cont))]}
,simplify=F)
## NB: not discounting for time on treatment since it is SO short
## YLD among detected cases while on treatment (at most 0.5 years each)
YLD.tx.int <- sapply(1:4,function(hiv){
sum(sapply(1:3,function(tb){
det.cases.int[[hiv]][,tb] * pmin(0.5,max(times) - times) * dis.wt.tb[hiv]
}))})
YLD.tx.cont <- sapply(1:4,function(hiv){
sum(sapply(1:3,function(tb){
det.cases.cont[[hiv]][,tb] * pmin(0.5,max(times) - times) * dis.wt.tb[hiv]
}))})
YLD.tx.cont.minus.int <- YLD.tx.cont - YLD.tx.int
## total DALYs = YLL + YLD (untreated) + YLD (on treatment)
DALYs.int <- sum(YLL.int) + sum(YLD.notx.int) + sum(YLD.tx.int)
DALYs.cont <- sum(YLL.cont) + sum(YLD.notx.cont) + sum(YLD.tx.cont)
DALYs.averted <- sum(YLL.cont.minus.int) + sum(YLD.notx.cont.minus.int) + sum(YLD.tx.cont.minus.int)
## ICER = (incremental treatment cost + case-finding cost) / DALYs averted
ret <- c(txcost.int=txcost.int,
ICER=(txcost.int+sum(dtx.costs))/sum(DALYs.averted),
DALYs.averted = DALYs.averted,
DALYS.int = DALYs.int,
DALYs.cont = DALYs.cont)
return(ret)
}
##' Manually tune the smear-positive transmission parameter (beta.sp)
##'
##' Sets beta.sp, runs the model to steady state, then simulates a single
##' year and returns the TB summary statistics so the fit against
##' target.ci can be inspected by hand.
##' @param start.beta candidate value for the beta.sp transmission parameter
##' @param state full model state vector; the first 61 entries are used
##' @param params model parameter list
##' @param target.ci target cumulative incidence (kept for reference while
##'   tuning; not used directly in the computation)
##' @return TB statistics (from getTBStats, with names) after a one-year run
##' @author Andrew Azman
ManualTuneBeta <- function(start.beta, state, params, target.ci) {
  params$beta.sp <- start.beta
  ## settle the system at equilibrium for the candidate beta.sp
  steady <- runsteady(y = state[1:61], fun = dxdt.TBHIV.CI, parms = params,
                      times = c(0, 10000), verbose = FALSE)
  ## then simulate one year forward from that equilibrium
  one.year.run <- runTBHIVMod(params, initial.state = steady$y,
                              max.time = 1, var.beta = FALSE)
  getTBStats(one.year.run, add.names = TRUE)
}
##' Makes a fixed-cost ICER heat map (cost per DALY averted) over a grid of
##' analytic horizons (x) and per-case detection costs (y)
##' @param icer.min floor used when clamping ICERs for the color scale
##' @param icer.max ceiling of the (log10) color scale
##' @param case.dt.dif fit to this many cases detected in the first year
##' @param plot.params model parameter list
##' @param start.state starting state vector for the simulation
##' @param tx.cost full treatment cost
##' @param tx.cost.partial cost of a partial (incomplete) treatment
##' @param tx.cost.mdr full MDR treatment cost
##' @param pct.mdr proportion of detected cases that are MDR
##' @param tx.cost.partial.mdr cost of a partial MDR treatment
##' @param my.title plot title
##' @param intcont.run if we give it this output from runIntCont we don't do it automatically
##' @param gdp per capita GDP (adds WHO cost-effectiveness threshold contours)
##' @param ICERS icers calculated over the grid (calculated and returned in this function if not provided)
##' @param contours optional extra contour levels to draw
##' @param xlab x axis label
##' @param ylab y axis label
##' @param leg legend?
##' @param ep.sn.multiplier scales the detection-rate boost for smear-negative
##'   and extra-pulmonary TB relative to smear-positive
##' @param truncate.color clamp ICERs outside [icer.min, icer.max] before coloring
##' @param ... unused
##' @return list with ICERS and int.cont.run and makes plot
##' @author Andrew Azman
makeICERPlotFixed <- function(icer.min=0.00001,
icer.max=6000,
case.dt.dif,
plot.params = india2011_params,
start.state = start.state.2011,
tx.cost = 81,
tx.cost.partial = tx.cost*.75,
tx.cost.mdr = 350,
pct.mdr = 0.023, # default for india
tx.cost.partial.mdr = tx.cost.mdr*.75,
my.title = "",
intcont.run,
gdp,
ICERS,
contours,
xlab="",
ylab="",
leg=FALSE,
ep.sn.multiplier=1,
truncate.color=TRUE,
...
){
## fitting increased detection rate that will give us X additional cases in the first year
if (missing(intcont.run)){
cat(sprintf("Fitting increased detection rate for %d case increase in year 1 \n",case.dt.dif))
fit.tmp <- fitIncreasedDetectionRate(target.detection.increase = case.dt.dif,
duration = 1,
params = plot.params,
starting.state = start.state,
ep.sn.multiplier = ep.sn.multiplier,
var.beta=FALSE)
theta.reduction <- fit.tmp$par
## run intervention + baseline with the fitted detection-rate boost
tmp <- runIntCont(start.state,plot.params,10,
int.theta.sp= theta.reduction,
int.theta.sn = theta.reduction*ep.sn.multiplier,
int.theta.ep = theta.reduction*ep.sn.multiplier)
## store the boost in the params so downstream cost calcs can see it
plot.params$theta.spI <- rep(theta.reduction,4)
plot.params$theta.snI <- rep(theta.reduction,4)*ep.sn.multiplier
plot.params$theta.epI <- rep(theta.reduction,4)*ep.sn.multiplier
} else {
tmp <- intcont.run
}
## grid of analytic horizons (years) and total detection costs
times <- seq(1,10,by=.1)
costs <- seq(50,5000,by=5)*case.dt.dif
## NOTE(review): xlabs/ylabs are defined but unused; axes are drawn explicitly below
xlabs <- 1:10
ylabs <- seq(50,5000,by=350)
## color scale works on log10(ICER)
zlims <- c(0,log10(icer.max))
# zlims <- c(icer.min,icer.max)
# breaks <- seq(icer.min,icer.max,by=50)
breaks <- seq(0,log10(icer.max),length=50)
cols <- colorRampPalette(brewer.pal(9, name="Greens"))
grid <- expand.grid(times,costs)
# params for the mapply statement
args.for.mapply <- list(params=plot.params,
out=tmp,
tx.cost=tx.cost,
tx.cost.partial=tx.cost.partial,
tx.cost.mdr=tx.cost.mdr,
tx.cost.partial.mdr=tx.cost.partial.mdr,
pct.mdr=pct.mdr,
fixed=TRUE)
# only estimate if we didn't supply ICERS
if (missing(ICERS)) ICERS <- mapply(getICER,horiz=grid[,1],cost=grid[,2],MoreArgs=args.for.mapply)
## reshape the ICER vector back into a horizon x cost matrix for image()
mat <- matrix(ICERS,nrow=length(times),ncol=length(costs))
# if truncate color then we are going to set all values larger to icer.max
if (truncate.color) {
mat[which(mat > icer.max)] <- icer.max
mat[which(mat < icer.min)] <- icer.min
}
## par(mar=c(5,4.5,4,7))
image(log10(mat),col=cols(length(breaks)-1),axes=F,xlab=xlab,ylab=ylab,zlim=zlims,breaks=breaks)
## WHO-style thresholds: cost saving / <1x GDP / <3x GDP per DALY averted
if (!missing(gdp)){
contour(log10(mat),levels=c(log10(0.0001),log10(gdp),log10(3*gdp)),col=addAlpha("black",.5),labcex=.5,lwd=1,lty=2,add=T,drawlabels=TRUE,method="edge",labels=c("cost saving","highly cost effective","cost effective"))
}
if (!missing(contours)){
contour(log10(mat),
levels=log10(contours), #[[1]]
col=addAlpha("black",.5),
labcex=.5,
lwd=1,
lty=2,
labels=contours,
add=TRUE)
#,method="edge")
}
## axis ticks live on image()'s [0,1] coordinate scale, hence the rescaling
time.labs <- cbind(seq(0,1,length=length(times)),seq(1,10,length=length(times)))[seq(1,length(times),by=5),]
axis(1,at=time.labs[,1],labels=time.labs[,2])
# axis(1,at=seq(0,1,length=length(xlabs)),labels=xlabs)
## NOTE(review): the hard-coded 991 matches length(costs) for the current
## seq(50,5000,by=5) grid — confirm if the cost grid ever changes
costs.labs <- cbind(seq(0,1,length=length(costs)),costs/case.dt.dif)[seq(1,991,by=50),]
axis(2,at=costs.labs[,1],labels=costs.labs[,2])
#axis(2,at=seq(0,1,length=length(ylabs)),labels=ylabs)
if (leg){
legend.seq <- round(seq(min(zlims),max(zlims),length=5),0)
## legend labels are back-transformed from log10 to natural units
image.plot(col=cols(length(breaks)-1),zlim=zlims,
## breaks=seq(min(zlims),max(zlims),length=length(breaks)),
## lab.breaks=round(10^seq(min(zlims),max(zlims),length=length(breaks)),0),
legend.only=T,horizontal=F,width=7,smallplot = c(.95,1,.05,.9),
axis.args=list(at=legend.seq, labels=10^legend.seq))
}
title(my.title)
list("ICERS"=ICERS,"intcont.run"=tmp)
}
##' Plots TB incidence and mortality over time for an intervention run
##' against its baseline counterpart
##' @param intcont list of two model output matrices as returned by
##'   runIntCont: [[1]] intervention, [[2]] baseline
##' @param legend draw a legend?
##' @param col1 color for the incidence curves
##' @param col2 color for the mortality curves
##' @param cd also plot detected cases?
##' @param ... further arguments passed to plot()
##' @author Andrew Azman
plotTBIncMort <- function(intcont,
legend=TRUE,
col1=1,
col2=2,
cd=FALSE,...){
  times <- intcont[[1]][, 1]
  ## cumulative counters are differenced and scaled by 10 (0.1-year steps)
  ## to yield annualized rates
  inc.int <- diff(intcont[[1]][, "CIall"]) * 10
  inc.base <- diff(intcont[[2]][, "CIall"]) * 10
  mort.rate <- function(run.mat) {
    diff(rowSums(run.mat[, grep("(n|a|h|^)(Mtb1)", colnames(run.mat))])) * 10
  }
  mort.int <- mort.rate(intcont[[1]])
  mort.base <- mort.rate(intcont[[2]])
  if (cd) {
    det.rate <- function(run.mat) {
      diff(rowSums(run.mat[, grep("N.A(sp|sn|ep)", colnames(run.mat))])) * 10
    }
    det.int <- det.rate(intcont[[1]])
    det.base <- det.rate(intcont[[2]])
  }
  ## y range must accommodate every plotted series
  y.vals <- c(inc.int, inc.base, mort.int, mort.base)
  if (cd) y.vals <- c(y.vals, det.int, det.base)
  ## empty canvas, then overlay each series
  plot(-100, -100, xlim = range(times), ylim = range(y.vals), xlab = "", ...)
  lines(times[-1], inc.int, col = col1)
  lines(times[-1], inc.base, col = col1, lty = 2)
  lines(times[-1], mort.int, col = col2)
  lines(times[-1], mort.base, col = col2, lty = 2)
  if (cd) {
    lines(times[-1], det.int, col = 5)
    lines(times[-1], det.base, col = 5, lty = 2)
  }
  if (legend) {
    legend("topright",
           paste0(rep(c("CI", "mort"), each = 2), c(" - Interv.", " - Baseline")),
           lty = rep(1:2, 2),
           col = rep(1:2, each = 2),
           bty = "n")
  }
}
##' Adds transparency (alpha) to a set of colors
##' @param COLORS vector of R colors (color names or hex strings)
##' @param ALPHA opacity scaling factor in [0, 1]; 0 = fully transparent,
##'   1 = fully opaque. Recycled against COLORS if shorter.
##' @return character vector of hex colors with the alpha channel scaled
addAlpha <- function(COLORS, ALPHA){
if(missing(ALPHA)) stop("provide a value for alpha between 0 and 1")
## bug fix: the message promised [0, 1] but the range was never checked,
## letting out-of-range values fall through to a confusing rgb() error
if(any(ALPHA < 0 | ALPHA > 1)) stop("provide a value for alpha between 0 and 1")
RGB <- col2rgb(COLORS, alpha=TRUE)
## scale the existing alpha channel (255 for opaque input colors)
RGB[4,] <- round(RGB[4,]*ALPHA)
NEW.COLORS <- rgb(RGB[1,], RGB[2,], RGB[3,], RGB[4,], maxColorValue = 255)
return(NEW.COLORS)
}
##' Plots cumulative TB incidence for an intervention run against its
##' baseline counterpart (or the difference between them)
##' @param intcont list of two model output matrices: [[1]] intervention,
##'   [[2]] baseline
##' @param legend unused (kept for interface compatibility)
##' @param col1 color for the incidence curves
##' @param col2 unused (kept for interface compatibility)
##' @param diffs plot only the baseline-minus-intervention difference?
##' @param poly shade the area between the two curves?
##' @param ... further arguments passed to plot()
plotCumTBIncMort <- function(intcont,
legend=TRUE,
col1=1,
col2=2,
diffs=FALSE,
poly=TRUE,
...){
  time.grid <- intcont[[1]][, 1]
  ## cumulative incidence, scaled by 10 as elsewhere in the file
  cum.inc.int <- intcont[[1]][, "CIall"] * 10
  cum.inc.base <- intcont[[2]][, "CIall"] * 10
  if (diffs) {
    ## show only the gap between baseline and intervention
    plot(cum.inc.base - cum.inc.int, col = col1, lty = 6, ...)
    return(invisible(NULL))
  }
  plot(-100, -100, xlim = range(time.grid),
       ylim = c(0, max(c(cum.inc.int, cum.inc.base))), xlab = "", ...)
  lines(time.grid, cum.inc.int, col = col1, lty = 1)
  lines(time.grid, cum.inc.base, col = col1, lty = 2)
  if (poly) {
    ## shade the averted-incidence region between the two curves
    polygon(x = c(time.grid, rev(time.grid)),
            y = c(cum.inc.int, rev(cum.inc.base)),
            col = addAlpha(col1, .2), border = FALSE)
  }
}
##' Compares stats from model output to those from WHO
##' @param run model run output (as produced by runTBHIVMod)
##' @param year year of WHO estimates to retrieve
##' @param country country name understood by getWHOStats
##' @return WHO mortality, prevalence, and incidence estimates (point
##'   estimates with lo/hi bounds) for the requested country-year
compareStats <- function(run,year,country){
## NOTE(review): tb.hiv.stats is computed but never used below — presumably
## the model statistics were meant to be bound alongside the WHO estimates
## in the returned table; confirm intended output before relying on this
tb.hiv.stats <- c(getTBStats(run),hIVStats(addColNames(run,ext=T)))
who.stats <- getWHOStats(country,year)
#mort
who.stat.colnames <- c("e_mort_exc_tbhiv_100k","e_mort_exc_tbhiv_100k_lo","e_mort_exc_tbhiv_100k_hi",
"e_prev_100k","e_prev_100k_lo","e_prev_100k_hi",
"e_inc_100k","e_inc_100k_lo","e_inc_100k_hi",
"e_inc_tbhiv_100k","e_inc_tbhiv_100k_lo","e_inc_tbhiv_100k_hi")
cbind(who.stats[who.stat.colnames])
}
##' Runs a short-term ACF intervention then continues on for some years
##' @param country string with "india", "sa", or "china"
##' @param pct.incidence extra cases found in year one should be
##'   pct.incidence X incidence (only used when case.dt.dif is missing)
##' @param case.dt.dif vector of extra year-one case counts
##'   (China, India, South Africa); overrides pct.incidence when supplied
##' @param int.dur total number of years we want to run the intervention
##' @param total.dur total number of years we want to run the simulation
##' @param fits named (by country) list of fitted objects
##' @return intcont list for the simulation
##' @author Andrew Azman
runNYearACF <- function(country,
pct.incidence,
case.dt.dif,
int.dur=2,
total.dur=10,
fits){
  #require(Hmisc)
  ## if no explicit case count was given, derive one from 2011 WHO incidence
  if (missing(case.dt.dif)) {
    who.inc <- function(cn) getWHOStats(cn, 2011)[, "e_inc_100k"]
    case.dt.dif <- c(round(who.inc("China") * pct.incidence, 0),
                     round(who.inc("India") * pct.incidence, 0),
                     round(who.inc("South Africa") * pct.incidence, 0))
  }
  ## pick out this country's target (vector order: China, India, SA)
  case.dt.dif <- switch(country,
                        "india" = case.dt.dif[2],
                        "china" = case.dt.dif[1],
                        "sa" = case.dt.dif[3])
  ## calibrate the detection-rate boost that yields the target case count
  acf.fit <- fitIncreasedDetectionRate(target.detection.increase = case.dt.dif,
                                       duration = 1,
                                       params = fits[[country]]$params,
                                       starting.state = fits[[country]]$state,
                                       ep.sn.multiplier = 1,
                                       var.beta = FALSE)
  theta.boost <- acf.fit$par
  runIntCont(ss = fits[[country]]$state,
             params = fits[[country]]$params,
             time = total.dur,
             int.theta.sp = theta.boost,
             int.theta.sn = theta.boost * 1,
             int.theta.ep = theta.boost * 1,
             intervention.duration = int.dur)
}
## Sens/Uncertainty Analyses Functions
##' Makes list of update functions for every param (or non-param) in the params list
##' used for running sensitivity analyses and dealing with dependent params
##' @return list of 148 updater functions (one per flattened parameter slot),
##'   each taking (para, new.value) and returning the updated param list
##' @author Andrew Azman
makeUpFuncs <- function(){
## One updater per slot of the flattened parameter list (4 HIV strata per
## parameter). Slots that should never be varied get identity functions
## that return `para` unchanged.
## NOTE: the chained `update.func <-` assignments below only create a local
## alias inside makeUpFuncs; they have no effect outside this function.
up.funcs <- vector("list",length=148)
up.funcs[[1]] <-
update.func <- function(para,new.value) {
para$beta.sp <- rep(new.value,4)
para
}
up.funcs[[2]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[3]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[4]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[5]] <-
update.func <- function(para,new.value) {
para$phi.sn <- rep(new.value,4)
para
}
up.funcs[[6]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[7]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[8]] <-
update.func <- function(para,new.value) {
para
}
## Pattern used repeatedly below: slot [1] is HIV-negative, slot [3] is
## HIV-positive (no ART); slots [2,4] are a weighted mix of the two via
## `ART mulitplier` (key spelling preserved from the params list).
up.funcs[[9]] <-
update.func <- function(para,new.value) {
para$phi.l[1] <- new.value
para$phi.l[c(2,4)] <- new.value*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$phi.l[3]
para
}
up.funcs[[10]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[11]] <-
update.func <- function(para,new.value) {
para$phi.l[3] <- new.value
para$phi.l[c(2,4)] <- para$phi.l[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*new.value
para
}
up.funcs[[12]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[13]] <-
update.func <- function(para,new.value) {
para$phi.ps[1:4] <- new.value
para
}
up.funcs[[14]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[15]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[16]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[17]] <-
update.func <- function(para,new.value) {
para$gamma.lf.ls[1:4] <- new.value
para
}
up.funcs[[18]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[19]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[20]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[21]] <-
update.func <- function(para,new.value) {
para$gamma.rtx.ls[1:4] <- new.value
para
}
up.funcs[[22]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[23]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[24]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[25]] <-
update.func <- function(para,new.value) {
para$gamma.tx.rtx[1:4] <- new.value
para
}
up.funcs[[26]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[27]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[28]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[29]] <-
update.func <- function(para,new.value) {
para$rho.lf[1] <- new.value
para$rho.lf[c(2,4)] <- new.value*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.lf[3]
para
}
up.funcs[[30]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[31]] <-
update.func <- function(para,new.value) {
para$rho.lf[3] <- new.value
para$rho.lf[c(2,4)] <- para$rho.lf[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.lf[3]
para
}
up.funcs[[32]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[33]] <-
update.func <- function(para,new.value) {
para$rho.ls[1] <- new.value
para$rho.ls[c(2,4)] <- new.value*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.ls[3]
para
}
up.funcs[[34]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[35]] <-
update.func <- function(para,new.value) {
para$rho.ls[3] <- new.value
para$rho.ls[c(2,4)] <- para$rho.ls[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.ls[3]
para
}
up.funcs[[36]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[37]] <-
update.func <- function(para,new.value) {
para$rho.rel[1:4] <- new.value
para
}
up.funcs[[38]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[39]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[40]] <-
update.func <- function(para,new.value) {
para
}
## From here the [c(2,4)] mix reads the just-updated slot via para$...,
## which is equivalent to using new.value directly
up.funcs[[41]] <-
update.func <- function(para,new.value) {
para$rho.ps[1] <- new.value
para$rho.ps[c(2,4)] <- para$rho.ps[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.ps[3]
para
}
up.funcs[[42]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[43]] <-
update.func <- function(para,new.value) {
para$rho.ps[3] <- new.value
para$rho.ps[c(2,4)] <- para$rho.ps[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$rho.ps[3]
para
}
up.funcs[[44]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[45]] <-
update.func <- function(para,new.value) {
para$pi.sp[1] <- new.value
para$pi.sp[c(2,4)] <- para$pi.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$pi.sp[3]
para
}
up.funcs[[46]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[47]] <-
update.func <- function(para,new.value) {
para$pi.sp[3] <- new.value
para$pi.sp[c(2,4)] <- para$pi.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$pi.sp[3]
para
}
up.funcs[[48]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[49]] <-
update.func <- function(para,new.value) {
para$pi.ep[1] <- new.value
para$pi.ep[c(2,4)] <- para$pi.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$pi.ep[3]
para
}
up.funcs[[50]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[51]] <-
update.func <- function(para,new.value) {
para$pi.ep[3] <- new.value
para$pi.ep[c(2,4)] <- para$pi.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$pi.ep[3]
para
}
up.funcs[[52]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[53]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[54]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[55]] <-
update.func <- function(para,new.value) {
para$mu.sp[3] <- new.value
para$mu.sp[c(2,4)] <- para$mu.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.sp[3]
para
}
up.funcs[[56]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[57]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[58]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[59]] <-
update.func <- function(para,new.value) {
para$mu.sn[3] <- new.value
para$mu.sn[c(2,4)] <- para$mu.sn[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.sn[3]
para
}
up.funcs[[60]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[61]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[62]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[63]] <-
update.func <- function(para,new.value) {
para$mu.ep[3] <- new.value
para$mu.ep[c(2,4)] <- para$mu.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.ep[3]
para
}
up.funcs[[64]] <-
update.func <- function(para,new.value) {
para
}
## zeta.sps
## The zeta (self-cure) updaters for slot [1] also adjust mu so that
## mu[1] = 1/3 - new.value, i.e. zeta[1] + mu[1] stays fixed at 1/3
up.funcs[[65]] <-
update.func <- function(para,new.value) {
para$zeta.sp[1] <- new.value
para$zeta.sp[c(2,4)] <- para$zeta.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.sp[3]
para$mu.sp[1] <- 1/3 - new.value
para$mu.sp[c(2,4)] <- para$mu.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.sp[3]
para
}
up.funcs[[66]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[67]] <-
update.func <- function(para,new.value) {
para$zeta.sp[3] <- new.value
para$zeta.sp[c(2,4)] <- para$zeta.sp[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.sp[3]
para
}
up.funcs[[68]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[69]] <-
update.func <- function(para,new.value) {
para$zeta.sn[1] <- new.value
para$zeta.sn[c(2,4)] <- para$zeta.sn[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.sn[3]
para$mu.sn[1] <- 1/3 - new.value
para$mu.sn[c(2,4)] <- para$mu.sn[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.sn[3]
para
}
up.funcs[[70]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[71]] <-
update.func <- function(para,new.value) {
para$zeta.sn[3] <- new.value
para$zeta.sn[c(2,4)] <- para$zeta.sn[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.sn[3]
para
}
up.funcs[[72]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[73]] <-
update.func <- function(para,new.value) {
para$zeta.ep[1] <- new.value
para$zeta.ep[c(2,4)] <- para$zeta.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.ep[3]
para$mu.ep[1] <- 1/3 - new.value
para$mu.ep[c(2,4)] <- para$mu.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$mu.ep[3]
para
}
up.funcs[[74]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[75]] <-
update.func <- function(para,new.value) {
para$zeta.ep[3] <- new.value
para$zeta.ep[c(2,4)] <- para$zeta.ep[1]*para$`ART mulitplier`[1] + (1-para$`ART mulitplier`[1])*para$zeta.ep[3]
para
}
up.funcs[[76]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[77]] <-
update.func <- function(para,new.value) {
para$theta.sp[1:4] <- new.value
para
}
up.funcs[[78]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[79]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[80]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[81]] <-
update.func <- function(para,new.value) {
para$theta.sn[1:4] <- new.value
para
}
up.funcs[[82]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[83]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[84]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[85]] <-
update.func <- function(para,new.value) {
para$theta.ep[1:4] <- new.value
para
}
up.funcs[[86]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[87]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[88]] <-
update.func <- function(para,new.value) {
para
}
## identity fillers for slots 89-112 (6 parameters x 4 strata, never varied)
for (i in 89:(89+(4*6)-1)){
up.funcs[[i]] <-
update.func <- function(para,new.value) {
para
}
}
up.funcs[[113]] <-
update.func <- function(para,new.value) {
para$foi.hiv[1] <- new.value
para
}
## more identity fillers for the remaining non-varied slots
for (i in c(114:116,117,119,120,121,122,124,129:((129+4*4)-1),146:148)){
up.funcs[[i]] <-
update.func <- function(para,new.value) {
para
}
}
up.funcs[[118]] <-
update.func <- function(para,new.value) {
para$chi.elg[2] <- new.value
para
}
up.funcs[[123]] <-
update.func <- function(para,new.value) {
para$chi.tx[3] <- new.value
para
}
up.funcs[[125]] <-
update.func <- function(para,new.value) {
para
}
up.funcs[[126]] <-
update.func <- function(para,new.value) {
para$mu.hiv[2] <- new.value
para
}
up.funcs[[127]] <-
update.func <- function(para,new.value) {
para$mu.hiv[3] <- new.value
para
}
up.funcs[[128]] <-
update.func <- function(para,new.value) {
para$mu.hiv[4] <- new.value
para
}
up.funcs[[145]] <-
update.func <- function(para,new.value) {
para$`ART mulitplier`[1:4] <- new.value
para
}
up.funcs
}
##' Helper function to generate an array of parameter-value sequences for
##' one-way sensitivity analyses
##' @param fits named (by country) list of fitted objects
##' @param country country key into fits
##' @param p maximum proportional change applied to each original value
##'   (NOTE: default references the global max.pct.change)
##' @param seq.lengths number of points in each parameter sequence
##'   (NOTE: default references the global num.points)
##' @param true.param.index indices (into the flattened parameter vector)
##'   of the parameters that are actually varied. Bug fix: the previous
##'   default `true.param.index=true.param.index` was self-referential and
##'   would raise a "promise already under evaluation" error if ever used,
##'   so the argument is now simply required.
##' @return seq.lengths x length(true.param.index) array of parameter values
genParamSeqs <- function(fits,country,
p=max.pct.change,
seq.lengths=num.points,
true.param.index){
param.seq.array <- array(dim=c(seq.lengths,length(true.param.index)))
for (i in seq_along(true.param.index)){
## flatten the parameter list (strata interleaved via t()) and pull the
## original value for this parameter slot
orig.value <- c(t(do.call(rbind,fits[[country]]$params)))[true.param.index[i]]
if (i %in% c(16:19,38)){ ## 38 is the ART multiplier
## these are proportions/multipliers, so cap the upper end at 1
param.seq.array[,i] <- seq(orig.value*p,min(orig.value*(1+p),1),length=seq.lengths)
} else {
## NOTE(review): the lower bound is orig.value*p rather than
## orig.value*(1-p); confirm this asymmetric range is intended
param.seq.array[,i] <- seq(orig.value*p,orig.value*(1+p),length=seq.lengths)
}
}
param.seq.array
}
##' For running one-way sensitivity analyses: varies each active parameter
##' (and the two treatment-cost inputs) over a range and records the ICER
##' @param country country key into fits ("india", "china", or "sa")
##' @param fits named (by country) list of fitted objects
##' @param max.pct.change maximum proportional change applied to each parameter
##' @param num.points number of values evaluated per parameter
##' @param cost.per.case cost per additional case detected (USD)
##' @param analytic.horizon years over which the ICER is evaluated
##' @param min.tx.costs lower bound for the treatment-cost sweep
##' @param max.tx.costs upper bound for the treatment-cost sweep
##' @param min.mdr.tx.costs lower bound for the MDR treatment-cost sweep
##' @param max.mdr.tx.costs upper bound for the MDR treatment-cost sweep
##' @return list(out, param.array): matrix of ICERs (rows = values, cols =
##'   parameters, last two cols = tx cost and MDR tx cost) and the matching
##'   parameter-value array
##' @author Andrew Azman
##' NOTE(review): this function also reads the globals tx.cost.pc,
##' tx.cost.partial.pc, tx.cost.mdr.pc, pct.mdr.pc and tx.cost.partial.mdr
##' (the last without the .pc suffix — confirm that is intentional)
runOneWaySens <- function(country,
fits,
max.pct.change,
num.points=5,
cost.per.case=2000,
analytic.horizon = 5,
min.tx.costs,
max.tx.costs,
min.mdr.tx.costs,
max.mdr.tx.costs
){
up.funcs <- makeUpFuncs()
## detect which updater functions actually change the parameter list:
## applying each with a sentinel value (-10) and comparing to the original
true.params <-1 - sapply(up.funcs,function(x) all.equal(c(do.call(rbind,x(fits[[country]]$params,-10))),c(do.call(rbind,fits[[country]]$params))) == TRUE)
true.param.index <- which(true.params == 1)
original.values <- c(t(do.call(rbind,fits[[country]]$params)))[true.param.index]
seq.lengths <- num.points
fits.orig <- fits
## +2 columns for the two cost sweeps appended after the parameter sweeps
out <- array(dim=c(seq.lengths,length(true.param.index)+2))
## 1. Let's first explore how the ICER for fixed cost per case detected in a single country varies by parameter
param.array <- genParamSeqs(fits.orig,country,
p=max.pct.change,
seq.lengths = num.points,
true.param.index=true.param.index)
## get number of cases that will be detected
pct.increase.in.yr1 <- 0.25
cases.detected <- getIncreasedCasesDetected(TRUE,pct.increase.in.yr1)
## define ranges for parameters
for (j in 1:ncol(param.array)){
param.seq <- param.array[,j]
for (i in seq_along(param.seq)){
## update param and any additional dependent params (e.g. HIV states)
new.params <- up.funcs[[true.param.index[j]]](fits.orig[[country]]$params,param.seq[i])
fits[[country]]$params <- new.params
## run 2 year ACF
## note: we are not using pct.incidence here as it is overridden by case.dt.dif
run <- runNYearACF(country,pct.incidence = 0.15,case.dt.dif=cases.detected,int.dur = 2,total.dur = 10,fits=fits)
## Calculate and store ICER
out[i,j] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
}
## next paramater value
}
## now for costs
## NOTE(review): `j` below deliberately retains its final loop value
## (ncol(param.array)), so columns j+1 and j+2 are the two cost sweeps;
## fragile but intentional
tx.costs <- seq(min.tx.costs,max.tx.costs,length=seq.lengths)
mdr.tx.costs <- seq(min.mdr.tx.costs,max.mdr.tx.costs,length=seq.lengths)
for (i in 1:seq.lengths){
## NOTE(review): this run is identical on every iteration (only costs
## change); it could be hoisted out of the loop
run <- runNYearACF(country,pct.incidence = 0.15,case.dt.dif=cases.detected,int.dur = 2,total.dur = 10,fits=fits.orig)
## Calculate and store ICER
out[i,j+1] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.costs[i],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
out[i,j+2] <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=cases.detected[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = mdr.tx.costs[i],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits[[country]]$params)[2]
}
param.array <- cbind(param.array,tx.costs,mdr.tx.costs)
list(out,param.array)
}
##' Makes a tornado plot (PDF) from one-way sensitivity-analysis output
##' @param sens.mat matrix of ICERs (rows = parameter values, cols = parameters)
##' @param param.array matching array of parameter values (from runOneWaySens)
##' @param country string of country
##' @param fits.orig original (unperturbed) fits list
##' @param analytic.horizon years used for the baseline ICER (also in file name)
##' @param cost.per.case cost per case detected, USD (also in file name)
##' @param lwd width of the tornado bars
##' @param top.n.params number of most influential parameters to display
##' @return writes a pdf of the tornado plot to Figures/
##' @author Andrew Azman
##' NOTE(review): case.dt.dif and the cost globals (tx.cost.pc, etc.) are
##' read from the global environment
makeTornadoPlot <- function(sens.mat,
param.array,
country,
fits.orig,
analytic.horizon,
cost.per.case,
lwd=10,
top.n.params=10){
param.index.names <- rep(names(fits.orig[[country]]$params),each=4)
param.names <- as.matrix(read.csv("Data/param_names.csv",as.is=T,header=F))
param.names <- paste0(rep(param.names,each=4)," [",0:3,"]")
up.funcs <- makeUpFuncs() # get functions that help update parameters
## identify which parameter slots the updaters actually change
## (bug fix: previously referenced the global `fits` instead of fits.orig)
true.params <- 1 - sapply(up.funcs,function(x) all.equal(c(do.call(rbind,x(fits.orig[[country]]$params,-10))),c(do.call(rbind,fits.orig[[country]]$params))) == TRUE)
true.param.index <- which(true.params == 1)
## baseline values, with the two cost inputs appended to match param.array
original.values <- c(c(t(do.call(rbind,fits.orig[[country]]$params)))[true.param.index],tx.cost.pc[country],tx.cost.mdr.pc[country])
pdf(sprintf("Figures/oneway_sens_%s_%.fyr_%.fusd.pdf",country,analytic.horizon,cost.per.case),width=5,height=4)
out <- sens.mat
## baseline (unperturbed) ICER for the reference line
run <- runNYearACF(country,pct.incidence = 0.5,
case.dt.dif=case.dt.dif,int.dur = 2,total.dur = 10,fits=fits.orig)
icer.orig <- calcICERFixedCosts(out=run,
eval.times = 1:(10*analytic.horizon + 1),
dtx.cost=case.dt.dif[country]*cost.per.case,
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=fits.orig[[country]]$params)[2]
## bug fix: cat(print(x)) printed the value twice; print once
print(icer.orig)
layout(matrix(c(1,1,1,2,2,2,2,2,1,1,1,2,2,2,2,2),nrow=2,byrow=T))
par(mar=c(4.5,1,0,0))
xlims <- c(min(out),max(out))
plot(-100,-100,xlim=xlims,ylim=c(0,1),bty="n",yaxt="n",ylab="",xlab="Cost per DALY Averted (USD)")#ncol(param.array)))
abline(v=icer.orig,col="grey",lty=2)
## sort parameters by the width of their ICER range (most influential last)
param.order <- order(apply(out,2,function(x) range(x)[2] - range(x)[1]))
sorted.out <- out[,param.order]
y.increment <- 1/min(ncol(out),top.n.params)
start.iter <- ifelse(top.n.params > ncol(out),1,ncol(out) - top.n.params) # do we start the below iterations from the lowest params?
for (param in start.iter:ncol(out)){
tmp.out <- sorted.out[,param]
greater.than.orig <- param.array[,param] > original.values[param]
extremes <- range(tmp.out)
print(range(tmp.out))
## color indicates whether the high (red) or low (blue) parameter value
## produced the larger ICER
max.col <- ifelse(greater.than.orig[which.max(tmp.out)],"red","blue")
min.col <- ifelse(max.col == "red","blue","red")
lines(x=c(extremes[1],icer.orig),y=c((param-start.iter)*y.increment,(param-start.iter)*y.increment),lwd=lwd,lend="butt",col=min.col)
lines(x=c(icer.orig,extremes[2]),y=c((param-start.iter)*y.increment,(param-start.iter)*y.increment),lwd=lwd,lend="butt",col=max.col)
}
text(par("usr")[2]-par("usr")[2]*.1,.1,"High Value",col="red",cex=1)
text(par("usr")[2]-par("usr")[2]*.1,.14,"Low Value",col="blue",cex=1)
## plot ranges for each
## plot(-100,-100,axes=F,bty="n",xlim=c(-1,1),ylim=c(0,1),xlab="",ylab="")
ranges <- apply(param.array,2,range)
ranges <- apply(ranges,2,function(x) sprintf("(%.2f,%.2f)",x[1],x[2]))
## for (param in 1:ncol(out)) text(.5,(param-start.iter)*y.increment,ranges[param.order[param]],cex=1.1)
## plot names of each
par(mar=c(4.5,0,0,0))
plot(-100,-100,axes=F,bty="n",xlim=c(-1,1),ylim=c(0,1),xlab="",ylab="")
for (param in 1:ncol(out)) text(1,(param-start.iter)*y.increment,
sprintf("%s %s",param.names[true.param.index[param.order[param]]],ranges[param.order[param]]),cex=.9,pos=4,offset=-22)
dev.off()
}
##' Runs a Latin-hypercube uncertainty analysis: draws parameter sets, runs a
##' two-year ACF intervention for each, and post-processes ICERs over a grid
##' of per-person diagnosis costs and analytic horizons
##' @param nsims number of LHS draws / model runs
##' @param country country key ("sa", "india", or "china")
##' @param param_range_prefix prefix of the CSV holding mode/min/max per parameter
##' @param output_file_prefix prefix for the saved .rda output files
##' @param case.dt.dif number of extra cases to detect in year one.
##'   Bug fix: the previous default `case.dt.dif=case.dt.dif` was
##'   self-referential and would error if relied upon; now required.
##' @param orig.fits fits list (NOTE: default references the global `fits`)
##' @param per.person.dx.cost grid of per-person diagnosis costs to evaluate
##' @return saves (1) list of run outputs, (2) list of parameter lists,
##'   (3) the LHS draw matrix, and (4) the ICER array, all time-stamped
##' @author Andrew Azman
runLHS <- function(nsims=10,
country="sa",
param_range_prefix="uncer_ranges_",
output_file_prefix="uncer_out",
case.dt.dif,
orig.fits=fits,
per.person.dx.cost=seq(1000,35000,length=300)
){
## fail loudly if the optional LHS dependency is unavailable
## (bare require() would only warn and then error later on lhs())
if (!require(tgp)) stop("package 'tgp' is required by runLHS")
## load in transformation functions that deal with dependent params
up.funcs <- makeUpFuncs()
params.minmax <- as.matrix(read.csv(paste0("Data/",param_range_prefix,country,".csv"),row.names=1),ncol=4)
## identify which updater functions actually change the parameter list
true.params <- 1 - sapply(up.funcs,function(x) all.equal(
unlist(x(orig.fits[[country]]$params,-10)),
unlist(orig.fits[[country]]$params)) == TRUE)
true.param.index <- which(true.params == 1)
param.names <- paste0(rep(names(orig.fits[[country]]$params),each=4),
rep(c("_n","_h","_hArt","_hNoART"),
length(orig.fits[[country]]$params)))
## make the lhs draws
lhs.draws <- lhs(n=nsims,
params.minmax[,2:3],
shape=rep(3,nrow(params.minmax)),
mode=params.minmax[,1])
## bug fix: list("vector",nsims) created a 2-element list; preallocate properly
runs <- vector("list",nsims)
new.params <- vector("list",nsims)
## Run a two year ACF and store the results only if
## I don't think we are doing the following anymore but left the comment in:
## incidence in baseline scenario at year 10 is orig.I <= I_10 <= orig.I*.5
for (i in 1:nrow(lhs.draws)){
if (i %% 100 == 0) cat(".")
## make the parameter list
## (bug fix: argument was previously misspelled `countr=`, relying on
## R's partial argument matching)
new.params[[i]] <- updateParams(new.values=lhs.draws[i,],
param.indices=true.param.index,
country=country,
fits=orig.fits)
tmp.fits <- orig.fits
tmp.fits[[country]]$params <- new.params[[i]]
runs[[i]] <- runNYearACF(country,
pct.incidence=.15,
case.dt.dif=case.dt.dif,
int.dur = 2,
total.dur = 10,
fits=tmp.fits)
}
## going to store as a list of runs
unix.time.stamp <- sprintf("%.0f",as.numeric(Sys.time()))
save(runs,file=paste0(output_file_prefix,"_",country,"_runs_",unix.time.stamp,".rda"))
save(new.params,file=paste0(output_file_prefix,"_",country,"_params_",unix.time.stamp,".rda"))
save(lhs.draws,file=paste0(output_file_prefix,"_",country,"_lhsdraws_",unix.time.stamp,".rda")) #this is a matrix of the LHS samples and includes the cost
horizons <- c(2,5,10)
## bug fix: dimensions were hard-coded (300, 3) and would silently mismatch
## if per.person.dx.cost or horizons changed
out <- array(dim=c(length(per.person.dx.cost),length(horizons),nsims))
## bug fix: print() showed the escape sequences literally; use cat()
cat(" \n post-processing \n")
for (i in 1:nsims){
cat("*")
for (h in seq_along(horizons)){
for (t in seq_along(per.person.dx.cost)){
## bug fix: previously indexed the undefined `case.dt.df`
out[t,h,i] <-
calcICERFixedCosts(out=runs[[i]],
eval.times = 1:(horizons[h]*10+1),
dtx.cost=case.dt.dif[country]*per.person.dx.cost[t],
tx.suc=c(1),
tx.cost = tx.cost.pc[country],
tx.cost.partial = tx.cost.partial.pc[country],
tx.cost.mdr = tx.cost.mdr.pc[country],
pct.mdr= pct.mdr.pc[country],
tx.cost.partial.mdr = tx.cost.partial.mdr[country],
params=new.params[[i]])[2]
}
}
}
save(out,file=paste0(output_file_prefix,"_",country,"_icers_",unix.time.stamp,".rda"))
}
##' Updates the parameter list for us with a set of new values from LHS
##' @param param.indices
##' @param new.values
##' @param country
##' @param fits
##' @return list of params suitable for model runs
##' @author Andrew Azman
##' Applies a set of LHS-drawn values to a country's parameter list by
##' threading the list through the relevant updater closures one at a time.
##' @param new.values numeric vector of replacement values (parallel to
##'   \code{param.indices})
##' @param param.indices indices into the updater-function list
##' @param country name of the country entry in \code{fits}
##' @param fits list of fitted models holding the starting parameter lists
##' @return list of params suitable for model runs
updateParams <- function(new.values, param.indices, country, fits) {
    up.funcs <- makeUpFuncs()  # one updater closure per tunable parameter
    ## Fold each (index, value) pair into the starting parameter list.
    ## Equivalent to a sequential for-loop; with zero indices the initial
    ## parameter list is returned unchanged.
    Reduce(function(params, k) up.funcs[[param.indices[k]]](params, new.values[k]),
           seq_along(param.indices),
           init = fits[[country]]$params)
}
##' gets the number of of cases that need to be detected for a number of cases equal to pct.first.yr% of either the
##' projected cases detected in the first year (case.det.based == TRUE), or incidence (case.det.base == FALSE).
##' @param case.det.based
##' @param pct.first.yr
##' @return named vector with number of cases for each country
##' @author Andrew Azman
##' Number of additional cases to detect per country: either pct.first.yr of
##' the modeled first-year case detection (case.det.based = TRUE) or of the
##' WHO estimated incidence per 100k (case.det.based = FALSE).
getIncreasedCasesDetected <- function(case.det.based = TRUE, pct.first.yr = 0.25) {
    if (case.det.based) {
        ## increase the number of cases detected by pct.first.yr of the
        ## modeled steady state / first year
        sa.trial <- runTBHIVMod(fit.sa.2011$params, fit.sa.2011$state, 1, var.beta = FALSE)
        india.trial <- runTBHIVMod(fit.india.2011$params, fit.india.2011$state, 1, var.beta = FALSE)
        china.trial <- runTBHIVMod(fit.china.2011$params, fit.china.2011$state, 1, var.beta = FALSE)
        ## pct.first.yr share of the final-row total across the "N." columns
        ## of a single model run.
        ## FIX: each run now indexes its own colnames; previously the china
        ## and sa rows indexed colnames(india.trial) (copy-paste slip).
        ## NOTE(review): "N." is a regex, so "." matches any character --
        ## confirm no unintended columns match the pattern.
        share <- function(trial) {
            round(sum(tail(trial[, grep("N.", colnames(trial))], 1)) * pct.first.yr, 0)
        }
        case.dt.dif <- c("china" = share(china.trial),
                         "india" = share(india.trial),
                         "sa" = share(sa.trial))
    } else {
        ## incidence based: pct.first.yr of the WHO estimated incidence per 100k
        case.dt.dif <- c("china" = round(getWHOStats("China", 2011)[, "e_inc_100k"] * pct.first.yr, 0),
                         "india" = round(getWHOStats("India", 2011)[, "e_inc_100k"] * pct.first.yr, 0),
                         "sa" = round(getWHOStats("South Africa", 2011)[, "e_inc_100k"] * pct.first.yr, 0))
    }
    return(case.dt.dif)
}
|
## Re-extrapolate a fitted "simex"/"mcsimex" object with a different
## extrapolation method (and, optionally, different jackknife / asymptotic
## variance estimation) WITHOUT re-running the simulation stage: the
## simulated estimates stored in object$SIMEX.estimates are reused and only
## the extrapolation to lambda = -1 is redone.
##
## object               fitted model as returned by simex()/mcsimex()
## fitting.method       "quadratic", "linear", "nonlinear", "loglinear" or
##                      "log2"; only the first 4 characters are used
## jackknife.estimation extrapolant for the jackknife variance, or FALSE
## asymptotic           re-derive the asymptotic variance?
## allowed.fitting /
## allowed.jackknife    accepted method abbreviations
## Returns the updated object (same class as the input).
.refit <- function(object, fitting.method = "quadratic", jackknife.estimation = "quadratic",
    asymptotic = TRUE, allowed.fitting = c("quad", "line", "nonl", "logl",
        "log2"), allowed.jackknife = c("quad", "line", "nonl", "logl",
        FALSE), ...) {
    fitting.method <- substr(fitting.method, 1, 4)
    if (object$fitting.method == fitting.method)
        stop("Model is already fitted with the specified fitting method",
            call. = FALSE)
    ## Fall back to the quadratic extrapolant for unknown methods.
    if (!any(fitting.method == allowed.fitting)) {
        warning("Fitting method not implemented. Using: quadratic", call. = FALSE)
        fitting.method <- "quad"
    }
    if (jackknife.estimation != FALSE)
        jackknife.estimation <- substr(jackknife.estimation, 1, 4)
    if (!any(jackknife.estimation == allowed.jackknife)) {
        warning("Fitting method (jackknife) not implemented. Using: quadratic",
            call. = FALSE)
        jackknife.estimation <- "quad"
    }
    ## Variance re-estimation is only possible if the original fit stored
    ## the required components.
    if (!any(names(object) == "variance.jackknife") && jackknife.estimation !=
        FALSE) {
        warning("Jackknife variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
            call. = FALSE)
        jackknife.estimation <- FALSE
    }
    if (!any(names(object) == "variance.asymptotic") && asymptotic) {
        warning("Asymptotic variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
            call. = FALSE)
        asymptotic <- FALSE
    }
    ## Subsetting a classed list drops its class, so remember and restore it.
    cl <- class(object)
    if (any(names(object) == "variance.asymptotic") && asymptotic == FALSE) {
        # removing unwanted parts of the object
        object <- object[setdiff(names(object), c("PSI", "c11", "a11",
            "sigma", "sigma.gamma", "g", "s", "variance.asymptotic"))]
    }
    if (any(names(object) == "variance.jackknife") && jackknife.estimation ==
        FALSE) {
        # removing unwanted parts of the object
        object <- object[setdiff(names(object), c("extrapolation.variance",
            "variance.jackknife", "variance.jackknife.lambda"))]
    }
    class(object) <- cl
    ## Row 1 / column 1 of SIMEX.estimates hold the lambda = -1 extrapolation
    ## and the lambda values; drop them so only simulated estimates remain.
    estimates <- object$SIMEX.estimates[-1, -1]
    lambda <- object$lambda
    ncoef <- length(coef(object))
    ndes <- dim(object$model$model)[1]
    p.names <- names(coef(object))
    SIMEX.estimate <- vector(mode = "numeric", length = ncoef)
    ## Fit the chosen extrapolant to the simulated estimates.  The "logl"
    ## branch shifts each column to be strictly positive before taking logs.
    switch(fitting.method, quad = extrapolation <- lm(estimates ~ lambda +
        I(lambda^2)), line = extrapolation <- lm(estimates ~ lambda), logl = extrapolation <- lm(I(log(t(t(estimates) +
        (abs(apply(estimates, 2, min)) + 1) * (apply(estimates, 2, min) <=
            0)))) ~ lambda), log2 = extrapolation <- fit.logl(lambda, p.names,
        estimates), nonl = extrapolation <- fit.nls(lambda, p.names, estimates))
    # security if nls does not converge: fit.logl falls back to an lm fit
    if (any(class(extrapolation) == "lm") && fitting.method == "log2")
        fitting.method <- "logl"
    # predicting the SIMEX estimate at lambda = -1
    switch(fitting.method, quad = SIMEX.estimate <- predict(extrapolation,
        newdata = data.frame(lambda = -1)), line = SIMEX.estimate <- predict(extrapolation,
        newdata = data.frame(lambda = -1)), nonl = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
        newdata = data.frame(lambda = -1)), log2 = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
        newdata = data.frame(lambda = -1)) - ((abs(apply(estimates, 2,
        min)) + 1) * (apply(estimates, 2, min) <= 0))[i], logl = SIMEX.estimate <- exp(predict(extrapolation,
        newdata = data.frame(lambda = -1))) - (abs(apply(estimates, 2,
        min)) + 1) * (apply(estimates, 2, min) <= 0))
    # jackknife estimation
    if (jackknife.estimation != FALSE) {
        variance.jackknife <- object$variance.jackknife.lambda[-1, -1]
        switch(jackknife.estimation, quad = extrapolation.variance <- lm(variance.jackknife ~
            lambda + I(lambda^2)), line = extrapolation.variance <- lm(variance.jackknife ~
            lambda), logl = extrapolation.variance <- lm(I(log(t(t(variance.jackknife) +
            (abs(apply(variance.jackknife, 2, min)) + 1) * (apply(variance.jackknife,
                2, min) <= 0)))) ~ lambda), nonl = extrapolation.variance <- fit.nls(lambda,
            1:NCOL(variance.jackknife), variance.jackknife))
        variance.jackknife2 <- vector("numeric", ncoef^2)
        switch(jackknife.estimation, nonl = for (i in 1:NCOL(variance.jackknife)) variance.jackknife2[i] <- predict(extrapolation.variance[[i]],
            newdata = data.frame(lambda = -1)), quad = variance.jackknife2 <- predict(extrapolation.variance,
            newdata = data.frame(lambda = -1)), line = variance.jackknife2 <- predict(extrapolation.variance,
            newdata = data.frame(lambda = -1)), logl = variance.jackknife2 <- exp(predict(extrapolation.variance,
            newdata = data.frame(lambda = -1))) - (abs(apply(variance.jackknife,
            2, min)) + 1) * (apply(variance.jackknife, 2, min) <= 0))
        ## Prepend the lambda = -1 extrapolation and reshape its row into
        ## the ncoef x ncoef jackknife covariance matrix.
        variance.jackknife <- rbind(variance.jackknife2, variance.jackknife)
        variance.jackknife.lambda <- cbind(c(-1, lambda), variance.jackknife)
        variance.jackknife <- matrix(variance.jackknife[1, ], nrow = ncoef,
            ncol = ncoef, byrow = TRUE)
        dimnames(variance.jackknife) <- list(p.names, p.names)
        object$variance.jackknife.lambda <- variance.jackknife.lambda
        object$variance.jackknife <- variance.jackknife
        object$extrapolation.variance <- extrapolation.variance
    }
    if (asymptotic) {
        ## Delta-method asymptotic variance; g holds the gradient terms of
        ## the extrapolant evaluated at lambda = -1.
        sigma <- object$sigma
        s <- construct.s(ncoef, lambda, fitting.method, extrapolation)
        d.inv <- solve(s %*% t(s))
        sigma.gamma <- d.inv %*% s %*% sigma %*% t(s) %*% d.inv
        g <- list()
        switch(fitting.method, quad = g <- c(1, -1, 1), line = g <- c(1,
            -1), logl = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation)[1,
            i] - coef(extrapolation)[2, i]), -exp(coef(extrapolation)[1,
            i] - coef(extrapolation)[2, i])), log2 = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation[[i]])[1] -
            coef(extrapolation[[i]])[2]), -exp(coef(extrapolation[[i]])[1] -
            coef(extrapolation[[i]])[2])), nonl = for (i in 1:ncoef) g[[i]] <- c(-1,
            -(coef(extrapolation[[i]])[3] - 1)^-1, coef(extrapolation[[i]])[2]/(coef(extrapolation[[i]])[3] -
                1)^2))
        g <- diag.block(g, ncoef)
        variance.asymptotic <- (t(g) %*% sigma.gamma %*% g)/ndes
        dimnames(variance.asymptotic) <- list(p.names, p.names)
        object$sigma.gamma <- sigma.gamma
        object$g <- g
        object$s <- s
        object$variance.asymptotic <- variance.asymptotic
    }
    ## Record the effective settings and the new lambda = -1 estimates.
    object$call$fitting.method <- fitting.method
    object$call$jackknife.estimation <- jackknife.estimation
    object$call$asymptotic <- asymptotic
    object$SIMEX.estimates[1, ] <- c(-1, SIMEX.estimate)
    object$coefficients <- as.vector(SIMEX.estimate)
    names(object$coefficients) <- p.names
    fitted.values <- predict(object, newdata = object$model$model[, -1,
        drop = FALSE], type = "response")
    object$fitted.values <- fitted.values
    ## FIX: the non-factor branch previously computed the residuals but
    ## never assigned them (the result was silently discarded).
    if (is.factor(object$model$model[, 1])) {
        object$residuals <- as.numeric(levels(object$model$model[, 1]))[object$model$model[,
            1]] - fitted.values
    } else {
        object$residuals <- object$model$model[, 1] - fitted.values
    }
    object$extrapolation <- extrapolation
    return(object)
}
| /R/simex-internal.R | no_license | cran/simex | R | false | false | 14,350 | r | .refit <- function(object, fitting.method = "quadratic", jackknife.estimation = "quadratic",
asymptotic = TRUE, allowed.fitting = c("quad", "line", "nonl", "logl",
"log2"), allowed.jackknife = c("quad", "line", "nonl", "logl",
FALSE), ...) {
fitting.method <- substr(fitting.method, 1, 4)
if (object$fitting.method == fitting.method)
stop("Model is already fitted with the specified fitting method",
call. = FALSE)
if (!any(fitting.method == allowed.fitting)) {
warning("Fitting method not implemented. Using: quadratic", call. = FALSE)
fitting.method <- "quad"
}
if (jackknife.estimation != FALSE)
jackknife.estimation <- substr(jackknife.estimation, 1, 4)
if (!any(jackknife.estimation == allowed.jackknife)) {
warning("Fitting method (jackknife) not implemented. Using: quadratic",
call. = FALSE)
jackknife.estimation <- "quad"
}
if (!any(names(object) == "variance.jackknife") && jackknife.estimation !=
FALSE) {
warning("Jackknife variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
call. = FALSE)
jackknife.estimation <- FALSE
}
if (!any(names(object) == "variance.asymptotic") && asymptotic) {
warning("Asymptotic variance estimation is not possible, due to the lack of it in the supplied model. Will be ignored.",
call. = FALSE)
asymptotic <- FALSE
}
cl <- class(object)
if (any(names(object) == "variance.asymptotic") && asymptotic == FALSE) {
# removing unwanted parts of the object
object <- object[setdiff(names(object), c("PSI", "c11", "a11",
"sigma", "sigma.gamma", "g", "s", "variance.asymptotic"))]
}
if (any(names(object) == "variance.jackknife") && jackknife.estimation ==
FALSE) {
# removing unwanted parts of the object
object <- object[setdiff(names(object), c("extrapolation.variance",
"variance.jackknife", "variance.jackknife.lambda"))]
}
class(object) <- cl
estimates <- object$SIMEX.estimates[-1, -1]
lambda <- object$lambda
ncoef <- length(coef(object))
ndes <- dim(object$model$model)[1]
p.names <- names(coef(object))
SIMEX.estimate <- vector(mode = "numeric", length = ncoef)
switch(fitting.method, quad = extrapolation <- lm(estimates ~ lambda +
I(lambda^2)), line = extrapolation <- lm(estimates ~ lambda), logl = extrapolation <- lm(I(log(t(t(estimates) +
(abs(apply(estimates, 2, min)) + 1) * (apply(estimates, 2, min) <=
0)))) ~ lambda), log2 = extrapolation <- fit.logl(lambda, p.names,
estimates), nonl = extrapolation <- fit.nls(lambda, p.names, estimates))
# security if nls does not converge
if (any(class(extrapolation) == "lm") && fitting.method == "log2")
fitting.method <- "logl"
# predicting the SIMEX estimate
switch(fitting.method, quad = SIMEX.estimate <- predict(extrapolation,
newdata = data.frame(lambda = -1)), line = SIMEX.estimate <- predict(extrapolation,
newdata = data.frame(lambda = -1)), nonl = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
newdata = data.frame(lambda = -1)), log2 = for (i in 1:length(p.names)) SIMEX.estimate[i] <- predict(extrapolation[[p.names[i]]],
newdata = data.frame(lambda = -1)) - ((abs(apply(estimates, 2,
min)) + 1) * (apply(estimates, 2, min) <= 0))[i], logl = SIMEX.estimate <- exp(predict(extrapolation,
newdata = data.frame(lambda = -1))) - (abs(apply(estimates, 2,
min)) + 1) * (apply(estimates, 2, min) <= 0))
# jackknife estimation
if (jackknife.estimation != FALSE) {
variance.jackknife <- object$variance.jackknife.lambda[-1, -1]
switch(jackknife.estimation, quad = extrapolation.variance <- lm(variance.jackknife ~
lambda + I(lambda^2)), line = extrapolation.variance <- lm(variance.jackknife ~
lambda), logl = extrapolation.variance <- lm(I(log(t(t(variance.jackknife) +
(abs(apply(variance.jackknife, 2, min)) + 1) * (apply(variance.jackknife,
2, min) <= 0)))) ~ lambda), nonl = extrapolation.variance <- fit.nls(lambda,
1:NCOL(variance.jackknife), variance.jackknife))
# variance.jackknife <- rbind(predict(extrapolation.variance, newdata =
# data.frame(lambda = -1)), variance.jackknife)
variance.jackknife2 <- vector("numeric", ncoef^2)
switch(jackknife.estimation, nonl = for (i in 1:NCOL(variance.jackknife)) variance.jackknife2[i] <- predict(extrapolation.variance[[i]],
newdata = data.frame(lambda = -1)), quad = variance.jackknife2 <- predict(extrapolation.variance,
newdata = data.frame(lambda = -1)), line = variance.jackknife2 <- predict(extrapolation.variance,
newdata = data.frame(lambda = -1)), logl = variance.jackknife2 <- exp(predict(extrapolation.variance,
newdata = data.frame(lambda = -1))) - (abs(apply(variance.jackknife,
2, min)) + 1) * (apply(variance.jackknife, 2, min) <= 0))
variance.jackknife <- rbind(variance.jackknife2, variance.jackknife)
variance.jackknife.lambda <- cbind(c(-1, lambda), variance.jackknife)
variance.jackknife <- matrix(variance.jackknife[1, ], nrow = ncoef,
ncol = ncoef, byrow = TRUE)
dimnames(variance.jackknife) <- list(p.names, p.names)
object$variance.jackknife.lambda <- variance.jackknife.lambda
object$variance.jackknife <- variance.jackknife
object$extrapolation.variance <- extrapolation.variance
}
if (asymptotic) {
sigma <- object$sigma
s <- construct.s(ncoef, lambda, fitting.method, extrapolation)
d.inv <- solve(s %*% t(s))
sigma.gamma <- d.inv %*% s %*% sigma %*% t(s) %*% d.inv
g <- list()
switch(fitting.method, quad = g <- c(1, -1, 1), line = g <- c(1,
-1), logl = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation)[1,
i] - coef(extrapolation)[2, i]), -exp(coef(extrapolation)[1,
i] - coef(extrapolation)[2, i])), log2 = for (i in 1:ncoef) g[[i]] <- c(exp(coef(extrapolation[[i]])[1] -
coef(extrapolation[[i]])[2]), -exp(coef(extrapolation[[i]])[1] -
coef(extrapolation[[i]])[2])), nonl = for (i in 1:ncoef) g[[i]] <- c(-1,
-(coef(extrapolation[[i]])[3] - 1)^-1, coef(extrapolation[[i]])[2]/(coef(extrapolation[[i]])[3] -
1)^2))
g <- diag.block(g, ncoef)
variance.asymptotic <- (t(g) %*% sigma.gamma %*% g)/ndes
dimnames(variance.asymptotic) <- list(p.names, p.names)
object$sigma.gamma <- sigma.gamma
object$g <- g
object$s <- s
object$variance.asymptotic <- variance.asymptotic
}
object$call$fitting.method <- fitting.method
object$call$jackknife.estimation <- jackknife.estimation
object$call$asymptotic <- asymptotic
object$SIMEX.estimates[1, ] <- c(-1, SIMEX.estimate)
object$coefficients <- as.vector(SIMEX.estimate)
names(object$coefficients) <- p.names
fitted.values <- predict(object, newdata = object$model$model[, -1,
drop = FALSE], type = "response")
object$fitted.values <- fitted.values
if (is.factor(object$model$model[, 1]))
object$residuals <- as.numeric(levels(object$model$model[, 1]))[object$model$model[,
1]] - fitted.values else object$model$model[, 1] - fitted.values
object$extrapolation <- extrapolation
return(object)
}
|
\docType{package}
\name{ruca-package}
\alias{ruca}
\alias{ruca-package}
\title{Rural-Urban Commuting Area Codes}
\description{
Rural-Urban Commuting Area Codes
}
\details{
Given a postal code, this package determines the urbanicity of
that region based upon the Rural Health Research Center's
Rural-Urban Commuting Area Codes (RUCAs).
}
\author{
\email{jason@bryer.org}
}
\keyword{package}
\keyword{ruca}
\keyword{urbanization}
| /man/ruca-package.Rd | no_license | Eemaa26/ruca | R | false | false | 426 | rd | \docType{package}
\name{ruca-package}
\alias{ruca}
\alias{ruca-package}
\title{Rural-Urban Commuting Area Codes}
\description{
Rural-Urban Commuting Area Codes
}
\details{
Given a postal code, this package determines the urbanicity of
that region based upon the Rural Health Research Center's
Rural-Urban Commuting Area Codes (RUCAs).
}
\author{
\email{jason@bryer.org}
}
\keyword{package}
\keyword{ruca}
\keyword{urbanization}
|
# Exercise 1: working with data frames (review)

# Install devtools package: allows installations from GitHub
install.packages("devtools")
install.packages("dplyr")
library("dplyr")

# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")

# Use the `library()` function to load the "fueleconomy" package
library(fueleconomy)

# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)

# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
makes <- vehicles$make

# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(makes))

# Filter the data set for vehicles manufactured in 1997
vehicles_1997 <- vehicles[vehicles$year == 1997, ]

# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
vehicles_1997 <- vehicles_1997[order(vehicles_1997$hwy), ]

# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
# (fixed: `vehicles_1997cty` was missing the `$` accessor)
vehicles_1997$average <- (vehicles_1997$hwy + vehicles_1997$cty) / 2
View(vehicles_1997)

# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
vehicles_2wd <- vehicles[vehicles$drive == "2-Wheel Drive", ]
efficient_2wd <- vehicles_2wd[vehicles_2wd$cty > 20, ]

# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
# NOTE(review): "the above vehicles" arguably refers to `efficient_2wd`
# rather than `vehicles_2wd`; confirm which data frame the exercise intends.
vehicles_2wd[vehicles_2wd$hwy == min(vehicles_2wd$hwy), "id"]

# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
# (the stray no-argument `select()` call, which errors at run time, was removed;
#  this exercise is still to be implemented)

# What was the most efficient Honda model of 1995?
| /chapter-11-exercises/exercise-1/exercise.R | permissive | ElsaZhong/book-exercises | R | false | false | 2,137 | r | # Exercise 1: working with data frames (review)
# Install devtools package: allows installations from GitHub
install.packages("devtools")
install.packages("dplyr")
library("dplyr")
# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")
# Use the `libary()` function to load the "fueleconomy" package
library(fueleconomy)
# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)
# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
makes <- vehicles$make
# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(makes))
# Filter the data set for vehicles manufactured in 1997
vehicles_1997 <- vehicles[vehicles$year == 1997, ]
# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
vehicles_1997$average <- (vehicles_1997$hwy + vehicles_1997$cty) / 2  # fixed: `vehicles_1997cty` was missing the `$` accessor
View(vehicles_1997)
# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
vehicles_2wd <- vehicles[vehicles$drive == "2-Wheel Drive", ]
efficient_2wd <- vehicles_2wd[vehicles_2wd$cty > 20, ]
# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
#vehicles_2wd$id
#vehicles_2wd$hwy
vehicles_2wd[vehicles_2wd$hwy == min(vehicles_2wd$hwy), "id" ]
# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
select()
# What was the most efficient Honda model of 1995?
|
## Fuzzer-generated regression input for grattan:::anyOutside:
## a and b are integer bounds and x is an empty integer vector.
## NOTE(review): the length-0 `x` appears to be the case under test --
## confirm the expected result against the anyOutside implementation.
testlist <- list(a = 753170038L, b = 1981501696L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{diagnostic.plots}
\alias{diagnostic.plots}
\title{R function plotting histograms and scatter plots of data, residuals and Z-scores}
\usage{
diagnostic.plots(data, plot.options = c("matrix", "singles", "none"))
}
\arguments{
\item{data}{a data frame with the aggregated number of reported deaths, the baseline, and Z-scores}
\item{plot.options}{selects the output graph type; the default is "matrix"}
}
\description{
R function plotting histograms and scatter plots of data, residuals and Z-scores
}
| /man/diagnostic.plots.Rd | no_license | thl-mjv/euromomo | R | false | false | 543 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{diagnostic.plots}
\alias{diagnostic.plots}
\title{R function plotting histograms and scatter plots of data, residuals and Z-scores}
\usage{
diagnostic.plots(data, plot.options = c("matrix", "singles", "none"))
}
\arguments{
\item{data}{a data frame with the aggregated number of reported deaths, the baseline, and Z-scores}
\item{plot.options}{selects the output graph type; the default is "matrix"}
}
\description{
R function plotting histograms and scatter plots of data, residuals and Z-scores
}
|
\name{hc_polygon-dispatch}
\alias{hc_polygon}
\title{
Method dispatch page for hc_polygon
}
\description{
Method dispatch page for \code{hc_polygon}.
}
\section{Dispatch}{
\code{hc_polygon} can be dispatched on following classes:
\itemize{
\item \code{\link{hc_polygon,GenomicHilbertCurve-method}}, \code{\link{GenomicHilbertCurve-class}} class method
\item \code{\link{hc_polygon,HilbertCurve-method}}, \code{\link{HilbertCurve-class}} class method
}
}
\examples{
# no example
NULL
}
| /man/hc_polygon-dispatch.rd | permissive | jokergoo/HilbertCurve | R | false | false | 488 | rd | \name{hc_polygon-dispatch}
\alias{hc_polygon}
\title{
Method dispatch page for hc_polygon
}
\description{
Method dispatch page for \code{hc_polygon}.
}
\section{Dispatch}{
\code{hc_polygon} can be dispatched on following classes:
\itemize{
\item \code{\link{hc_polygon,GenomicHilbertCurve-method}}, \code{\link{GenomicHilbertCurve-class}} class method
\item \code{\link{hc_polygon,HilbertCurve-method}}, \code{\link{HilbertCurve-class}} class method
}
}
\examples{
# no example
NULL
}
|
\name{mediator}
\alias{mediator}
\title{Simple mediator analysis and graph}
\description{
A function that conducts a simple mediation analysis and makes the figure
shown in Wright and London (2009). }
\usage{
mediator(x, y, m, ...)
}
\arguments{
\item{x}{ The predictor variable }
\item{y}{ The response variable }
\item{m}{ The mediator }
\item{\dots}{ Other arguments }
}
\value{
The graph is the main output.
}
\author{Daniel B. Wright}
\note{
There are other mediation packages. This was shown in Wright and London
to illustrate how to make a function. It does not do anything particularly
novel or clever.
}
\examples{
set.seed(143)
leaflet <- rep(c(0,1),each=50)
fairskin <- rbinom(100,1,.5)
likely <- rbinom(100,10,.20 + .2*leaflet + .2*fairskin)
plan <- rbinom(100,7,likely/15+leaflet*.2)
mediator(leaflet,plan,likely)
}
| /man/mediator.Rd | no_license | cran/mrt | R | false | false | 877 | rd | \name{mediator}
\alias{mediator}
\title{Simple mediator analysis and graph}
\description{
A function that conducts a simple mediation analysis and makes the figure
shown in Wright and London (2009). }
\usage{
mediator(x, y, m, ...)
}
\arguments{
\item{x}{ The predictor variable }
\item{y}{ The response variable }
\item{m}{ The mediator }
\item{\dots}{ Other arguments }
}
\value{
The graph is the main output.
}
\author{Daniel B. Wright}
\note{
There are other mediation packages. This was shown in Wright and London
to illustrate how to make a function. It does not do anything particularly
novel or clever.
}
\examples{
set.seed(143)
leaflet <- rep(c(0,1),each=50)
fairskin <- rbinom(100,1,.5)
likely <- rbinom(100,10,.20 + .2*leaflet + .2*fairskin)
plan <- rbinom(100,7,likely/15+leaflet*.2)
mediator(leaflet,plan,likely)
}
|
## TRUE iff all supplied values are identical() to one another.
## Accepts either multiple arguments or a single list/vector of values;
## fewer than two values is an error.
allIdentical <- function(...) {
  args <- list(...)
  ## A single argument is unwrapped and its elements compared instead.
  if (length(args) == 1) args <- args[[1L]]
  stopifnot(length(args) >= 2L)
  ## Pairwise-consecutive comparison; by transitivity this covers all pairs.
  for (k in 2:length(args)) {
    if (!identical(args[[k]], args[[k - 1L]])) return(FALSE)
  }
  TRUE
}
| /ribiosUtils/R/allIdentical.R | no_license | RCBiczok/ribios | R | false | false | 294 | r | allIdentical <- function(...) {
x <- list(...)
if(length(x)==1) x <- x[[1L]]
stopifnot(length(x)>=2L)
res <- identical(x[[1]], x[[2]])
if(length(x)>2) {
for(i in 3:length(x)) {
res <- identical(x[[i]], x[[i-1]]) && res
if(!res) return(FALSE)
}
}
return(res)
}
|
# Regression tests for fix_windows_url(): bare Windows file paths gain a
# file:/// scheme (with backslashes turned into forward slashes), while
# already-well-formed file:/// and http URLs pass through unchanged.
# Skipped entirely on non-Windows platforms.
test_that("fix_windows_url works properly", {
  testthat::skip_if_not(is_windows())
  # Should add file:/// to file paths
  expect_equal(
    suppressWarnings(fix_windows_url("c:/path/file.html")),
    "file:///c:/path/file.html"
  )
  expect_equal(
    suppressWarnings(fix_windows_url("c:\\path\\file.html")),
    "file:///c:/path/file.html"
  )
  # Currently disabled because I'm not sure exactly should happen when there's
  # not a leading drive letter like "c:"
  # expect_equal(fix_windows_url("/path/file.html"), "file:///c:/path/file.html")
  # expect_equal(fix_windows_url("\\path\\file.html"), "file:///c:/path/file.html")
  # expect_equal(fix_windows_url("/path\\file.html"), "file:///c:/path/file.html")
  # Shouldn't affect proper URLs
  expect_equal(fix_windows_url("file:///c:/path/file.html"), "file:///c:/path/file.html")
  expect_equal(fix_windows_url("http://x.org/file.html"), "http://x.org/file.html")
})
| /tests/testthat/test-url.R | no_license | wch/webshot | R | false | false | 930 | r | test_that("fix_windows_url works properly", {
testthat::skip_if_not(is_windows())
# Should add file:/// to file paths
expect_equal(
suppressWarnings(fix_windows_url("c:/path/file.html")),
"file:///c:/path/file.html"
)
expect_equal(
suppressWarnings(fix_windows_url("c:\\path\\file.html")),
"file:///c:/path/file.html"
)
# Currently disabled because I'm not sure exactly should happen when there's
# not a leading drive letter like "c:"
# expect_equal(fix_windows_url("/path/file.html"), "file:///c:/path/file.html")
# expect_equal(fix_windows_url("\\path\\file.html"), "file:///c:/path/file.html")
# expect_equal(fix_windows_url("/path\\file.html"), "file:///c:/path/file.html")
# Shouldn't affect proper URLs
expect_equal(fix_windows_url("file:///c:/path/file.html"), "file:///c:/path/file.html")
expect_equal(fix_windows_url("http://x.org/file.html"), "http://x.org/file.html")
})
|
## Array Metadata interface from R
##
## Fundamentally we have two access methods, one 'simple' just stating
## a URI (so repeated and/or remote access is more costly) and one
## 'direct' using an external pointer. The wrappers here switch
## accordingly
## Create (or re-create) a small 1-D sparse test array at path `tmp` with two
## INT32 attributes, attach two metadata entries ("vec" and "txt"), and
## return the tiledb_sparse handle.
setup <- function(tmp, verbose=FALSE) {
  if (verbose) cat("Using ", tmp, "\n")
  ## Start from a clean slate: any existing array directory is removed.
  if (dir.exists(tmp)) unlink(tmp, recursive = TRUE, force = TRUE)
  dim <- tiledb_dim("dim", domain = c(1L, 4L))
  dom <- tiledb_domain(c(dim))
  a1 <- tiledb_attr("a1", type = "INT32")
  a2 <- tiledb_attr("a2", type = "INT32")
  sch <- tiledb_array_schema(dom, c(a1, a2), sparse=TRUE)
  tiledb_array_create(tmp, sch)
  arr <- tiledb_sparse(tmp, as.data.frame=FALSE)
  ## Write two metadata entries.
  ## NOTE(review): the array is re-opened for WRITE without closing the first
  ## handle -- confirm this is intended / harmless in libtiledb.
  arrW <- tiledb:::libtiledb_array_open(arr@ptr, "WRITE")
  tiledb:::put_metadata(arrW, "vec", c(1.1, 2.2, 3.3))
  arrW <- tiledb:::libtiledb_array_open(arr@ptr, "WRITE")
  tiledb:::put_metadata(arrW, "txt", "the quick brown fox")
  tiledb:::libtiledb_array_close(arrW)
  arr
}
## TRUE if `arr` is a TileDB array object (sparse or dense).
.isArray <- function(arr) is(arr, "tiledb_sparse") || is(arr, "tiledb_dense")
## Stop with an error unless `arr` is a TileDB array object.
.assertArray <- function(arr) stopifnot(is(arr, "tiledb_sparse") || is(arr, "tiledb_dense"))
## Open `arr` for "READ" (default) or "WRITE"; the external pointer in
## `arr@ptr` is replaced and the updated object returned.
tiledb_array_open <- function(arr, type=c("READ","WRITE")) {
  type <- match.arg(type)
  arr@ptr <- tiledb:::libtiledb_array_open(arr@ptr, type)
  arr
}
## Close `arr`'s underlying handle and return the object.
tiledb_array_close <- function(arr) {
  tiledb:::libtiledb_array_close(arr@ptr)
  arr
}
## Does metadata key `key` exist on `arr`?
## `arr` may be a URI string (handled by the string-based helper) or a
## tiledb_sparse/tiledb_dense object that is already open.
tiledb_has_metadata <- function(arr, key) {
  ## String URI: delegate to the self-contained helper.
  if (is.character(arr)) return(tiledb:::has_metadata_simple(arr, key))
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## An array object must already be open before metadata can be queried.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call. = FALSE)
  }
  tiledb:::has_metadata(arr@ptr, key)
}
## Number of metadata entries stored on `arr` (URI string or open array).
tiledb_num_metadata <- function(arr) {
  ## String URI: delegate to the self-contained helper.
  if (is.character(arr)) return(tiledb:::num_metadata_simple(arr))
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## An array object must already be open before metadata can be queried.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call. = FALSE)
  }
  tiledb:::num_metadata(arr@ptr)
}
## Fetch the metadata value stored under `key` on `arr` (URI string or open
## array).
tiledb_get_metadata <- function(arr, key) {
  ## String URI: delegate to the self-contained helper.
  if (is.character(arr)) return(tiledb:::get_metadata_simple(arr, key))
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## An array object must already be open before metadata can be queried.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call. = FALSE)
  }
  tiledb:::get_metadata(arr@ptr, key)
}
## Store `val` under metadata key `key` on `arr` (URI string or an array
## object that is already open).
tiledb_put_metadata <- function(arr, key, val) {
  ## String URI: delegate to the self-contained helper.
  if (is.character(arr)) return(tiledb:::put_metadata_simple(arr, key, val))
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## An array object must already be open before metadata can be written.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call. = FALSE)
  }
  tiledb:::put_metadata(arr@ptr, key, val)
}
library(tiledb)
## NOTE(review): hard-coded demo path; tempfile() is commented out --
## presumably so the array persists across runs. Confirm before reuse.
tmp <- "/tmp/fooarray" #tempfile()
if (!dir.exists(tmp)) {
  arr <- setup(tmp, TRUE)
} else {
  arr <- tiledb_sparse(tmp, as.data.frame=FALSE)
}
arr <- tiledb_array_open(arr, "READ")
cat("Do we have 'arr::vec': ", if (tiledb_has_metadata(arr, "vec")) "yes" else "no", "\n")
cat("Do we have 'arr::mat': ", if (tiledb_has_metadata(arr, "mat")) "yes" else "no", "\n")
cat("Do we have 'arr::txt': ", if (tiledb_has_metadata(arr, "txt")) "yes" else "no", "\n")
cat("Count for 'arr': ", tiledb_num_metadata(arr), "\n")
## FIX: format() has no `collapse` argument (it was silently ignored);
## paste() is what joins the vector elements with commas.
cat("Get for 'arr::vec': ", paste(tiledb_get_metadata(arr, "vec"), collapse=","), "\n")
arr <- tiledb_array_close(arr)
arr <- tiledb_array_open(arr, "WRITE")
cat("Adding to 'arr': ", tiledb_put_metadata(arr, "foo", "bar"), "\n")
arr <- tiledb_array_close(arr)
arr <- tiledb_array_open(arr, "READ")
cat("Count for 'arr': ", tiledb_num_metadata(arr), "\n")
cat("Done\n")
| /inst/examples/ex_metadata_2.R | permissive | aaronwolen/TileDB-R | R | false | false | 4,276 | r |
## Array Metadata interface from R
##
## Fundamentally we have two access methods, one 'simple' just stating
## a URI (so repeated and/or remote access is more costly) and one
## 'direct' using an external pointer. The wrappers here switch
## accordingly
## Create (or re-create) a small 1-D sparse test array at path `tmp` with two
## INT32 attributes, attach two metadata entries ("vec" and "txt"), and
## return the tiledb_sparse handle.
setup <- function(tmp, verbose=FALSE) {
  if (verbose) cat("Using ", tmp, "\n")
  ## Start from a clean slate: any existing array directory is removed.
  if (dir.exists(tmp)) unlink(tmp, recursive = TRUE, force = TRUE)
  dim <- tiledb_dim("dim", domain = c(1L, 4L))
  dom <- tiledb_domain(c(dim))
  a1 <- tiledb_attr("a1", type = "INT32")
  a2 <- tiledb_attr("a2", type = "INT32")
  sch <- tiledb_array_schema(dom, c(a1, a2), sparse=TRUE)
  tiledb_array_create(tmp, sch)
  arr <- tiledb_sparse(tmp, as.data.frame=FALSE)
  ## Write two metadata entries.
  ## NOTE(review): the array is re-opened for WRITE without closing the first
  ## handle -- confirm this is intended / harmless in libtiledb.
  arrW <- tiledb:::libtiledb_array_open(arr@ptr, "WRITE")
  tiledb:::put_metadata(arrW, "vec", c(1.1, 2.2, 3.3))
  arrW <- tiledb:::libtiledb_array_open(arr@ptr, "WRITE")
  tiledb:::put_metadata(arrW, "txt", "the quick brown fox")
  tiledb:::libtiledb_array_close(arrW)
  arr
}
## TRUE if `arr` is a TileDB array object (sparse or dense).
.isArray <- function(arr) is(arr, "tiledb_sparse") || is(arr, "tiledb_dense")
## Stop with an error unless `arr` is a TileDB array object.
.assertArray <- function(arr) stopifnot(is(arr, "tiledb_sparse") || is(arr, "tiledb_dense"))
tiledb_array_open <- function(arr, type=c("READ","WRITE")) {
  ## Validate the requested mode ("READ" is the default), reopen the C-level
  ## handle held in the external-pointer slot, and return the wrapper so the
  ## caller can re-assign it.
  mode <- match.arg(type)
  arr@ptr <- tiledb:::libtiledb_array_open(arr@ptr, mode)
  arr
}
tiledb_array_close <- function(arr) {
  ## Close the C-level handle; hand the wrapper back so callers can keep
  ## threading it through open/close cycles, mirroring tiledb_array_open().
  tiledb:::libtiledb_array_close(arr@ptr)
  arr
}
tiledb_has_metadata <- function(arr, key) {
  ## Character input is treated as a URI: the one-shot helper manages its own
  ## open/close cycle (costlier for repeated or remote access).
  if (is.character(arr)) {
    return(tiledb:::has_metadata_simple(arr, key))
  }
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## The external-pointer path requires an already-open handle.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call.=FALSE)
  }
  tiledb:::has_metadata(arr@ptr, key)
}
tiledb_num_metadata <- function(arr) {
  ## URI string: defer to the simple helper, which opens/closes internally.
  if (is.character(arr)) {
    return(tiledb:::num_metadata_simple(arr))
  }
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## Counting metadata through a handle requires the array to be open.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call.=FALSE)
  }
  tiledb:::num_metadata(arr@ptr)
}
tiledb_get_metadata <- function(arr, key) {
  ## URI string: the simple helper opens and closes the array on its own.
  if (is.character(arr)) {
    return(tiledb:::get_metadata_simple(arr, key))
  }
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## Reading a metadata value requires an already-open handle.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call.=FALSE)
  }
  tiledb:::get_metadata(arr@ptr, key)
}
tiledb_put_metadata <- function(arr, key, val) {
  ## URI string: one-shot helper performs its own open/write/close.
  if (is.character(arr)) {
    return(tiledb:::put_metadata_simple(arr, key, val))
  }
  if (!.isArray(arr)) {
    message("Neither (text) URI nor Array.")
    return(NULL)
  }
  ## Writing requires a handle that was opened (in "WRITE" mode) beforehand.
  if (!tiledb:::libtiledb_array_is_open(arr@ptr)) {
    stop("Array is not open, cannot access metadata.", call.=FALSE)
  }
  tiledb:::put_metadata(arr@ptr, key, val)
}
library(tiledb)
tmp <- "/tmp/fooarray" #tempfile()
if (!dir.exists(tmp)) {
arr <- setup(tmp, TRUE)
} else {
arr <- tiledb_sparse(tmp, as.data.frame=FALSE)
}
## --- Demo driver: exercise the metadata wrappers defined above --------------
## Reads require a "READ" handle.
arr <- tiledb_array_open(arr, "READ")
cat("Do we have 'arr::vec': ", ifelse(tiledb_has_metadata(arr, "vec"), "yes", "no"), "\n")
## "mat" was never written by setup(), so this should report "no".
cat("Do we have 'arr::mat': ", ifelse(tiledb_has_metadata(arr, "mat"), "yes", "no"), "\n")
cat("Do we have 'arr::txt': ", ifelse(tiledb_has_metadata(arr, "txt"), "yes", "no"), "\n")
cat("Count for 'arr': ", tiledb_num_metadata(arr), "\n")
cat("Get for 'arr::vec': ", format( tiledb_get_metadata(arr, "vec"), collapse=","), "\n")
## Switch to a "WRITE" handle to add a new entry, then close it again.
arr <- tiledb_array_close(arr)
arr <- tiledb_array_open(arr, "WRITE")
cat("Adding to 'arr': ", tiledb_put_metadata(arr, "foo", "bar"), "\n")
arr <- tiledb_array_close(arr)
## Re-read: the count should now include the new "foo" entry.
arr <- tiledb_array_open(arr, "READ")
cat("Count for 'arr': ", tiledb_num_metadata(arr), "\n")
cat("Done\n")
|
# Sex ratio at birth (rows with Age == 0) from the corrected population table.
# NOTE(review): relies on dplyr/magrittr ('%>%', filter) being attached by the
# caller -- this script loads no packages itself; confirm before sourcing.
pop_size_female <- read.csv('pop_size_corrected.txt') %>%
  filter(Age==0) %>% .[,"Female"]
pop_size_male <- read.csv('pop_size_corrected.txt') %>%
  filter(Age==0) %>% .[,"Male"]
# Eyeball the female/male ratio series before averaging.
plot(pop_size_female/pop_size_male)
# Average over index positions 30:57 (a hand-picked window of the series).
mean((pop_size_female/pop_size_male)[30:57])
# female_to_male_ratio=0.9489044
| /female_to_male_ratio.R | no_license | mkhlgrv/olg | R | false | false | 297 | r | pop_size_female <- read.csv('pop_size_corrected.txt') %>%
filter(Age==0) %>% .[,"Female"]
pop_size_male <- read.csv('pop_size_corrected.txt') %>%
filter(Age==0) %>% .[,"Male"]
plot(pop_size_female/pop_size_male)
mean((pop_size_female/pop_size_male)[30:57])
# female_to_male_ratio=0.9489044
|
\name{supplc}
\alias{supplc}
\title{
Supplementary Columns in Correspondence Analysis
}
\description{
Using the results of a correspondence analysis, project new columns into the
factor space.
}
\usage{
supplc(a, ca.res)
}
\arguments{
\item{a}{
data matrix to be projected. Must have same number of rows as matrix which
was initially input to the correspondence analysis.
}
\item{ca.res}{
the output of a correspondence analysis. The following components of this
object are used: \code{evals}, \code{rproj} and \code{cproj}.
}}
\value{
a list containing the matrix \code{proj},
projections of the columns of \code{a} on the correspondence analysis factors.
}
\references{
See function \code{ca}.
}
\seealso{
Correspondence analysis: \code{\link{ca}}.
Supplementary rows and columns: \code{\link{supplr}}, \code{\link{supplc}}. Initial data coding:
\code{\link{flou}}, \code{\link{logique}}. Other functions producing objects of class "reddim":
\code{\link{pca}}, \code{\link{sammon}}. Other related functions: \code{\link{prcomp}}, \code{\link{cancor}}, \code{\link{cmdscale}}.
}
\examples{
data(USArrests)
USArrests <- as.matrix(USArrests)
corr <- ca(USArrests[,1:2])
newproj <- supplc(USArrests[,3:4], corr)
# plot of first and second factors, and of supplementary columns:
plot(corr$cproj[,1], corr$cproj[,2],type="n")
text(corr$cproj[,1], corr$cproj[,2])
points(newproj$proj[,1], newproj$proj[,2], col=2)
}
\keyword{multivariate}
\keyword{algebra}
% Converted by Sd2Rd version 0.2-a5.
| /man/supplc.Rd | no_license | cran/multiv | R | false | false | 1,502 | rd | \name{supplc}
\alias{supplc}
\title{
Supplementary Columns in Correspondence Analysis
}
\description{
Using the results of a correspondence analysis, project new columns into the
factor space.
}
\usage{
supplc(a, ca.res)
}
\arguments{
\item{a}{
data matrix to be projected. Must have same number of rows as matrix which
was initially input to the correspondence analysis.
}
\item{ca.res}{
the output of a correspondence analysis. The following components of this
object are used: \code{evals}, \code{rproj} and \code{cproj}.
}}
\value{
a list containing the matrix \code{proj},
projections of the columns of \code{a} on the correspondence analysis factors.
}
\references{
See function \code{ca}.
}
\seealso{
Correspondence analysis: \code{\link{ca}}.
Supplementary rows and columns: \code{\link{supplr}}, \code{\link{supplc}}. Initial data coding:
\code{\link{flou}}, \code{\link{logique}}. Other functions producing objects of class "reddim":
\code{\link{pca}}, \code{\link{sammon}}. Other related functions: \code{\link{prcomp}}, \code{\link{cancor}}, \code{\link{cmdscale}}.
}
\examples{
data(USArrests)
USArrests <- as.matrix(USArrests)
corr <- ca(USArrests[,1:2])
newproj <- supplc(USArrests[,3:4], corr)
# plot of first and second factors, and of supplementary columns:
plot(corr$cproj[,1], corr$cproj[,2],type="n")
text(corr$cproj[,1], corr$cproj[,2])
points(newproj$proj[,1], newproj$proj[,2], col=2)
}
\keyword{multivariate}
\keyword{algebra}
% Converted by Sd2Rd version 0.2-a5.
|
# loading data
source('download_data_and_clean.R')
# opening graphics device
png(filename='plot1.png',width=480,height=480,units='px')
# plotting the data
hist(powerconsumed$GlobalActivePower,main='Global Active Power',xlab='Global Active Power (kilowatts)',col='red')
# closing the graphics device
x<-dev.off() | /plot1.R | no_license | akshaylike/ExData_Plotting1 | R | false | false | 324 | r | # loading data
source('download_data_and_clean.R')
# opening graphics device
png(filename='plot1.png',width=480,height=480,units='px')
# plotting the data
hist(powerconsumed$GlobalActivePower,main='Global Active Power',xlab='Global Active Power (kilowatts)',col='red')
# closing the graphics device
x<-dev.off() |
function(input, output) {
output$plot <- renderPlotly({
p1 <- ggplot(data = zeitdat, aes(x = Station, y = wartezeit)) + geom_boxplot() +
labs(x = "Station", y = "Waiting time in hours") + ggtitle("Distribution waiting time per department") +
theme(plot.margin=unit(c(1.5,1.5,1.5,1.5),"cm")) +
theme(axis.text=element_text(size=18), axis.title=element_text(size=22), plot.title = element_text(size = 24, face = "bold"),
axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 0, l = 0)),
axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0)))
#ppl <- ggplotly(p1, dynamicTicks = FALSE)
print(p1)
})
}
| /Analyse/shinyApp/server.R | no_license | maximizeIT/dhc18 | R | false | false | 691 | r |
function(input, output) {
output$plot <- renderPlotly({
p1 <- ggplot(data = zeitdat, aes(x = Station, y = wartezeit)) + geom_boxplot() +
labs(x = "Station", y = "Waiting time in hours") + ggtitle("Distribution waiting time per department") +
theme(plot.margin=unit(c(1.5,1.5,1.5,1.5),"cm")) +
theme(axis.text=element_text(size=18), axis.title=element_text(size=22), plot.title = element_text(size = 24, face = "bold"),
axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 0, l = 0)),
axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0)))
#ppl <- ggplotly(p1, dynamicTicks = FALSE)
print(p1)
})
}
|
## Plot method for 'evouniparam' objects.
##
## Two input shapes are supported:
##   * a numeric vector of uniqueness values -> Cleveland dot chart;
##   * a list with components $q (parameter values) and $uni (matrix of
##     uniqueness profiles, one row per unit) -> one curve per row.
##
## @param x          Numeric vector, or list with components $q and $uni.
## @param legend     If its first element is TRUE, draw a legend of row names.
## @param legendposi Keyword placement passed to legend().
## @param axisLABEL  Label for the uniqueness axis.
## @param type,col,lty,pch  Usual graphical parameters; length-1 col/pch are
##     recycled to one entry per row of x$uni.
## @param ...        Further arguments passed to the underlying plot calls.
plot.evouniparam <-
function(x, legend = TRUE, legendposi = "topright", axisLABEL = "Tree-based uniqueness", type="b", col = if(is.numeric(x)) NULL else sample(colors(distinct = TRUE), nrow(x$uni)), lty = if(is.numeric(x)) NULL else rep(1, nrow(x$uni)), pch = if(is.numeric(x)) NULL else rep(19, nrow(x$uni)), ...)
{
    if(is.numeric(x)){
        y <- as.vector(x)
        names(y) <- names(x)
        dotchart(y, xlab = axisLABEL, ...)
    }
    if(is.list(x)){
        if(length(col)==1) col <- rep(col, nrow(x$uni))
        if(length(pch)==1) pch <- rep(pch, nrow(x$uni))
        ## BUG FIX: the original call contained a stray empty argument
        ## ("pch = pch[1], , ylab=...") that plot.default silently matched
        ## positionally; it has been removed.
        plot(x$q, x$uni[1, ], type = type, col = col[1], ylim = c(min(x$uni), max(x$uni)), pch = pch[1], ylab=axisLABEL, xlab="q", ...)
        ## Row 1 is drawn again inside the loop; the overplot is harmless and
        ## keeps the loop uniform.  seq_len() guards against a 0-row matrix.
        for(i in seq_len(nrow(x$uni))){
            lines(x$q, x$uni[i, ], type = type, col = col[i], pch = pch[i], ...)
        }
        if(legend[1]){
            legend(legendposi, legend = rownames(x$uni), col = col, lty = lty, pch = pch, ...)
        }
    }
}
| /R/plot.evouniparam.R | no_license | cran/adiv | R | false | false | 978 | r | plot.evouniparam <-
function(x, legend = TRUE, legendposi = "topright", axisLABEL = "Tree-based uniqueness", type="b", col = if(is.numeric(x)) NULL else sample(colors(distinct = TRUE), nrow(x$uni)), lty = if(is.numeric(x)) NULL else rep(1, nrow(x$uni)), pch = if(is.numeric(x)) NULL else rep(19, nrow(x$uni)), ...)
{
if(is.numeric(x)){
y <- as.vector(x)
names(y) <- names(x)
dotchart(y, xlab = axisLABEL, ...)
}
if(is.list(x)){
if(length(col)==1) col <- rep(col, nrow(x$uni))
if(length(pch)==1) pch <- rep(pch, nrow(x$uni))
plot(x$q, x$uni[1, ], type = type, col = col[1], ylim = c(min(x$uni), max(x$uni)), pch = pch[1], , ylab=axisLABEL, xlab="q", ...)
for(i in 1:nrow(x$uni)){
lines(x$q, x$uni[i, ], type = type, col = col[i], pch = pch[i], ...)
}
if(legend[1]){
legend(legendposi, legend = rownames(x$uni), col = col, lty = lty, pch = pch, ...)
}
}
}
|
#
# Exploratory Data Analysis course project -- Plot 4: a 2x2 panel of
# household power consumption measures for 2007-02-01 and 2007-02-02.
#
# NOTE(review): setwd() to a personal Desktop path makes the script
# machine-specific; consider running it from the data directory instead.
setwd("~/Desktop/Coursera - Exploratory data analysis")
# '?' marks missing values in the raw semicolon-separated file.
hh_power <- read.csv("household_power_consumption.txt", sep=";",na.strings=c("NA","?"))
# Keep only the two target days (dates are stored as d/m/Y strings).
x<-hh_power$Date=="1/2/2007" | hh_power$Date=="2/2/2007"
hhpwc<-hh_power[x,]
rm(hh_power)
rm(x)
# Combine Date and Time into a single POSIXct timestamp for the x axes.
hhpwc$dt<-as.POSIXct(paste(as.character(hhpwc$Date),as.character(hhpwc$Time)), format = "%d/%m/%Y %H:%M")
# Plot 4
#================
#
# 1. Use the results from producing plot 1 and 2 and 3.
# 2. Set the number of plots to be produced per row and column
# 3. Create the first plot (this is plot 2)
# 4. Create the second plot (Similar to plot 2 but different variable)
# 5. Create third plot (this is plot 3)
# 6. Plot the last plot (this is a variant on plot 2)
# 7. Save the plot
par(mfrow=c(2,2))
plot(hhpwc$dt,hhpwc$Global_active_power,type="l", ylab="Global Active Power",xlab="")
plot(hhpwc$dt,hhpwc$Voltage,type="l", ylab="Voltage",xlab="datetime")
with(hhpwc,{
  plot(dt, Sub_metering_1, type="n",ylab="Energy sub metering",xlab="") # No data in the plot
  lines(dt,Sub_metering_1,type="l",col="black")
  lines(dt,Sub_metering_2,type="l",col="red")
  lines(dt,Sub_metering_3,type="l",col="blue")
  # BUG FIX: legtxt/legcol/leglty were used below but never defined, so the
  # legend() call errored at run time.  Define them to match the three series.
  legtxt <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
  legcol <- c("black", "red", "blue")
  leglty <- c(1, 1, 1)
  legend("topright", legend=legtxt, col=legcol, lwd=1, lty=leglty,y.intersp=1,xjust=1)
})
plot(hhpwc$dt,hhpwc$Global_reactive_power,type="l", ylab="Global_reactive_power",xlab="datetime")
dev.copy(png, "plot4.png")
dev.off() | /Plot4.R | no_license | Harrykoch/ExData_Plotting1 | R | false | false | 1,383 | r | #
#
#
setwd("~/Desktop/Coursera - Exploratory data analysis")
hh_power <- read.csv("household_power_consumption.txt", sep=";",na.strings=c("NA","?"))
x<-hh_power$Date=="1/2/2007" | hh_power$Date=="2/2/2007"
hhpwc<-hh_power[x,]
rm(hh_power)
rm(x)
hhpwc$dt<-as.POSIXct(paste(as.character(hhpwc$Date),as.character(hhpwc$Time)), format = "%d/%m/%Y %H:%M")
# Plot 4
#================
#
# 1. Use the results from producing plot 1 and 2 and 3.
# 2. Set the number of plots to be produced per row and column
# 3. Create the first plot (this is plot 2)
# 4. Create the second plot (Similar to plot 2 but different variable)
# 5. Create third plot (this is plot 3)
# 6. Plot the last plot (this is a variant on plot 2)
# 7. Save the plot
par(mfrow=c(2,2))
plot(hhpwc$dt,hhpwc$Global_active_power,type="l", ylab="Global Active Power",xlab="")
plot(hhpwc$dt,hhpwc$Voltage,type="l", ylab="Voltage",xlab="datetime")
with(hhpwc,{
plot(dt, Sub_metering_1, type="n",ylab="Energy sub metering",xlab="") # No data in the plot
lines(dt,Sub_metering_1,type="l",col="black")
lines(dt,Sub_metering_2,type="l",col="red")
lines(dt,Sub_metering_3,type="l",col="blue")
legend("topright", legend=legtxt, col=legcol, lwd=1, lty=leglty,y.intersp=1,xjust=1)
})
plot(hhpwc$dt,hhpwc$Global_reactive_power,type="l", ylab="Global_reactive_power",xlab="datetime")
dev.copy(png, "plot4.png")
dev.off() |
## --- Load and tidy the aligned sequence file --------------------------------
## Each useful line is "<CC-label> <sequence> <n>" separated by runs of
## spaces/tabs; lines containing '*' are alignment ruler lines and dropped.
## NOTE(review): relies on a global 'dir' being defined by the calling script.
seq1 = read.table(paste0(dir, "/xci_paper_data/phylo_seqs/beast1/sequence1_align.txt"), sep="\n")
seq1 = as.character(seq1$V1[-grep("\\*", seq1$V1)])
seq1 = gsub(" +", " ", seq1)
## BUG FIX: the original passed 'seq1' a second time as strsplit()'s third
## argument (which is 'fixed='); the stray argument has been removed.
seq1 = data.frame(do.call("rbind", strsplit(as.character(seq1), " |\t")))
colnames(seq1) = c("CC", "seq","n")
if(length(which(seq1$CC == "")) > 0) seq1 = seq1[-which(seq1$CC == ""),]
## Strain label is the part of the CC name before the first '-' or '_'; the
## legacy "X" label is normalised to "B6".
CCs = do.call("rbind", strsplit(as.character(unique(seq1$CC)), "-|_"))[,1]
if(length(which(toupper(CCs) == "X")) > 0){
  CCs[which(toupper(CCs) == "X")] = "B6"
  seq1$CC = gsub("X", "B6", toupper(seq1$CC))
}
## Concatenate all sequence chunks belonging to each CC label.
strings = data.frame(CC = CCs, string = "")
strings$CC = as.character(strings$CC)
strings$string = apply(strings, 1, function(x){
  paste(seq1$seq[grep(as.character(paste(x["CC"])), as.character(seq1$CC))], collapse="")
})
strings = strings[order(strings$CC), ]
## Assemble a minimal NEXUS DATA block: header, one "<CC>\t<sequence>" row per
## strain, then the closing matrix terminator, and write it out.
header = paste0("#NEXUS
BEGIN DATA;
\tDIMENSIONS NTAX=", length(unique(strings$CC)), " NCHAR=", unique(unlist(lapply(strings$string, nchar))), ";
\tFORMAT MISSING=N GAP=- DATATYPE=DNA;
\tMATRIX")
ender = "\t;
END;"
seq1_out = paste(header, paste0("\t", apply(strings, 1, function(x) paste0(x, collapse="\t")), collapse="\n"), ender, sep="\n")
write.table(seq1_out, col.names = F, row.names = F, quote = F,
            file.path(dir, "xci_paper_data/phylo_seqs/sequence1_102709036-102711871.nex"))
#####################################################################
| /old_versions/beast_files.R | no_license | kathiesun/TReC_matnut | R | false | false | 1,446 | r | seq1 = read.table(paste0(dir, "/xci_paper_data/phylo_seqs/beast1/sequence1_align.txt"), sep="\n")
seq1 = as.character(seq1$V1[-grep("\\*", seq1$V1)])
seq1 = gsub(" +", " ", seq1)
seq1 = data.frame(do.call("rbind", strsplit(as.character(seq1), " |\t", seq1)))
colnames(seq1) = c("CC", "seq","n")
if(length(which(seq1$CC == "")) > 0) seq1 = seq1[-which(seq1$CC == ""),]
CCs = do.call("rbind", strsplit(as.character(unique(seq1$CC)), "-|_"))[,1]
if(length(which(toupper(CCs) == "X")) > 0){
CCs[which(toupper(CCs) == "X")] = "B6"
seq1$CC = gsub("X", "B6", toupper(seq1$CC))
}
strings = data.frame(CC = CCs, string = "")
strings$CC = as.character(strings$CC)
strings$string = apply(strings, 1, function(x){
paste(seq1$seq[grep(as.character(paste(x["CC"])), as.character(seq1$CC))], collapse="")
})
strings = strings[order(strings$CC), ]
header = paste0("#NEXUS
BEGIN DATA;
\tDIMENSIONS NTAX=", length(unique(strings$CC)), " NCHAR=", unique(unlist(lapply(strings$string, nchar))), ";
\tFORMAT MISSING=N GAP=- DATATYPE=DNA;
\tMATRIX")
ender = "\t;
END;"
seq1_out = paste(header, paste0("\t", apply(strings, 1, function(x) paste0(x, collapse="\t")), collapse="\n"), ender, sep="\n")
write.table(seq1_out, col.names = F, row.names = F, quote = F,
file.path(dir, "xci_paper_data/phylo_seqs/sequence1_102709036-102711871.nex"))
#####################################################################
|
## Sensitivity analysis for AIPE (accuracy in parameter estimation) sample-size
## planning for the coefficient of variation (CV).
##
## The per-replicate sample size is either taken directly from 'Specified.N'
## or computed by ss.aipe.cv() from 'Estimated.C.of.V'.  G samples of that
## size are drawn from N(mean, (True.C.of.V*mean)^2), a confidence interval
## for the CV is formed for each (via ci.cv), and width/coverage summaries are
## returned.
##
## @param True.C.of.V       Population CV used to generate the data.
## @param Estimated.C.of.V  CV assumed at the planning stage (optional).
## @param width             Desired full CI width used at the planning stage.
## @param degree.of.certainty,assurance,certainty  Synonyms; if more than one
##        is supplied they must agree.
## @param mean              Population mean of the simulated data.
## @param Specified.N       Fixed sample size (alternative to Estimated.C.of.V).
## @param conf.level        Confidence level of the intervals.
## @param G                 Number of Monte Carlo replicates.
## @param print.iter        Print the replicate counter while running.
## @return A list with Data.from.Simulation, Specifications and
##         Summary.of.Results components.
ss.aipe.cv.sensitivity <- function(True.C.of.V=NULL, Estimated.C.of.V=NULL, width=NULL, degree.of.certainty=NULL, assurance=NULL, certainty=NULL, mean=100, Specified.N=NULL, conf.level=.95, G=1000, print.iter=TRUE)
{
    ## 'assurance'/'degree.of.certainty'/'certainty' are aliases: propagate a
    ## single supplied value to the others and reject contradictions.
    if (!is.null(certainty) && is.null(degree.of.certainty) && is.null(assurance)) degree.of.certainty <- certainty
    if (is.null(assurance) && !is.null(degree.of.certainty) && is.null(certainty)) assurance <- degree.of.certainty
    if (!is.null(assurance) && is.null(degree.of.certainty) && is.null(certainty)) degree.of.certainty <- assurance
    if (!is.null(assurance) && !is.null(degree.of.certainty) && assurance != degree.of.certainty)
        stop("The arguments 'assurance' and 'degree.of.certainty' must have the same value.")
    if (!is.null(assurance) && !is.null(certainty) && assurance != certainty)
        stop("The arguments 'assurance' and 'certainty' must have the same value.")
    if (!is.null(degree.of.certainty) && !is.null(certainty) && degree.of.certainty != certainty)
        stop("The arguments 'degree.of.certainty' and 'certainty' must have the same value.")

    ## Per-replicate sample size: fixed, or planned from the estimated CV.
    ## (The two mutually exclusive branches are now an explicit if/else.)
    if (is.null(Estimated.C.of.V)) {
        if (is.null(Specified.N)) stop("Since you did not specify an \'Estimated.C.of.V\', \'Specified.N\' must be specified.")
        N <- Specified.N
    } else {
        if (!is.null(Specified.N)) stop("Since you specified an \'Estimated.C.of.V\', \'Specified.N\' should not be specified.")
        N <- ss.aipe.cv(C.of.V=Estimated.C.of.V, mu=NULL, sigma=NULL, width=width, conf.level=conf.level, alpha.lower=NULL, alpha.upper=NULL, degree.of.certainty=degree.of.certainty, Suppress.Statement=TRUE)
    }

    CN <- c("Lower.Limit", "Upper.Limit", "CV", "Int.OK", "Width")
    Results <- matrix(NA, G, length(CN))
    colnames(Results) <- CN
    ## seq_len(G) (not 1:G) so that G = 0 performs no iterations; isTRUE()
    ## instead of '== TRUE' so an NA print.iter cannot break the loop.
    for (i in seq_len(G)) {
        if (isTRUE(print.iter)) cat(c(i), "\n")
        ## sd = CV * mu by the definition of the coefficient of variation.
        X <- rnorm(N, mean=mean, sd=True.C.of.V*mean)
        CI.for.CV <- ci.cv(data=X, conf.level=conf.level)
        Results[i, 1] <- CI.for.CV$Lower
        Results[i, 2] <- CI.for.CV$Upper
        Results[i, 3] <- CI.for.CV$C.of.V
        ## 1 when the interval covers the true CV, 0 otherwise.
        Results[i, 4] <- sum((Results[i, 1] <= True.C.of.V) & (True.C.of.V <= Results[i, 2]))
        Results[i, 5] <- Results[i, 2] - Results[i, 1]
    }

    ## Observed coefficients of variation across replicates.
    Obs.CV <- Results[, 3]
    Results <- as.data.frame(Results)
    Summary.of.Results <- list(Mean.CV=mean(Obs.CV), Median.CV=median(Obs.CV), SD.CV=(var(Obs.CV))^.5,
        Mean.CI.width=mean(Results[,2]-Results[,1]), Median.CI.width=median(Results[,2]-Results[,1]), SD.CI.width=(var(Results[,2]-Results[,1]))^.5,
        Pct.CI.Less.w=mean((Results[,2]-Results[,1])<=width)*100, Pct.CI.Miss.Low=mean(c(True.C.of.V <= Results[,1]))*100, Pct.CI.Miss.High=mean(c(True.C.of.V >= Results[,2]))*100, Total.Type.I.Error=(mean((True.C.of.V <= Results[,1]) | (True.C.of.V >= Results[,2]))))

    ## Vector of specification values (degree.of.certainty is reported as 0
    ## when it was never supplied).
    if (is.null(degree.of.certainty)) degree.of.certainty <- 0
    MBESS.tmp <- if (is.null(Estimated.C.of.V)) NULL else round(Estimated.C.of.V, 4)
    Specifications <- list(Sample.Size=round(N), True.C.of.V=round(True.C.of.V, 4), Estimated.C.of.V=MBESS.tmp,
        conf.level=conf.level, desired.width=width, degree.of.certainty=degree.of.certainty, G=round(G))
    return(list(Data.from.Simulation=Results, Specifications=Specifications, Summary.of.Results=Summary.of.Results))
}
| /MBESS/R/ss.aipe.cv.sensitivity.R | no_license | ingted/R-Examples | R | false | false | 3,384 | r | ss.aipe.cv.sensitivity <- function(True.C.of.V=NULL, Estimated.C.of.V=NULL, width=NULL, degree.of.certainty=NULL, assurance=NULL, certainty=NULL, mean=100, Specified.N=NULL, conf.level=.95, G=1000, print.iter=TRUE)
{
if(!is.null(certainty)& is.null(degree.of.certainty)&is.null(assurance)) degree.of.certainty<-certainty
if (is.null(assurance) && !is.null (degree.of.certainty)& is.null(certainty)) assurance <-degree.of.certainty
if (!is.null(assurance) && is.null (degree.of.certainty)& is.null(certainty)) assurance -> degree.of.certainty
if(!is.null(assurance) && !is.null (degree.of.certainty) && assurance!=degree.of.certainty)
stop("The arguments 'assurance' and 'degree.of.certainty' must have the same value.")
if(!is.null(assurance) && !is.null (certainty) && assurance!=certainty)
stop("The arguments 'assurance' and 'certainty' must have the same value.")
if(!is.null(degree.of.certainty) && !is.null (certainty) && degree.of.certainty!=certainty)
stop("The arguments 'degree.of.certainty' and 'certainty' must have the same value.")
if(is.null(Estimated.C.of.V))
{
if(is.null(Specified.N)) stop("Since you did not specify an \'Estimated.C.of.V\', \'Specified.N\' must be specified.")
N <- Specified.N
}
if(!is.null(Estimated.C.of.V))
{
if(!is.null(Specified.N)) stop("Since you specified an \'Estimated.C.of.V\', \'Specified.N\' should not be specified.")
N <- ss.aipe.cv(C.of.V=Estimated.C.of.V, mu=NULL, sigma=NULL, width=width, conf.level=conf.level, alpha.lower=NULL, alpha.upper=NULL, degree.of.certainty=degree.of.certainty, Suppress.Statement=TRUE)
}
CN <- c("Lower.Limit", "Upper.Limit", "CV", "Int.OK", "Width")
Results <- matrix(NA, G, length(CN))
colnames(Results) <- CN
for(i in 1:G)
{
if(print.iter==TRUE) cat(c(i),"\n")
X <- rnorm(N, mean=mean, sd=True.C.of.V*mean)
CI.for.CV <- ci.cv(data=X, conf.level=conf.level)
Results[i,1] <- CI.for.CV$Lower
Results[i,2] <- CI.for.CV$Upper
Results[i,3] <- CI.for.CV$C.of.V
Results[i,4] <- sum((Results[i,1] <= True.C.of.V) & (True.C.of.V <= Results[i,2]))
Results[i,5] <- Results[i,2] - Results[i,1]
}
# Observed coefficients of variation.
Obs.CV <- Results[,3]
Results <- as.data.frame(Results)
Summary.of.Results <- list(Mean.CV=mean(Obs.CV), Median.CV=median(Obs.CV), SD.CV=(var(Obs.CV))^.5,
Mean.CI.width=mean(Results[,2]-Results[,1]), Median.CI.width=median(Results[,2]-Results[,1]), SD.CI.width=(var(Results[,2]-Results[,1]))^.5,
Pct.CI.Less.w=mean((Results[,2]-Results[,1])<=width)*100, Pct.CI.Miss.Low=mean(c(True.C.of.V <= Results[,1]))*100, Pct.CI.Miss.High=mean(c(True.C.of.V >= Results[,2]))*100, Total.Type.I.Error=(mean((True.C.of.V <= Results[,1]) | (True.C.of.V >= Results[,2]))))
###################################################################################################
# Vector of specification values.
if(is.null(degree.of.certainty)) degree.of.certainty <- 0
if(is.null(Estimated.C.of.V)) MBESS.tmp <- NULL
if(!is.null(Estimated.C.of.V)) MBESS.tmp <- round(Estimated.C.of.V, 4)
Specifications <- list(Sample.Size=round(N), True.C.of.V=round(True.C.of.V, 4), Estimated.C.of.V=MBESS.tmp,
conf.level=conf.level, desired.width=width, degree.of.certainty=degree.of.certainty, G=round(G))
return(list(Data.from.Simulation=Results, Specifications=Specifications, Summary.of.Results=Summary.of.Results))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scenarios.R
\name{plotROC}
\alias{plotROC}
\title{Plots the ROC curve for a result or model}
\usage{
plotROC(x, correct, posValue = NULL, xlim = 0:1, ylim = 0:1,
asp = 1, type = NULL, pch = "x", add = FALSE, ...)
}
\arguments{
\item{x}{either the result from a \code{\link{test}} or a model}
\item{correct}{either the true values or testing data for the model}
\item{posValue}{the label marking the positive value.
If \code{NULL} (default) then the larger value.}
\item{xlim}{sets better defaults for \code{\link{plot.default}}}
\item{ylim}{sets better defaults for \code{\link{plot.default}}}
\item{asp}{sets better defaults for \code{\link{plot.default}}}
\item{type}{sets better defaults for \code{\link{plot.default}}}
\item{pch}{sets better defaults for \code{\link{plot.default}}}
\item{add}{if `FALSE` (default) produces a new plot and if `TRUE` adds to existing plot.}
\item{...}{gets passed to \code{\link{plot.default}}}
}
\description{
This can be used either using \code{\link{rocSVM}} or \code{\link{lsSVM}}
}
\examples{
\dontrun{
banana <- liquidData('banana-bc')
model <- rocSVM(Y~.,banana$train)
plotROC(model ,banana$test)
# or:
result <- test(model, banana$test)
plotROC(result, banana$test$Y)
model.ls <- lsSVM(Y~., banana$train)
result <- plotROC(model.ls, banana$test)
}
}
\seealso{
\code{\link{rocSVM}}, \code{\link{lsSVM}}
}
| /man/plotROC.Rd | no_license | cran/liquidSVM | R | false | true | 1,433 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scenarios.R
\name{plotROC}
\alias{plotROC}
\title{Plots the ROC curve for a result or model}
\usage{
plotROC(x, correct, posValue = NULL, xlim = 0:1, ylim = 0:1,
asp = 1, type = NULL, pch = "x", add = FALSE, ...)
}
\arguments{
\item{x}{either the result from a \code{\link{test}} or a model}
\item{correct}{either the true values or testing data for the model}
\item{posValue}{the label marking the positive value.
If \code{NULL} (default) then the larger value.}
\item{xlim}{sets better defaults for \code{\link{plot.default}}}
\item{ylim}{sets better defaults for \code{\link{plot.default}}}
\item{asp}{sets better defaults for \code{\link{plot.default}}}
\item{type}{sets better defaults for \code{\link{plot.default}}}
\item{pch}{sets better defaults for \code{\link{plot.default}}}
\item{add}{if `FALSE` (default) produces a new plot and if `TRUE` adds to existing plot.}
\item{...}{gets passed to \code{\link{plot.default}}}
}
\description{
This can be used either using \code{\link{rocSVM}} or \code{\link{lsSVM}}
}
\examples{
\dontrun{
banana <- liquidData('banana-bc')
model <- rocSVM(Y~.,banana$train)
plotROC(model ,banana$test)
# or:
result <- test(model, banana$test)
plotROC(result, banana$test$Y)
model.ls <- lsSVM(Y~., banana$train)
result <- plotROC(model.ls, banana$test)
}
}
\seealso{
rocSVM, lsSVM
\code{\link{rocSVM}}
}
|
library(shiny)
library(plyr)
library(tidyverse)
library(googlesheets)
library(shinythemes)
library(plotly)
# Define UI for application that draws a histogram
ui <- fluidPage(theme = shinytheme("paper"),
navbarPage("SLCo PFS: REACH Data Dashboard",
tabPanel("Dashboard",
h3("Dashboard Overview"),
h4("Welcome to the SLCO-REACH DataVis Dashboard"),
p("This dashboard is designed to allow you to explore the data related to the SLCO-REACH project. Click
on the Category Bar at the top of the screen to see different categories of data. Once you've
found a plot you like, you can use its interactive features to explore your data. Double click a series
on the legend to isolate the plot to that one data series!")
),
tabPanel("Program Overview",
h3("Program Overview"),
plotlyOutput("programOverviewPlot"),
h3("Client Information"),
h4("Age"),
plotlyOutput("agesLinePlot"),
h4("Race/Ethnicity"),
plotlyOutput("raceLinePlot")
),
tabPanel("Referrals and Randomization",
h3("Referrals and Randomization"),
h4("Randomized into REACH from Jail"),
plotlyOutput("randomizedBarPlot"),
h4("Days Between Randomization and Enrollment"),
plotlyOutput("betweenEnrollmentdBarPlot"),
h4("Contacts Between Randomization and Enrollment"),
plotlyOutput("contactsBetweenEnrollmentdBarPlot"),
h4("Number of REACH Assessments Conducted"),
plotlyOutput("assessmentsBarPlot")
),
tabPanel("Service Delivery",
h3("Service Delivery"),
h4("Number of Clients by Delivery Type"),
plotlyOutput("serviceDeliveryLinePlot"),
h4("Time Spent on Highest Needs of Client"),
plotlyOutput("highestNeedBarPlot")
),
tabPanel("Employment",
h3("Employment"),
h4("Client Engagement"),
plotlyOutput("employmentLinePlot"),
h4("Total Percent of Employment"),
plotlyOutput("employmentBarPlot")
),
tabPanel("Housing",
h3("Housing"),
h4("Client Numbers"),
plotlyOutput("housingResidentLinePlot"),
h4("Average Length of Stay"),
plotlyOutput("housingCapacityLinePlotLength"),
h4("Bed Days Filled"),
plotlyOutput("bedDaysLinePlot")
),
tabPanel("SUD Treatment",
h3("SUD Treatment"),
h4("SUD Numbers"),
plotlyOutput("SUDLinePlot"),
h4("SUD hourly breakdown"),
plotlyOutput("SUDBarPlot"),
h3("UA Treatment"),
h4("UA Numbers"),
plotlyOutput("UALinePlot"),
h4("UA Breakdown"),
plotlyOutput("UASLinePlot")
),
tabPanel("Recidivism",
h3("Recidivism"),
h4("Engagements Number"),
plotlyOutput("engagementsLinePlot"),
h4("Contacts to disengaged individuals"),
plotlyOutput("engagementsMethodsLinePlot")
),
tabPanel("Staffing",
h3("Staffing"),
plotlyOutput("staffingLinePlot")
),
tabPanel("Fidelity and Training",
h3("Fidelity and Training"),
plotlyOutput("fidelityScoreLinePlot")
),
tabPanel("Exits",
h3("Exits"),
h4("Number of Exits"),
plotlyOutput("exitLinePlot"),
h4("Overall Attrition"),
plotlyOutput("exitAttritionLinePlot")
),
tabPanel("Financial",
h3("Financial Data"),
plotlyOutput("financesLinePlot")
)
),
HTML('<center><img src="footer.jpg"></center>')
)
# Define server logic required to draw a histogram
server <- function(input, output) {
ax <- list(
title = 'Month',
zeroline = TRUE,
showline = TRUE,
zerolinewidth = 1,
zerolinecolor = toRGB("white")
)
gap <- gs_title("REACH Service Provider Report - Updated 02-08-19")
myData <- gap %>%
gs_read(ws = "Updated Service Report")
## Wrangling:
#transpose the data to put observations into rows
tData <- t(myData)
#make column names the names of the first row
colnames(tData) = tData[1, ] # assigns column names from the first row
tData = tData[-1, ] # removes the first row from the data
#make the row names the names of the first column
rownames(tData) <-tData[ ,1] # assigns column names from the first row
tData <- tData[, -1] # removes the first row from the data
# remove the 'totals' month
tData <- tData[-c(4, 8, 12, 16, 17), ]
#remove the 'header' columns
tData <- tData[ ,-c(1, 9, 10, 14, 15, 20, 33, 38, 46, 53, 63, 69, 74, 80, 87, 98) ]
xaxis <- rownames(tData)
y2 <- tData[,1]
### Plot Program Overview
## plot Number of individuals randomized into REACH this month via line graph
months <- factor(xaxis,levels = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"))
## Plot Program Overview:
# -- Program overview & client-information plots ------------------------------
# tData is the transposed provider report (rows = months, columns = metrics);
# strtoi() converts the character sheet cells to integers.
# NOTE(review): metrics are addressed by raw column number (tData[,k]), which
# silently breaks if the google-sheet layout changes.
output$programOverviewPlot <- renderPlotly({programOverviewPlot <- plot_ly(x = months, y = strtoi(tData[,1]), name = 'Randomized', type = 'scatter', mode = 'lines+markers') %>%
#Plot Number of individuals referred to REACH this month
add_trace(y = strtoi(tData[,2]), name = 'Referred', mode = 'lines+markers') %>%
#Plot Number of new clients enrolled in REACH this month
add_trace(y = strtoi(tData[,3]), name = 'New Clients', mode = 'lines+markers') %>%
#Plot Number of REACH clients actively receiving services
add_trace(y = strtoi(tData[,4]), name = 'Receiving Services', mode = 'lines+markers') %>%
#Plot Total number of individuals enrolled in REACH
add_trace(y = strtoi(tData[,5]), name = 'Total Enrolled', mode = 'lines+markers') %>%
# Total number of individuals who completed REACH
add_trace(y = strtoi(tData[,9]), name = 'Completed REACH', mode = 'lines+markers')%>%
layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = ax)
})
# Client Information
# Plot Client Information as Line Graph
# One trace per age bracket (columns 11-14).
output$agesLinePlot <- renderPlotly({agesLinePlot <- plot_ly(x = months, y = strtoi(tData[,11]), name = '18-25', type = 'scatter', mode = 'lines+markers') %>%
# Clients aged 26-35 (comment corrected; original text was a copy-paste leftover)
add_trace(y = strtoi(tData[,12]), name = '26-35', mode = 'lines+markers') %>%
# Clients aged 35-44
add_trace(y = strtoi(tData[,13]), name = '35-44', mode = 'lines+markers') %>%
# Clients aged 45+
add_trace(y = strtoi(tData[,14]), name = '45+', mode = 'lines+markers')%>%
layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
#Plot Race as Line Graph
# One trace per race/ethnicity category reported in columns 15-26.
output$raceLinePlot <- renderPlotly({ raceLinePlot <- plot_ly(x = months, y = strtoi(tData[,15]), name = 'American Indian', type = 'scatter', mode = 'lines+markers') %>%
# Asian clients (comment corrected; original text was a copy-paste leftover)
add_trace(y = strtoi(tData[,16]), name = 'Asian', mode = 'lines+markers') %>%
# Black/African American clients
add_trace(y = strtoi(tData[,17]), name = 'Black/African American', mode = 'lines+markers') %>%
# Remaining race/ethnicity categories, columns 18-26
add_trace(y = strtoi(tData[,18]), name = 'Black/African American, White', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,19]), name = 'Pacific Islander', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,20]), name = 'Other: Single race', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,21]), name = 'Other: Two or more races', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,22]), name = 'White', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,23]), name = 'Mexican', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,24]), name = 'Not of Hispanic Origin', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,25]), name = 'Other: Hispanic', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,26]), name = 'Puerto Rican', mode = 'lines+markers')%>%
layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# Referrals and Randomization
# Bar charts driven by columns 27-30; percent-valued cells arrive as strings
# like "42%" and are stripped with sub() before plotting.
output$randomizedBarPlot <- renderPlotly({randomizedBarPlot <- plot_ly(x = months, y = strtoi(tData[,27]), type = 'bar', name = 'Randomized into REACH') %>%
layout(yaxis = list(title = 'Number of Individuals Randomized into REACH', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$betweenEnrollmentdBarPlot <- renderPlotly({betweenEnrollmentdBarPlot <- plot_ly(x = months, y = as.double(tData[,28]), type = 'bar', name = 'Randomized into REACH') %>%
layout(yaxis = list(title = 'Avg. Days from Randomization to Enrollment'), xaxis = list(title = 'Month'))
})
output$contactsBetweenEnrollmentdBarPlot <- renderPlotly({contactsBetweenEnrollmentdBarPlot <- plot_ly(x = months, y = as.double(tData[,29]), type = 'bar', name = 'Randomized into REACH') %>%
layout(yaxis = list(title = 'Avg. Contacts from Randomization to Enrollment', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$assessmentsBarPlot <- renderPlotly({assessmentsBarPlot <- plot_ly(x = months, y = strtoi(tData[,30]), type = 'bar', name = 'Randomized into REACH') %>%
layout(yaxis = list(title = 'Assessments Conducted', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# Service Delivery
# Clients per service-delivery phase (columns 31-35, 37).
output$serviceDeliveryLinePlot <- renderPlotly({serviceDeliveryLinePlot <- plot_ly(x = months, y = strtoi(tData[,31]), name = 'Intensive Treatment', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,32]), name = 'Transition', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,33]), name = 'Sustained Recovery', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,34]), name = 'Long-term Recovery', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,35]), name = '200 Hours of Therapy', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,37]), name = 'Completed MRT', mode = 'lines+markers')%>%
layout(yaxis = list(title = 'Number of Individuals Receiving', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$highestNeedBarPlot <- renderPlotly({ highestNeedBarPlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,36])), type = 'bar', name = 'Randomized into REACH') %>%
layout(yaxis = list(title = '% of Time Spent On Highest Priority', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# Employment
# Employment engagement counts (columns 38-42, 44).
output$employmentLinePlot <- renderPlotly({employmentLinePlot <- plot_ly(x = months, y = strtoi(tData[,38]), name = 'Completed Assessment', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,39]), name = 'Obtained Employment ', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,40]), name = 'Engaged With REACH Employment', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,41]), name = 'Obtained a Job with DWS', mode = 'lines+markers')%>% #could error with ?
add_trace(y = strtoi(tData[,42]), name = 'Engaged with Vocational Training', mode = 'lines+markers')%>%
add_trace(y = strtoi(tData[,44]), name = 'Lost Their Job', mode = 'lines+markers')%>%
layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$employmentBarPlot <- renderPlotly({employmentBarPlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,43])), type = 'bar', name = 'REACH Clients') %>%
layout(yaxis = list(title = '% of REACH Clients Employed', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# Housing
# NOTE(review): column 45 feeds both of the first two traces below; 'In Need
# of Residence' most likely has its own column -- verify against the sheet.
output$housingResidentLinePlot <- renderPlotly({housingResidentLinePlot <- plot_ly(x = months, y = strtoi(tData[,45]), name = 'Completed Housing Assessments', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,45]), name = 'In Need of Residence', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,46]), name = 'Placed in REACH Recovery Residence', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,47]), name = 'Currently Housed in REACH Recovery', mode = 'lines+markers')%>% #could error with ?
add_trace(y = strtoi(tData[,49]), name = 'Unique Clients served in REACH Recovery', mode = 'lines+markers')%>%
# NOTE(review): `ax` is passed as an unnamed element inside the xaxis list
# here; elsewhere the shared config is passed as `xaxis = ax` -- confirm which
# was intended.
layout(yaxis = list(title = 'Number of Clients', rangemode = "tozero"), xaxis = list(title = 'Month', ax))
})
output$housingCapacityLinePlotLength <- renderPlotly({ housingCapacityLinePlot <- plot_ly(x = months, y = strtoi(tData[,48]), name = 'Average Length of Stay', type = 'scatter', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Days', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$bedDaysLinePlot <- renderPlotly({bedDaysLinePlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,50])), name = 'In Residence', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,51])), name = 'By Transitional', mode = 'lines+markers') %>%
layout(yaxis = list(title = '% of Bed Days Filled', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# SUD treatment
output$SUDLinePlot <- renderPlotly({SUDLinePlot <- plot_ly(x = months, y = strtoi(tData[,53]), name = 'SUD', type = 'scatter', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# NOTE(review): the inner variable below is named SUDLinePlot although this is
# the UA plot -- harmless (the chain's value is what renderPlotly returns),
# but misleading.
output$UALinePlot <- renderPlotly({SUDLinePlot <- plot_ly(x = months, y = strtoi(tData[,54]), name = 'UA', type = 'scatter', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$UASLinePlot <- renderPlotly({UASLinePlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,55])), name = 'Positive', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,56])), name = 'No-show', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$SUDBarPlot <- renderPlotly({SUDBarPlot <- plot_ly(x = months, y = as.double(sub("%", "", tData[,57]))/100, type = 'bar', name = 'REACH Clients') %>% #divide by 100 as hours are entered as a percentage
layout(yaxis = list(title = 'Average Number of Hours Per Client', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
# Recidivism
# NOTE(review): both follow-up traces below read column 55, which the UAS plot
# treats as a percentage -- these indices look like copy-paste errors; verify
# the intended columns against the sheet.
output$engagementsLinePlot <- renderPlotly({engagementsLinePlot <- plot_ly(x = months, y = strtoi(tData[,58]), name = 'Post-Incarceration Re-engagements', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,55]), name = 'Successful Re-engagements', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,55]), name = 'Left Unsuccessfully', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$engagementsMethodsLinePlot <- renderPlotly({engagementsMethodsLinePlot <- plot_ly(x = months, y = as.double(tData[,59]), name = 'Avg. Days Between Jail and Re-enrollment', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = as.double(tData[,60]), name = 'Contact Attempts', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
#Staffing
# Headcount per role (columns 62-66).
output$staffingLinePlot <- renderPlotly({staffingLinePlot <- plot_ly(x = months, y = strtoi(tData[,62]), name = 'Case Managers', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,63]), name = 'Mentors', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,64]), name = 'Program Managers', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,65]), name = 'Admission Coordinators', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,66]), name = 'Therapists', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number on Staff', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
#Fidelity
output$fidelityScoreLinePlot <- renderPlotly({fidelityScoreLinePlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,67])), name = 'Staff Trained In Modalities', type = 'scatter', mode = 'lines+markers') %>%
# NOTE(review): tData[,78] is 'Total Terminated by FSH' in the Exits plot;
# the fidelity metrics otherwise occupy the contiguous range 67-72, so this
# is almost certainly a typo for tData[,68] -- verify and fix.
add_trace(y = as.numeric(sub("%", "", tData[,78])), name = 'MRT groups with Supervision', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,69])), name = 'Clinicians Receiving Fidelity Checks', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,70])), name = 'Fidelity Score for MRT', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,71])), name = 'Fidelity Score for MI', mode = 'lines+markers') %>%
add_trace(y = as.numeric(sub("%", "", tData[,72])), name = 'Fidelity Score for TA', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
#Exits
# Exit counts by reason (columns 73-80, 82).
output$exitLinePlot <- renderPlotly({exitLinePlot <- plot_ly(x = months, y = strtoi(tData[,73]), name = 'Total Unplanned Exits', type = 'scatter', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,74]), name = 'Jail', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,75]), name = 'Prison', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,76]), name = 'Self Termination', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,77]), name = 'No Contact', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,78]), name = 'Total Terminated by FSH', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,79]), name = 'Deceased', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,80]), name = 'Transfered Programs', mode = 'lines+markers') %>%
add_trace(y = strtoi(tData[,82]), name = 'Planned Exits', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Number of Clients that Exitted', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
output$exitAttritionLinePlot <- renderPlotly({exitAttritionLinePlot <- plot_ly(x = months, y = as.numeric(sub("%", "", tData[,81])), name = 'Attrition', type = 'scatter', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
})
#Finances
output$financesLinePlot <- renderPlotly({financesLinePlot <- plot_ly(x = months, y = as.double(tData[,83]), name = 'Finances', type = 'scatter', mode = 'lines+markers') %>%
layout(yaxis = list(title = 'Dollars ($)', rangemode = "tozero"), xaxis = list(title = 'Month', rangemode = "tozero"))
})
}
# Run the application
shinyApp(ui = ui, server = server) | /exampleapp.R | no_license | Sorenson-Impact/SLCO_Dashboard | R | false | false | 21,401 | r | library(shiny)
library(plyr)
library(tidyverse)
library(googlesheets)
library(shinythemes)
library(plotly)
# Define UI for application that draws a histogram
# UI definition: a navbar with one tab per reporting category; each
# plotlyOutput() id below must match an output$<id> assignment in server().
ui <- fluidPage(theme = shinytheme("paper"),
navbarPage("SLCo PFS: REACH Data Dashboard",
# Landing tab with usage instructions.
tabPanel("Dashboard",
h3("Dashboard Overview"),
h4("Welcome to the SLCO-REACH DataVis Dashboard"),
p("This dashboard is designed to allow you to explore the data related to the SLCO-REACH project. Click
on the Category Bar at the top of the screen to see different categories of data. Once you've
found a plot you like, you can use its interactive features to explore your data. Double click a series
on the legend to isolate the plot to that one data series!")
),
tabPanel("Program Overview",
h3("Program Overview"),
plotlyOutput("programOverviewPlot"),
h3("Client Information"),
h4("Age"),
plotlyOutput("agesLinePlot"),
h4("Race/Ethnicity"),
plotlyOutput("raceLinePlot")
),
tabPanel("Referrals and Randomization",
h3("Referrals and Randomization"),
h4("Randomized into REACH from Jail"),
plotlyOutput("randomizedBarPlot"),
h4("Days Between Randomization and Enrollment"),
plotlyOutput("betweenEnrollmentdBarPlot"),
h4("Contacts Between Randomization and Enrollment"),
plotlyOutput("contactsBetweenEnrollmentdBarPlot"),
h4("Number of REACH Assessments Conducted"),
plotlyOutput("assessmentsBarPlot")
),
tabPanel("Service Delivery",
h3("Service Delivery"),
h4("Number of Clients by Delivery Type"),
plotlyOutput("serviceDeliveryLinePlot"),
h4("Time Spent on Highest Needs of Client"),
plotlyOutput("highestNeedBarPlot")
),
tabPanel("Employment",
h3("Employment"),
h4("Client Engagement"),
plotlyOutput("employmentLinePlot"),
h4("Total Percent of Employment"),
plotlyOutput("employmentBarPlot")
),
tabPanel("Housing",
h3("Housing"),
h4("Client Numbers"),
plotlyOutput("housingResidentLinePlot"),
h4("Average Length of Stay"),
plotlyOutput("housingCapacityLinePlotLength"),
h4("Bed Days Filled"),
plotlyOutput("bedDaysLinePlot")
),
tabPanel("SUD Treatment",
h3("SUD Treatment"),
h4("SUD Numbers"),
plotlyOutput("SUDLinePlot"),
h4("SUD hourly breakdown"),
plotlyOutput("SUDBarPlot"),
h3("UA Treatment"),
h4("UA Numbers"),
plotlyOutput("UALinePlot"),
h4("UA Breakdown"),
plotlyOutput("UASLinePlot")
),
tabPanel("Recidivism",
h3("Recidivism"),
h4("Engagements Number"),
plotlyOutput("engagementsLinePlot"),
h4("Contacts to disengaged individuals"),
plotlyOutput("engagementsMethodsLinePlot")
),
tabPanel("Staffing",
h3("Staffing"),
plotlyOutput("staffingLinePlot")
),
tabPanel("Fidelity and Training",
h3("Fidelity and Training"),
plotlyOutput("fidelityScoreLinePlot")
),
tabPanel("Exits",
h3("Exits"),
h4("Number of Exits"),
plotlyOutput("exitLinePlot"),
h4("Overall Attrition"),
plotlyOutput("exitAttritionLinePlot")
),
tabPanel("Financial",
h3("Financial Data"),
plotlyOutput("financesLinePlot")
)
),
# Static footer image served from the app's www/ directory.
HTML('<center><img src="footer.jpg"></center>')
)
# Define server logic required to draw a histogram
# Server: pull the monthly REACH service-provider report from Google Sheets,
# reshape it so that rows = months and columns = metrics, then render one
# interactive plotly figure per UI output.
#
# NOTE(review): plots address the report by raw column position (tData[, k]);
# these magic indices silently break if the sheet layout changes. Percent
# cells arrive as strings like "42%" and are stripped with sub() before use.
server <- function(input, output) {
  # Shared x-axis configuration: month axis with a visible zero line.
  ax <- list(
    title = 'Month',
    zeroline = TRUE,
    showline = TRUE,
    zerolinewidth = 1,
    zerolinecolor = toRGB("white")
  )
  # Fetch the raw worksheet from Google Sheets.
  gap <- gs_title("REACH Service Provider Report - Updated 02-08-19")
  myData <- gap %>%
    gs_read(ws = "Updated Service Report")
  ## Wrangling: transpose so observations (months) become rows.
  tData <- t(myData)
  colnames(tData) = tData[1, ] # first row holds the metric names ...
  tData = tData[-1, ]          # ... then drop it
  rownames(tData) <- tData[, 1] # first column holds the month names ...
  tData <- tData[, -1]          # ... then drop it
  # Drop the 'totals' rows so only real months remain.
  tData <- tData[-c(4, 8, 12, 16, 17), ]
  # Drop the section-header columns, which carry no data.
  tData <- tData[, -c(1, 9, 10, 14, 15, 20, 33, 38, 46, 53, 63, 69, 74, 80, 87, 98)]
  xaxis <- rownames(tData)
  y2 <- tData[, 1] # kept from the original code; currently unused
  # Order months chronologically rather than alphabetically.
  months <- factor(xaxis, levels = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"))
  ## Program overview: headline enrollment metrics (columns 1-5, 9).
  output$programOverviewPlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 1]), name = 'Randomized', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 2]), name = 'Referred', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 3]), name = 'New Clients', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 4]), name = 'Receiving Services', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 5]), name = 'Total Enrolled', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 9]), name = 'Completed REACH', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = ax)
  })
  ## Client information: age brackets (columns 11-14).
  output$agesLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 11]), name = '18-25', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 12]), name = '26-35', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 13]), name = '35-44', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 14]), name = '45+', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Client information: race/ethnicity (columns 15-26).
  output$raceLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 15]), name = 'American Indian', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 16]), name = 'Asian', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 17]), name = 'Black/African American', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 18]), name = 'Black/African American, White', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 19]), name = 'Pacific Islander', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 20]), name = 'Other: Single race', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 21]), name = 'Other: Two or more races', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 22]), name = 'White', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 23]), name = 'Mexican', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 24]), name = 'Not of Hispanic Origin', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 25]), name = 'Other: Hispanic', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 26]), name = 'Puerto Rican', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Referrals and randomization (columns 27-30).
  output$randomizedBarPlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 27]), type = 'bar', name = 'Randomized into REACH') %>%
      layout(yaxis = list(title = 'Number of Individuals Randomized into REACH', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$betweenEnrollmentdBarPlot <- renderPlotly({
    plot_ly(x = months, y = as.double(tData[, 28]), type = 'bar', name = 'Randomized into REACH') %>%
      layout(yaxis = list(title = 'Avg. Days from Randomization to Enrollment'), xaxis = list(title = 'Month'))
  })
  output$contactsBetweenEnrollmentdBarPlot <- renderPlotly({
    plot_ly(x = months, y = as.double(tData[, 29]), type = 'bar', name = 'Randomized into REACH') %>%
      layout(yaxis = list(title = 'Avg. Contacts from Randomization to Enrollment', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$assessmentsBarPlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 30]), type = 'bar', name = 'Randomized into REACH') %>%
      layout(yaxis = list(title = 'Assessments Conducted', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Service delivery (columns 31-37).
  output$serviceDeliveryLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 31]), name = 'Intensive Treatment', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 32]), name = 'Transition', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 33]), name = 'Sustained Recovery', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 34]), name = 'Long-term Recovery', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 35]), name = '200 Hours of Therapy', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 37]), name = 'Completed MRT', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number of Individuals Receiving', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$highestNeedBarPlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 36])), type = 'bar', name = 'Randomized into REACH') %>%
      layout(yaxis = list(title = '% of Time Spent On Highest Priority', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Employment (columns 38-44).
  output$employmentLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 38]), name = 'Completed Assessment', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 39]), name = 'Obtained Employment ', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 40]), name = 'Engaged With REACH Employment', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 41]), name = 'Obtained a Job with DWS', mode = 'lines+markers') %>% # could yield NA if cells contain '?'
      add_trace(y = strtoi(tData[, 42]), name = 'Engaged with Vocational Training', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 44]), name = 'Lost Their Job', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number of Individuals', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$employmentBarPlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 43])), type = 'bar', name = 'REACH Clients') %>%
      layout(yaxis = list(title = '% of REACH Clients Employed', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Housing (columns 45-51).
  output$housingResidentLinePlot <- renderPlotly({
    # NOTE(review): column 45 feeds both of the first two traces; 'In Need of
    # Residence' probably has its own column -- verify against the sheet.
    plot_ly(x = months, y = strtoi(tData[, 45]), name = 'Completed Housing Assessments', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 45]), name = 'In Need of Residence', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 46]), name = 'Placed in REACH Recovery Residence', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 47]), name = 'Currently Housed in REACH Recovery', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 49]), name = 'Unique Clients served in REACH Recovery', mode = 'lines+markers') %>%
      # Fix: was xaxis = list(title = 'Month', ax), which nested the shared
      # config as an unnamed element; pass the shared `ax` config directly.
      layout(yaxis = list(title = 'Number of Clients', rangemode = "tozero"), xaxis = ax)
  })
  output$housingCapacityLinePlotLength <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 48]), name = 'Average Length of Stay', type = 'scatter', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Days', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$bedDaysLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 50])), name = 'In Residence', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 51])), name = 'By Transitional', mode = 'lines+markers') %>%
      layout(yaxis = list(title = '% of Bed Days Filled', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## SUD / UA treatment (columns 53-57).
  output$SUDLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 53]), name = 'SUD', type = 'scatter', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$UALinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 54]), name = 'UA', type = 'scatter', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$UASLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 55])), name = 'Positive', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 56])), name = 'No-show', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$SUDBarPlot <- renderPlotly({
    # Divide by 100 because hours are entered in the sheet as a percentage.
    plot_ly(x = months, y = as.double(sub("%", "", tData[, 57]))/100, type = 'bar', name = 'REACH Clients') %>%
      layout(yaxis = list(title = 'Average Number of Hours Per Client', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Recidivism (columns 58-60).
  output$engagementsLinePlot <- renderPlotly({
    # NOTE(review): both follow-up traces read column 55, which the UAS plot
    # treats as a percentage -- these indices look wrong; verify the sheet.
    plot_ly(x = months, y = strtoi(tData[, 58]), name = 'Post-Incarceration Re-engagements', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 55]), name = 'Successful Re-engagements', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 55]), name = 'Left Unsuccessfully', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number Completed', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$engagementsMethodsLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.double(tData[, 59]), name = 'Avg. Days Between Jail and Re-enrollment', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = as.double(tData[, 60]), name = 'Contact Attempts', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Staffing: headcount per role (columns 62-66).
  output$staffingLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 62]), name = 'Case Managers', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 63]), name = 'Mentors', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 64]), name = 'Program Managers', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 65]), name = 'Admission Coordinators', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 66]), name = 'Therapists', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Number on Staff', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Fidelity and training (columns 67-72).
  output$fidelityScoreLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 67])), name = 'Staff Trained In Modalities', type = 'scatter', mode = 'lines+markers') %>%
      # Fix: this trace previously read column 78 ('Total Terminated by FSH',
      # an Exits metric); the fidelity block is the contiguous range 67-72.
      add_trace(y = as.numeric(sub("%", "", tData[, 68])), name = 'MRT groups with Supervision', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 69])), name = 'Clinicians Receiving Fidelity Checks', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 70])), name = 'Fidelity Score for MRT', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 71])), name = 'Fidelity Score for MI', mode = 'lines+markers') %>%
      add_trace(y = as.numeric(sub("%", "", tData[, 72])), name = 'Fidelity Score for TA', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Exits (columns 73-82).
  output$exitLinePlot <- renderPlotly({
    plot_ly(x = months, y = strtoi(tData[, 73]), name = 'Total Unplanned Exits', type = 'scatter', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 74]), name = 'Jail', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 75]), name = 'Prison', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 76]), name = 'Self Termination', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 77]), name = 'No Contact', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 78]), name = 'Total Terminated by FSH', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 79]), name = 'Deceased', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 80]), name = 'Transfered Programs', mode = 'lines+markers') %>%
      add_trace(y = strtoi(tData[, 82]), name = 'Planned Exits', mode = 'lines+markers') %>%
      # Typo fix in the axis label: "Exitted" -> "Exited".
      layout(yaxis = list(title = 'Number of Clients that Exited', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  output$exitAttritionLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.numeric(sub("%", "", tData[, 81])), name = 'Attrition', type = 'scatter', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Percent (%)', rangemode = "tozero"), xaxis = list(title = 'Month'))
  })
  ## Finances (column 83).
  output$financesLinePlot <- renderPlotly({
    plot_ly(x = months, y = as.double(tData[, 83]), name = 'Finances', type = 'scatter', mode = 'lines+markers') %>%
      layout(yaxis = list(title = 'Dollars ($)', rangemode = "tozero"), xaxis = list(title = 'Month', rangemode = "tozero"))
  })
}
# Run the application
shinyApp(ui = ui, server = server) |
# Strip the root from the phylogenetic tree in 12652_0.txt; the unrooted copy
# is written back out by the write.tree() call that follows (presumably as
# codeml input, per the repository path -- TODO confirm).
library(ape)
testtree <- read.tree("12652_0.txt") # parse the Newick tree from disk
unrooted_tr <- unroot(testtree) # collapse the root node
write.tree(unrooted_tr, file="12652_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/12652_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# (Duplicate of the unrooting snippet earlier in this dump; the dataset stores
# each file's text twice.)
testtree <- read.tree("12652_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12652_0_unrooted.txt") |
## Clear-all
# NOTE(review): rm(list = ls()) only removes variables -- it does not detach
# packages or otherwise give a fresh session; restarting R is the reliable
# reset. Kept as-is to preserve the script's behavior.
rm(list = ls()) # Clear variables
graphics.off() # Clear plots
cat("\014") # Clear console
## Choose motorway
# Prefix used to name the output .RData/.csv files later in the script.
mX <- "m25"
##################################################################################
# Discard Links with 10% or more missing data in speed, travel_time or flow      #
##################################################################################
# NOTE(review): setwd() inside a script is fragile; kept so relative paths
# below keep working when run from RStudio.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
complete_data = 0:76
file_name = paste('link_data_', complete_data, '.csv', sep = '')
# Read all 77 link files into a preallocated list and bind once at the end --
# growing df with rbind() inside the loop copies the accumulated frame on
# every iteration (O(n^2)).
link_chunks = vector("list", length(file_name))
for (i in 1:77) {
print(paste('Reading M25 data. Progress: ',round(100*i/77,2) ,'%'))
link_chunks[[i]] = read.csv(paste('../00_Data/01_Raw_data/M25_link_data/', file_name[i], sep = ''))
}
df = do.call(rbind, link_chunks)
test = list.files(path = '../00_Data/01_Raw_data/M25_link_data/',pattern="*.csv")
# BUG FIX: the original `df = rbind(read.csv(test))` clobbered the combined
# data with (at most) the first listed file -- read.csv() takes a single path
# -- so everything downstream saw only one file's rows. The line is removed.
link_list = unique(df$link_id)
# Flag every link whose speed, travel_time or flow series is more than 10% NA.
# remove_links[i] is set to 1 when link_list[i] fails the completeness test;
# `count` walks remove_links in step with the loop over link_list.
count = 1
remove_links = matrix(0, nrow = length(link_list))
for (link in link_list) {
# Pull the three observation series for this link.
travel_time = df$travel_time[df$link_id == link]
flow = df$flow[df$link_id == link]
speed = df$speed[df$link_id == link]
# Count missing observations per series.
temp_tt = sum(is.na(travel_time))
#temp_tt
temp_flow = sum(is.na(flow))
#temp_flow
temp_speed = sum(is.na(speed))
#temp_speed
# Scalar comparisons, so elementwise `|` behaves like `||` here.
if (temp_tt > 0.1*length(travel_time) | temp_flow > 0.1*length(flow) | temp_speed > 0.1*length(speed)){
remove_links[count] = 1
}
count = count + 1
}
# Keep only the file indices whose link passed the completeness check.
# NOTE(review): this assumes link_list is ordered like complete_data, i.e.
# that link_data_<k>.csv contributes the k-th unique link_id -- TODO confirm.
complete_data = complete_data[remove_links == 0]
## INITIAL DATA HANDLING ################################################################
# Load motorway data into dataframe
rm(list= ls()[!(ls() %in% c('complete_data','mX'))])
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
file_name = paste('link_data_', complete_data, '.csv', sep = '')
df = data.frame()
for (i in 1:length(complete_data)) {
print(paste('Reading M25 data. Progress: ',round(100*i/length(complete_data),2) ,'%'))
df = rbind(df, read.csv(paste('../00_Data/01_Raw_data/M25_link_data/', file_name[i], sep = '')))
}
links_list_df = data.frame(unique(df$link_id))
m_data_interp = df
file_name2 <- paste('../00_Data/01_Raw_data/',mX,'_data.RData',sep="")
save(m_data_interp,links_list_df, file = file_name2)
file_name5 <- paste('../00_Data/01_Raw_data/',mX,'_data.csv',sep="")
write.csv(m_data_interp, file = file_name5, col.names=TRUE)
m_data_interp$time_zone_info = NULL
m_data_interp$interpolated_flow = NULL
m_data_interp$interpolated_concentration = NULL
m_data_interp$interpolated_speed = NULL
m_data_interp$interpolated_headway = NULL
m_data_interp$interpolated_travel_time = NULL
m_data_interp$interpolated_profile_time = NULL
m_data_interp$smoothed_interpolated_concentration = NULL
m_data_interp$smoothed_interpolated_flow = NULL
m_data_interp$smoothed_interpolated_headway = NULL
m_data_interp$smoothed_interpolated_profile_time = NULL
m_data_interp$smoothed_interpolated_speed = NULL
m_data_interp$smoothed_interpolated_travel_time = NULL
m_data_interp$interpolated_headway = NULL
m_data_interp$interpolated_concentration = NULL
m_data_interp$bla <- rep(seq(0,1439),length(m_data_interp$speed)/(1440*length(unique(m_data_interp$link_id))))
colnames(m_data_interp) = c("link_id", "adjusted_time", "m_date", "day_week", "adjusted_time2", "traffic_flow", "traffic_concentration",
"traffic_speed", "traffic_headway", "travel_time", "thales_profile","absolute_time")
m_data = m_data_interp
file_name3 <- paste('../00_Data/01_Raw_data/',mX,'_data.RData',sep="")
save(m_data,links_list_df, file = file_name3)
file_name4 <- paste('../00_Data/01_Raw_data/',mX,'_data.csv',sep="")
write.csv(m_data_interp, file = file_name4, col.names=TRUE)
| /01_Data_Preprocessing/1_M25_Import_Interpolation.R | no_license | ACabrejas/NTIS_Profiles | R | false | false | 3,777 | r | ## Clear-all
rm(list = ls()) # Clear variables
graphics.off() # Clear plots
cat("\014") # Clear console
## Choose motorway
mX <- "m25"
##################################################################################
# Discard Links with 10% or more missing data in speed, travel_time or flow #
##################################################################################
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
complete_data = 0:76
file_name = paste('link_data_', complete_data, '.csv', sep = '')
df = data.frame()
for (i in 1:77) {
print(paste('Reading M25 data. Progress: ',round(100*i/77,2) ,'%'))
df = rbind(df, read.csv(paste('../00_Data/01_Raw_data/M25_link_data/', file_name[i], sep = '')))
}
test = list.files(path = '../00_Data/01_Raw_data/M25_link_data/',pattern="*.csv")
df = rbind(read.csv(test))
link_list = unique(df$link_id)
count = 1
remove_links = matrix(0, nrow = length(link_list))
for (link in link_list) {
travel_time = df$travel_time[df$link_id == link]
flow = df$flow[df$link_id == link]
speed = df$speed[df$link_id == link]
temp_tt = sum(is.na(travel_time))
#temp_tt
temp_flow = sum(is.na(flow))
#temp_flow
temp_speed = sum(is.na(speed))
#temp_speed
if (temp_tt > 0.1*length(travel_time) | temp_flow > 0.1*length(flow) | temp_speed > 0.1*length(speed)){
remove_links[count] = 1
}
count = count + 1
}
complete_data = complete_data[remove_links == 0]
## INITIAL DATA HANDLING ################################################################
# Load motorway data into dataframe
rm(list= ls()[!(ls() %in% c('complete_data','mX'))])
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
file_name = paste('link_data_', complete_data, '.csv', sep = '')
df = data.frame()
for (i in 1:length(complete_data)) {
print(paste('Reading M25 data. Progress: ',round(100*i/length(complete_data),2) ,'%'))
df = rbind(df, read.csv(paste('../00_Data/01_Raw_data/M25_link_data/', file_name[i], sep = '')))
}
links_list_df = data.frame(unique(df$link_id))
m_data_interp = df
file_name2 <- paste('../00_Data/01_Raw_data/',mX,'_data.RData',sep="")
save(m_data_interp,links_list_df, file = file_name2)
file_name5 <- paste('../00_Data/01_Raw_data/',mX,'_data.csv',sep="")
write.csv(m_data_interp, file = file_name5, col.names=TRUE)
m_data_interp$time_zone_info = NULL
m_data_interp$interpolated_flow = NULL
m_data_interp$interpolated_concentration = NULL
m_data_interp$interpolated_speed = NULL
m_data_interp$interpolated_headway = NULL
m_data_interp$interpolated_travel_time = NULL
m_data_interp$interpolated_profile_time = NULL
m_data_interp$smoothed_interpolated_concentration = NULL
m_data_interp$smoothed_interpolated_flow = NULL
m_data_interp$smoothed_interpolated_headway = NULL
m_data_interp$smoothed_interpolated_profile_time = NULL
m_data_interp$smoothed_interpolated_speed = NULL
m_data_interp$smoothed_interpolated_travel_time = NULL
m_data_interp$interpolated_headway = NULL
m_data_interp$interpolated_concentration = NULL
m_data_interp$bla <- rep(seq(0,1439),length(m_data_interp$speed)/(1440*length(unique(m_data_interp$link_id))))
colnames(m_data_interp) = c("link_id", "adjusted_time", "m_date", "day_week", "adjusted_time2", "traffic_flow", "traffic_concentration",
"traffic_speed", "traffic_headway", "travel_time", "thales_profile","absolute_time")
m_data = m_data_interp
file_name3 <- paste('../00_Data/01_Raw_data/',mX,'_data.RData',sep="")
save(m_data,links_list_df, file = file_name3)
file_name4 <- paste('../00_Data/01_Raw_data/',mX,'_data.csv',sep="")
write.csv(m_data_interp, file = file_name4, col.names=TRUE)
|
# Load in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Require ggplot2
library(ggplot2)
# Subset to only the on-road sources in Baltimore, MD
baltimore.LA.cars <- subset(NEI, fips == "24510" | fips == "06037" & type == 'ON-ROAD')
# Aggregate
cars.by.Year <- aggregate(Emissions ~ year + fips, baltimore.LA.cars, sum)
# Make data more presentable
cars.by.Year[cars.by.Year=="06037"] <- "Los Angeles"
cars.by.Year[cars.by.Year=="24510"] <- "Baltimore"
colnames(cars.by.Year) <- c("Year", "City", "Emissions")
# Plot
png('plot6.png')
ggplot(cars.by.Year, aes(x = Year, y = Emissions, group = City, colour = City)) +
geom_line(size = 1.5) +
geom_point(size = 3) +
expand_limits(y = 0) +
ggtitle("Vehicular Emmisions in Baltimore, MD and Los Angeles, Ca 1999 - 2008") +
ylab(expression('Total PM'[2.5]*' in tons')) +
xlab("Year")
dev.off()
| /4 - Exploratory Data Analysis/Week 4/plot6.R | no_license | sawyerWeld/DataScience-Coursera | R | false | false | 904 | r | # Load in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Require ggplot2
library(ggplot2)
# Subset to only the on-road sources in Baltimore, MD
baltimore.LA.cars <- subset(NEI, fips == "24510" | fips == "06037" & type == 'ON-ROAD')
# Aggregate
cars.by.Year <- aggregate(Emissions ~ year + fips, baltimore.LA.cars, sum)
# Make data more presentable
cars.by.Year[cars.by.Year=="06037"] <- "Los Angeles"
cars.by.Year[cars.by.Year=="24510"] <- "Baltimore"
colnames(cars.by.Year) <- c("Year", "City", "Emissions")
# Plot
png('plot6.png')
ggplot(cars.by.Year, aes(x = Year, y = Emissions, group = City, colour = City)) +
geom_line(size = 1.5) +
geom_point(size = 3) +
expand_limits(y = 0) +
ggtitle("Vehicular Emmisions in Baltimore, MD and Los Angeles, Ca 1999 - 2008") +
ylab(expression('Total PM'[2.5]*' in tons')) +
xlab("Year")
dev.off()
|
## the list of functions created by makeCacheMatrix are:
## set puts the input matrix into the global envt
## get provides a way of retrieving the matrix on demand
## setinv computes the inverse of the input matrix and stores it in the global envt
## getinv retrieves the calculated inverse
##
## makeCacheMatric creates a closure for a list of functions for retrieving or computing
## the inverse of a square matric
## The function CacheMatrix calls the function created by makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
p<- NULL
set <- function(y) {
x <<- y
p <<- NULL
}
get <- function() x
setinv <- function(invx) p <<-invx
getinv <- function() p
list(set=set, get=get, setinv= setinv, getinv = getinv)
}
## cacheSolve, using the "getinv" function attempts to find a previously cached value for "p",
## which is the inverse of the matrix, in the local or global environment. If it is found,
## then it prints a message to the console and returns "m" to the local environment.
## If not found, then it call the other functions created by makeCacheMatrix which do the following:
## 1) gets the input matrix
## 2) calcs the inverse and stores it in "p" locally
## 3) stores the inverse in a "p" in the global envt
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
p<- x$getinv()
print(!is.null(p))
if(!is.null(p)) {
print("getting cached inverse")
return(p)
}
data <- x$get()
print(x$get())
p <- solve(data,...)
x$setinv(p)
p
}
| /cachematrix.R | no_license | FishTales4967/ProgrammingAssignment2 | R | false | false | 1,595 | r | ## the list of functions created by makeCacheMatrix are:
## set puts the input matrix into the global envt
## get provides a way of retrieving the matrix on demand
## setinv computes the inverse of the input matrix and stores it in the global envt
## getinv retrieves the calculated inverse
##
## makeCacheMatric creates a closure for a list of functions for retrieving or computing
## the inverse of a square matric
## The function CacheMatrix calls the function created by makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
p<- NULL
set <- function(y) {
x <<- y
p <<- NULL
}
get <- function() x
setinv <- function(invx) p <<-invx
getinv <- function() p
list(set=set, get=get, setinv= setinv, getinv = getinv)
}
## cacheSolve, using the "getinv" function attempts to find a previously cached value for "p",
## which is the inverse of the matrix, in the local or global environment. If it is found,
## then it prints a message to the console and returns "m" to the local environment.
## If not found, then it call the other functions created by makeCacheMatrix which do the following:
## 1) gets the input matrix
## 2) calcs the inverse and stores it in "p" locally
## 3) stores the inverse in a "p" in the global envt
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
p<- x$getinv()
print(!is.null(p))
if(!is.null(p)) {
print("getting cached inverse")
return(p)
}
data <- x$get()
print(x$get())
p <- solve(data,...)
x$setinv(p)
p
}
|
###Script to clean and summarise climate sation data into ClimateBC type variables
###Kiri Daust, 2018
library(reshape2)
library(dplyr)
library(magrittr)
library(ggplot2)
library(foreach)
library(tcltk)
library(rgdal)
library(sp)
library(sf)
library(raster)
library(rgeos)
library(maptools)
###Set months for each variables
wt <- c("Dec","Jan","Feb")
sp <- c("Mar","April","May")
sm <- c("June","July","Aug")
at <- c("Sept","Oct","Nov")
meanSm <- c("May","June","July","Aug","Sept")
pptWt <- c("Oct","Nov","Dec","Jan","Feb","Mar")
wd <- tk_choose.dir(); setwd(wd)
###Precipetation
dat <- read.csv(file.choose(), stringsAsFactors = F)###import ppt file
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
###Clean
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
stNames <- unique(dat$St_ID)
###loop through each station id, and calculate each variable if no NAs
pptOut <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MAP <- NA; pptWt <- NA; pptSp <- NA; pptSm <- NA; pptAt <- NA; MSP <- NA; MWP <- NA###Set initial value to NA
if(!any(is.na(sub$Value))){
MAP <- sum(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
pptWt <- sum(sub$Value[sub$Month %in% wt])
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
pptSp <- sum(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
pptSm <- sum(sub$Value[sub$Month %in% sm])
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
pptAt <- sum(sub$Value[sub$Month %in% at])
}
if(!any(is.na(sub$Value[sub$Month %in% meanSm]))){
MSP <- sum(sub$Value[sub$Month %in% meanSm])
}
if(!any(is.na(sub$Value[sub$Month %in% pptWt]))){
MWP <- sum(sub$Value[sub$Month %in% pptWt])
}
out <- data.frame(St_ID = st,Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MAP = MAP, PPT_wt = pptWt, PPT_sp = pptSp, PPT_sm = pptSm,
PPT_at = pptAt, MSP = MSP)
out
}
####Min Temperature
dat <- read.csv(file.choose(), stringsAsFactors = F) ###import min temperature data
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
dat$Value <- dat$Value/10
stNames <- unique(as.character(dat$St_ID))
###Loop through and calculate variables
tMin <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MeanMin <- NA; Tmin_wt <- NA; Tmin_sp <- NA;
Tmin_sm <- NA; Tmin_at <- NA; MWMT <- NA; MCMT <- NA; ###Set inital to NA
if(!any(is.na(sub$Value))){
MeanMin <- mean(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
Tmin_wt <- mean(sub$Value[sub$Month %in% wt])
MCMT <- min(sub$Value[sub$Month %in% wt])###Min value in winter months
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
Tmin_sp <- mean(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
Tmin_sm <- mean(sub$Value[sub$Month %in% sm])
MWMT <- max(sub$Value[sub$Month %in% sm])###Max value in summer months
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
Tmin_at <- mean(sub$Value[sub$Month %in% at])
}
out <- data.frame(St_ID = st, Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MeanMin = MeanMin, Tmin_wt = Tmin_wt, Tmin_sp = Tmin_sp, Tmin_sm = Tmin_sm,
Tmin_at = Tmin_at, MCMTmin = MCMT, MWMTmin = MWMT)
out
}
##test <- aggregate(Value ~ Month, dat, FUN = mean, na.rm = T)
####Max Temp
dat <- read.csv(file.choose(), stringsAsFactors = F) ###import max temperature data
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
dat$Value <- dat$Value/10
stNames <- unique(as.character(dat$St_ID))
tMax <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MeanMin <- NA; Tmin_wt <- NA; Tmin_sp <- NA; Tmin_sm <- NA; Tmin_at <- NA;MWMT <- NA; MCMT <- NA
if(!any(is.na(sub$Value))){
MeanMin <- mean(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
Tmin_wt <- mean(sub$Value[sub$Month %in% wt])
MCMT <- min(sub$Value[sub$Month %in% wt])
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
Tmin_sp <- mean(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
Tmin_sm <- mean(sub$Value[sub$Month %in% sm])
MWMT <- max(sub$Value[sub$Month %in% sm])
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
Tmin_at <- mean(sub$Value[sub$Month %in% at])
}
out <- data.frame(St_ID = st,Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MeanMax = MeanMin, Tmax_wt = Tmin_wt, Tmax_sp = Tmin_sp, Tmax_sm = Tmin_sm,
Tmax_at = Tmin_at, MCMTmax = MCMT, MWMTmax = MWMT)
out
}
###combine into one file and calculate additional variables
aveTemp <- tMax
aveTemp <- merge(aveTemp, tMin, by = "St_ID")
aveTemp <- merge(aveTemp, pptOut, by = "St_ID", all.x = TRUE)
aveTemp$MAT <- (aveTemp$MeanMax + aveTemp$MeanMin)/2 ###average between min and max
aveTemp$MCMTAve <- (aveTemp$MCMTmax+aveTemp$MCMTmin)/2
aveTemp$MWMTAve <- (aveTemp$MWMTmax+aveTemp$MWMTmin)/2
aveTemp$TD <- aveTemp$MWMTAve - aveTemp$MCMTAve
aveTemp$AHM <- (aveTemp$MAT+10)/(aveTemp$MAP/1000)
aveTemp$SHM <- aveTemp$MWMTAve/(aveTemp$MSP/1000)
aveTemp <- within(aveTemp, {Tave_sp <- (Tmin_sp+Tmax_sp)/2
Tave_sm <- (Tmin_sm+Tmax_sm)/2
Tave_at <- (Tmin_at+Tmax_at)/2
Tave_wt <- (Tmin_wt+Tmax_wt)/2})
outVars <- c("St_ID","Name.x","PPT_sp", "PPT_sm", "PPT_at", "PPT_wt", "Tmax_sp", "Tmax_sm", "Tmax_at", "Tmax_wt","Tmin_sp",
"Tmin_sm", "Tmin_at", "Tmin_wt","Tave_sp","Tave_sm", "Tave_at", "Tave_wt",
"MSP", "MAP", "MAT", "MWMTAve", "MCMTAve", "TD", "AHM", "SHM") ###variables to export
StationOut <- aveTemp[,outVars]
colnames(StationOut) <- c("STATION","Name", "PPT_sp", "PPT_sm", "PPT_at", "PPT_wt", "Tmax_sp", "Tmax_sm", "Tmax_at", "Tmax_wt",
"Tmin_sp", "Tmin_sm", "Tmin_at", "Tmin_wt","Tave_sp","Tave_sm", "Tave_at", "Tave_wt",
"MSP", "MAP", "MAT", "MWMT", "MCMT", "TD", "AHM", "SHM")
write.csv(StationOut, "StationSummary.csv", row.names = FALSE) ###Final data set
####Now create list of all stations for climateBC
ppt <- read.csv(file.choose())
tmax <- read.csv(file.choose())
tmin <- read.csv(file.choose())
st.list <- rbind(ppt[,c(1,3,6,7,4)],tmax[,c(1,3,6,7,4)],tmin[,c(1,3,6,7,4)])
st.list <- st.list[unique(st.list$St_ID),]
####Assign BGCs to stations######################
setwd(tk_choose.dir())
bec11 <- st_read(dsn="bgc.v11.gdb",layer="bgcv11_bc") ##read bec file
CRS.albers <- CRS ("+proj=aea +lat_1=50 +lat_2=58.5 +lat_0=45 +lon_0=-126 +x_0=1000000 +y_0=0 +datum=NAD83 +units=m +no_defs")
allUnits <- unique(as.character(bec11$MAP_LABEL))###What units are in BEC?
dem <- raster("bc25fill") ###Read DEM
require(doParallel)
set.seed(123321)
coreNum <- as.numeric(detectCores()-2)
coreNo <- makeCluster(coreNum)
registerDoParallel(coreNo, cores = coreNum)
###Only keeps stations in a BGC
stBGCOut <- foreach(BGC = allUnits, .combine = rbind, .packages = c("sp","sf","raster")) %dopar%{
dat <- st.list
pointsOrig <- dat
coordinates(dat) <- c("Long","Lat")
proj4string(dat) <- CRS("+init=epsg:4326")
dat <- spTransform(dat, CRS.albers) # standard albers projection for BC gov't data
tempPoly <- bec11[bec11$MAP_LABEL == BGC,]
tempPoly <- as(tempPoly, "Spatial") ##conver to sp
tempPoly <- spTransform(tempPoly, CRS.albers)
dat <- over(dat, tempPoly) ###which ones are inside the BGC
pointsOrig <- pointsOrig[!is.na(dat$BGC_LABEL),] ###Remove points not inside BGC
if(nrow(pointsOrig) > 0){ ###check that some points fall inside BGC=
pointsOrig$BGC <- BGC
pointsOrig
}
}
###add elevation data - given elevation data is often innacurate or missing
temp <- stBGCOut
coordinates(temp) <- c("Long","Lat")
proj4string(temp) <- CRS("+init=epsg:4326")
temp <- spTransform(temp, CRS(proj4string(dem)))
stBGCOut$ElevationGood <- raster::extract(dem,temp)
stPointsOut <- stBGCOut[,c("St_ID","BGC","Lat","Long","ElevationGood")]
colnames(stPointsOut) <- c("ID1","ID2","Lat","Long","Elevation")
write.csv(stPointsOut, "StPoints.csv", row.names = FALSE)###write file to input to climateBC
############################################################################################
###Old Code###
colnames(stBGCOut)[1] <- "St_ID"
stBGCOut <- merge(stBGCOut, pptOut[,-(2:3)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, tMin[,c(1,4:7)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, tMax[,c(1,4:7)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, aveTemp[,c(1,6:8)], by = "St_ID", all.x = TRUE)
##save <- stBGCOut
stBGCOut <- stBGCOut[!duplicated(stBGCOut$St_ID),]
write.csv(stBGCOut, "StationDataOct21.csv", row.names = F)
####Cool recursive function##########3
solveTowers <- function(n, source, destination, spare){
if(n == 1){
cat("From",source,"To",destination,"\n", sep = " ")
}else{
solveTowers(n - 1, source, spare, destination)
cat("From",source,"To",destination,"\n", sep = " ")
solveTowers(n-1, spare, destination, source)
}
}
#######
install.packages("sn")
library(sn)
f1 <- makeSECdistr(dp=c(3,2,5), family="SN", name="First-SN")
show(f1)
summary(f1)
plot(f11)
plot(f1, probs=c(0.1, 0.9))
#
f2 <- makeSECdistr(dp=c(3, 5, -4, 8), family="ST", name="First-ST")
f9 <- makeSECdistr(dp=c(5, 1, Inf, 0.5), family="ESN", name="ESN,alpha=Inf")
#
dp0 <- list(xi=1:2, Omega=diag(3:4), alpha=c(3, -5))
f10 <- makeSECdistr(dp=dp0, family="SN", name="SN-2d", compNames=c("u1", "u2"))
#
dp1 <- list(xi=1:2, Omega=diag(1:2)+outer(c(3,3),c(2,2)), alpha=c(-3, 5), nu=6)
f11 <- makeSECdistr(dp=dp1, family="ST", name="ST-2d", compNames=c("t1", "t2"))
data(ais)
m1 <- selm(log(Fe) ~ BMI + LBM, family="SN", data=ais)
print(m1)
summary(m1)
s<- summary(m1, "DP", cov=TRUE, cor=TRUE)
plot(m1)
plot(m1, param.type="DP")
logLik(m1)
coef(m1)
coef(m1, "DP")
var <- vcov(m1)
| /Prism Station Data/StationClean.R | no_license | FLNRO-Smithers-Research/BGC-Climate-Summaries | R | false | false | 10,332 | r | ###Script to clean and summarise climate sation data into ClimateBC type variables
###Kiri Daust, 2018
library(reshape2)
library(dplyr)
library(magrittr)
library(ggplot2)
library(foreach)
library(tcltk)
library(rgdal)
library(sp)
library(sf)
library(raster)
library(rgeos)
library(maptools)
###Set months for each variables
wt <- c("Dec","Jan","Feb")
sp <- c("Mar","April","May")
sm <- c("June","July","Aug")
at <- c("Sept","Oct","Nov")
meanSm <- c("May","June","July","Aug","Sept")
pptWt <- c("Oct","Nov","Dec","Jan","Feb","Mar")
wd <- tk_choose.dir(); setwd(wd)
###Precipetation
dat <- read.csv(file.choose(), stringsAsFactors = F)###import ppt file
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
###Clean
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
stNames <- unique(dat$St_ID)
###loop through each station id, and calculate each variable if no NAs
pptOut <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MAP <- NA; pptWt <- NA; pptSp <- NA; pptSm <- NA; pptAt <- NA; MSP <- NA; MWP <- NA###Set initial value to NA
if(!any(is.na(sub$Value))){
MAP <- sum(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
pptWt <- sum(sub$Value[sub$Month %in% wt])
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
pptSp <- sum(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
pptSm <- sum(sub$Value[sub$Month %in% sm])
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
pptAt <- sum(sub$Value[sub$Month %in% at])
}
if(!any(is.na(sub$Value[sub$Month %in% meanSm]))){
MSP <- sum(sub$Value[sub$Month %in% meanSm])
}
if(!any(is.na(sub$Value[sub$Month %in% pptWt]))){
MWP <- sum(sub$Value[sub$Month %in% pptWt])
}
out <- data.frame(St_ID = st,Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MAP = MAP, PPT_wt = pptWt, PPT_sp = pptSp, PPT_sm = pptSm,
PPT_at = pptAt, MSP = MSP)
out
}
####Min Temperature
dat <- read.csv(file.choose(), stringsAsFactors = F) ###import min temperature data
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
dat$Value <- dat$Value/10
stNames <- unique(as.character(dat$St_ID))
###Loop through and calculate variables
tMin <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MeanMin <- NA; Tmin_wt <- NA; Tmin_sp <- NA;
Tmin_sm <- NA; Tmin_at <- NA; MWMT <- NA; MCMT <- NA; ###Set inital to NA
if(!any(is.na(sub$Value))){
MeanMin <- mean(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
Tmin_wt <- mean(sub$Value[sub$Month %in% wt])
MCMT <- min(sub$Value[sub$Month %in% wt])###Min value in winter months
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
Tmin_sp <- mean(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
Tmin_sm <- mean(sub$Value[sub$Month %in% sm])
MWMT <- max(sub$Value[sub$Month %in% sm])###Max value in summer months
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
Tmin_at <- mean(sub$Value[sub$Month %in% at])
}
out <- data.frame(St_ID = st, Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MeanMin = MeanMin, Tmin_wt = Tmin_wt, Tmin_sp = Tmin_sp, Tmin_sm = Tmin_sm,
Tmin_at = Tmin_at, MCMTmin = MCMT, MWMTmin = MWMT)
out
}
##test <- aggregate(Value ~ Month, dat, FUN = mean, na.rm = T)
####Max Temp
dat <- read.csv(file.choose(), stringsAsFactors = F) ###import max temperature data
dat <- dat[,!colnames(dat) %in% c("St_Flag","El_Flag","Annual")]
dat <- melt(dat, id.vars = c("St_ID","Name","Elevation","Long","Lat"))
colnames(dat)[6:7] <- c("Month","Value")
dat <- dat[order(dat$Name, dat$Month),]
dat[dat == -9999] <- NA
dat$Value <- dat$Value/10
stNames <- unique(as.character(dat$St_ID))
tMax <- foreach(st = stNames, .combine = rbind) %do% {
sub <- dat[dat$St_ID == st,]
MeanMin <- NA; Tmin_wt <- NA; Tmin_sp <- NA; Tmin_sm <- NA; Tmin_at <- NA;MWMT <- NA; MCMT <- NA
if(!any(is.na(sub$Value))){
MeanMin <- mean(sub$Value)
}
if(!any(is.na(sub$Value[sub$Month %in% wt]))){
Tmin_wt <- mean(sub$Value[sub$Month %in% wt])
MCMT <- min(sub$Value[sub$Month %in% wt])
}
if(!any(is.na(sub$Value[sub$Month %in% sp]))){
Tmin_sp <- mean(sub$Value[sub$Month %in% sp])
}
if(!any(is.na(sub$Value[sub$Month %in% sm]))){
Tmin_sm <- mean(sub$Value[sub$Month %in% sm])
MWMT <- max(sub$Value[sub$Month %in% sm])
}
if(!any(is.na(sub$Value[sub$Month %in% at]))){
Tmin_at <- mean(sub$Value[sub$Month %in% at])
}
out <- data.frame(St_ID = st,Name = sub$Name[1], Long = sub$Long[1], Lat = sub$Lat[1], MeanMax = MeanMin, Tmax_wt = Tmin_wt, Tmax_sp = Tmin_sp, Tmax_sm = Tmin_sm,
Tmax_at = Tmin_at, MCMTmax = MCMT, MWMTmax = MWMT)
out
}
###combine into one file and calculate additional variables
aveTemp <- tMax
aveTemp <- merge(aveTemp, tMin, by = "St_ID")
aveTemp <- merge(aveTemp, pptOut, by = "St_ID", all.x = TRUE)
aveTemp$MAT <- (aveTemp$MeanMax + aveTemp$MeanMin)/2 ###average between min and max
aveTemp$MCMTAve <- (aveTemp$MCMTmax+aveTemp$MCMTmin)/2
aveTemp$MWMTAve <- (aveTemp$MWMTmax+aveTemp$MWMTmin)/2
aveTemp$TD <- aveTemp$MWMTAve - aveTemp$MCMTAve
aveTemp$AHM <- (aveTemp$MAT+10)/(aveTemp$MAP/1000)
aveTemp$SHM <- aveTemp$MWMTAve/(aveTemp$MSP/1000)
aveTemp <- within(aveTemp, {Tave_sp <- (Tmin_sp+Tmax_sp)/2
Tave_sm <- (Tmin_sm+Tmax_sm)/2
Tave_at <- (Tmin_at+Tmax_at)/2
Tave_wt <- (Tmin_wt+Tmax_wt)/2})
outVars <- c("St_ID","Name.x","PPT_sp", "PPT_sm", "PPT_at", "PPT_wt", "Tmax_sp", "Tmax_sm", "Tmax_at", "Tmax_wt","Tmin_sp",
"Tmin_sm", "Tmin_at", "Tmin_wt","Tave_sp","Tave_sm", "Tave_at", "Tave_wt",
"MSP", "MAP", "MAT", "MWMTAve", "MCMTAve", "TD", "AHM", "SHM") ###variables to export
StationOut <- aveTemp[,outVars]
colnames(StationOut) <- c("STATION","Name", "PPT_sp", "PPT_sm", "PPT_at", "PPT_wt", "Tmax_sp", "Tmax_sm", "Tmax_at", "Tmax_wt",
"Tmin_sp", "Tmin_sm", "Tmin_at", "Tmin_wt","Tave_sp","Tave_sm", "Tave_at", "Tave_wt",
"MSP", "MAP", "MAT", "MWMT", "MCMT", "TD", "AHM", "SHM")
write.csv(StationOut, "StationSummary.csv", row.names = FALSE) ###Final data set
####Now create list of all stations for climateBC
ppt <- read.csv(file.choose())
tmax <- read.csv(file.choose())
tmin <- read.csv(file.choose())
st.list <- rbind(ppt[,c(1,3,6,7,4)],tmax[,c(1,3,6,7,4)],tmin[,c(1,3,6,7,4)])
st.list <- st.list[unique(st.list$St_ID),]
####Assign BGCs to stations######################
setwd(tk_choose.dir())
bec11 <- st_read(dsn="bgc.v11.gdb",layer="bgcv11_bc") ##read bec file
CRS.albers <- CRS ("+proj=aea +lat_1=50 +lat_2=58.5 +lat_0=45 +lon_0=-126 +x_0=1000000 +y_0=0 +datum=NAD83 +units=m +no_defs")
allUnits <- unique(as.character(bec11$MAP_LABEL))###What units are in BEC?
dem <- raster("bc25fill") ###Read DEM
require(doParallel)
set.seed(123321)
coreNum <- as.numeric(detectCores()-2)
coreNo <- makeCluster(coreNum)
registerDoParallel(coreNo, cores = coreNum)
###Only keeps stations in a BGC
stBGCOut <- foreach(BGC = allUnits, .combine = rbind, .packages = c("sp","sf","raster")) %dopar%{
dat <- st.list
pointsOrig <- dat
coordinates(dat) <- c("Long","Lat")
proj4string(dat) <- CRS("+init=epsg:4326")
dat <- spTransform(dat, CRS.albers) # standard albers projection for BC gov't data
tempPoly <- bec11[bec11$MAP_LABEL == BGC,]
tempPoly <- as(tempPoly, "Spatial") ##conver to sp
tempPoly <- spTransform(tempPoly, CRS.albers)
dat <- over(dat, tempPoly) ###which ones are inside the BGC
pointsOrig <- pointsOrig[!is.na(dat$BGC_LABEL),] ###Remove points not inside BGC
if(nrow(pointsOrig) > 0){ ###check that some points fall inside BGC=
pointsOrig$BGC <- BGC
pointsOrig
}
}
###add elevation data - given elevation data is often innacurate or missing
temp <- stBGCOut
coordinates(temp) <- c("Long","Lat")
proj4string(temp) <- CRS("+init=epsg:4326")
temp <- spTransform(temp, CRS(proj4string(dem)))
stBGCOut$ElevationGood <- raster::extract(dem,temp)
stPointsOut <- stBGCOut[,c("St_ID","BGC","Lat","Long","ElevationGood")]
colnames(stPointsOut) <- c("ID1","ID2","Lat","Long","Elevation")
write.csv(stPointsOut, "StPoints.csv", row.names = FALSE)###write file to input to climateBC
############################################################################################
###Old Code###
colnames(stBGCOut)[1] <- "St_ID"
stBGCOut <- merge(stBGCOut, pptOut[,-(2:3)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, tMin[,c(1,4:7)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, tMax[,c(1,4:7)], by = "St_ID", all.x = TRUE)
stBGCOut <- merge(stBGCOut, aveTemp[,c(1,6:8)], by = "St_ID", all.x = TRUE)
##save <- stBGCOut
stBGCOut <- stBGCOut[!duplicated(stBGCOut$St_ID),]
write.csv(stBGCOut, "StationDataOct21.csv", row.names = F)
####Cool recursive function##########3
solveTowers <- function(n, source, destination, spare){
if(n == 1){
cat("From",source,"To",destination,"\n", sep = " ")
}else{
solveTowers(n - 1, source, spare, destination)
cat("From",source,"To",destination,"\n", sep = " ")
solveTowers(n-1, spare, destination, source)
}
}
#######
install.packages("sn")
library(sn)
f1 <- makeSECdistr(dp=c(3,2,5), family="SN", name="First-SN")
show(f1)
summary(f1)
plot(f11)
plot(f1, probs=c(0.1, 0.9))
#
f2 <- makeSECdistr(dp=c(3, 5, -4, 8), family="ST", name="First-ST")
f9 <- makeSECdistr(dp=c(5, 1, Inf, 0.5), family="ESN", name="ESN,alpha=Inf")
#
dp0 <- list(xi=1:2, Omega=diag(3:4), alpha=c(3, -5))
f10 <- makeSECdistr(dp=dp0, family="SN", name="SN-2d", compNames=c("u1", "u2"))
#
dp1 <- list(xi=1:2, Omega=diag(1:2)+outer(c(3,3),c(2,2)), alpha=c(-3, 5), nu=6)
f11 <- makeSECdistr(dp=dp1, family="ST", name="ST-2d", compNames=c("t1", "t2"))
data(ais)
m1 <- selm(log(Fe) ~ BMI + LBM, family="SN", data=ais)
print(m1)
summary(m1)
s<- summary(m1, "DP", cov=TRUE, cor=TRUE)
plot(m1)
plot(m1, param.type="DP")
logLik(m1)
coef(m1)
coef(m1, "DP")
var <- vcov(m1)
|
<?xml version="1.0" encoding="iso-8859-1"?>
<resource schema="cycle0fits">
<meta name="title">ALMA Cycle 0 FITS</meta>
<meta name="creationDate">2015-03-19T18:01:00Z</meta>
<meta name="description" format="plain">
This resource contains the fits files generated by the ALMA pipeline
for Cycle 0. Only public data is published here.
</meta>
<meta name="copyright">Free to use.</meta>
<meta name="creator.name">ALMA</meta>
<meta name="creator.logo">http://www.eso.org/public/archives/logos/screen/alma-logo.jpg</meta>
<meta name="subject">Millimiter/submillimiter Astronomy</meta>
<meta name="facility">ALMA OSF</meta>
<table id="cycle0" onDisk="True" adql="True">
<mixin
calibLevel="3"
collectionName="'ALMA CYCLE0 FITS'"
dec="s_dec"
ra="s_ra"
targetName="target_name"
productType="'cube, image'"
oUCD="'phot.flux.density.sb'"
facilityName="'ALMA'"
instrumentName="'ALMA'" >
//obscore#publishSIAP
</mixin>
<mixin>
//siap#pgs
</mixin>
<mixin>
//scs#q3cindex
</mixin>
<!--DESCRIPTION-->
<meta name="description">
ALMA CYCLE0 FITS
</meta>
<column name="target_name" type="text"
description="Targeted observation"
utype="obscore:target.name" ucd="meta.id;meta.main"
verbLevel="15">
<property name="std">1</property>
</column>
<column name="s_ra"
ucd="pos.eq.ra;meta.main"
tablehead="RA"
description="Right Ascension"
verbLevel="10"/>
<column name="s_dec"
ucd="pos.eq.dec;meta.main"
tablehead="Dec"
description="Declination"
verbLevel="10"/>
</table>
<rowmaker id="build_fits">
<map dest="target_name">@OBJECT</map>
<map dest="s_dec">@CRVAL1</map>
<map dest="s_ra">@CRVAL2</map>
<apply procDef="//siap#computePGS"/>
<apply procDef="//siap#setMeta">
<bind name="title">@OBJECT</bind>
<bind name="instrument">"ALMA/CASA"</bind>
<bind name="dateObs">@DATE-OBS</bind>
</apply>
</rowmaker>
<data id="import_content">
<sources pattern="*.fits"/>
<fitsProdGrammar qnd="True">
<maxHeaderBlocks>326</maxHeaderBlocks>
<rowfilter procDef="__system__/products#define">
<bind key="table">"alma-fits.cycle0"</bind>
</rowfilter>
</fitsProdGrammar>
<register services="__system__/tap#run"/>
<make table="cycle0" rowmaker="build_fits"/>
</data>
<service id="siap-cycle0-fits" allowed="form,siap.xml">
<meta name="shortName">SIAP ALMA CYCLE0 FITS</meta>
<meta name="title">Simple Image Access for ALMA Cycle 0 Fits</meta>
<meta name="sia.type">pointed</meta>
<meta name="testQuery.pos.ra">0.8</meta>
<meta name="testQuery.size.ra">0.8</meta>
<publish render="siap.xml" sets="local,ivo_managed"/>
<publish render="form" sets="local"/>
<dbCore id="query_images" queriedTable="cycle0">
<condDesc original="//siap#protoInput"/>
<condDesc original="//siap#humanInput"/>
</dbCore>
</service>
<service id="scs-cycle0-fits" allowed="form,scs.xml">
<meta name="shortName">SCS ALMA CYCLE0 FITS</meta>
<meta name="title">Simple Cone Search for ALMA Cycle 0 Fits</meta>
<meta name="testQuery.ra">0.01</meta>
<meta name="testQuery.dec">0.01</meta>
<dbCore queriedTable="cycle0">
<condDesc original="//scs#humanInput"/>
<condDesc original="//scs#protoInput"/>
</dbCore>
<publish render="scs.xml" sets="local,ivo_managed"/>
<publish render="form" sets="local"/>
</service>
</resource>
| /inputs/cycle0fits/q.rd | no_license | paramire/LIRAE-DaCHS | R | false | false | 3,422 | rd | <?xml version="1.0" encoding="iso-8859-1"?>
<resource schema="cycle0fits">
<meta name="title">ALMA Cycle 0 FITS</meta>
<meta name="creationDate">2015-03-19T18:01:00Z</meta>
<meta name="description" format="plain">
This resource contains the fits files generated by the ALMA pipeline
for Cycle 0. Only public data is published here.
</meta>
<meta name="copyright">Free to use.</meta>
<meta name="creator.name">ALMA</meta>
<meta name="creator.logo">http://www.eso.org/public/archives/logos/screen/alma-logo.jpg</meta>
<meta name="subject">Millimeter/submillimeter Astronomy</meta>
<meta name="facility">ALMA OSF</meta>
<table id="cycle0" onDisk="True" adql="True">
<mixin
calibLevel="3"
collectionName="'ALMA CYCLE0 FITS'"
dec="s_dec"
ra="s_ra"
targetName="target_name"
productType="'cube, image'"
oUCD="'phot.flux.density.sb'"
facilityName="'ALMA'"
instrumentName="'ALMA'" >
//obscore#publishSIAP
</mixin>
<mixin>
//siap#pgs
</mixin>
<mixin>
//scs#q3cindex
</mixin>
<!--DESCRIPTION-->
<meta name="description">
ALMA CYCLE0 FITS
</meta>
<column name="target_name" type="text"
description="Targeted observation"
utype="obscore:target.name" ucd="meta.id;meta.main"
verbLevel="15">
<property name="std">1</property>
</column>
<column name="s_ra"
ucd="pos.eq.ra;meta.main"
tablehead="RA"
description="Right Ascension"
verbLevel="10"/>
<column name="s_dec"
ucd="pos.eq.dec;meta.main"
tablehead="Dec"
description="Declination"
verbLevel="10"/>
</table>
<rowmaker id="build_fits">
<map dest="target_name">@OBJECT</map>
<map dest="s_dec">@CRVAL1</map>
<map dest="s_ra">@CRVAL2</map>
<apply procDef="//siap#computePGS"/>
<apply procDef="//siap#setMeta">
<bind name="title">@OBJECT</bind>
<bind name="instrument">"ALMA/CASA"</bind>
<bind name="dateObs">@DATE-OBS</bind>
</apply>
</rowmaker>
<data id="import_content">
<sources pattern="*.fits"/>
<fitsProdGrammar qnd="True">
<maxHeaderBlocks>326</maxHeaderBlocks>
<rowfilter procDef="__system__/products#define">
<bind key="table">"alma-fits.cycle0"</bind>
</rowfilter>
</fitsProdGrammar>
<register services="__system__/tap#run"/>
<make table="cycle0" rowmaker="build_fits"/>
</data>
<service id="siap-cycle0-fits" allowed="form,siap.xml">
<meta name="shortName">SIAP ALMA CYCLE0 FITS</meta>
<meta name="title">Sample Image Access for ALMA Cycle 0 Fits</meta>
<meta name="sia.type">pointed</meta>
<meta name="testQuery.pos.ra">0.8</meta>
<meta name="testQuery.size.ra">0.8</meta>
<publish render="siap.xml" sets="local,ivo_managed"/>
<publish render="form" sets="local"/>
<dbCore id="query_images" queriedTable="cycle0">
<condDesc original="//siap#protoInput"/>
<condDesc original="//siap#humanInput"/>
</dbCore>
</service>
<service id="scs-cycle0-fits" allowed="form,scs.xml">
<meta name="shortName">SCS ALMA CYCLE0 FITS</meta>
<meta name="title">Simple Cone Search for ALMA Cycle 0 Fits</meta>
<meta name="testQuery.ra">0.01</meta>
<meta name="testQuery.dec">0.01</meta>
<dbCore queriedTable="cycle0">
<condDesc original="//scs#humanInput"/>
<condDesc original="//scs#protoInput"/>
</dbCore>
<publish render="scs.xml" sets="local,ivo_managed"/>
<publish render="form" sets="local"/>
</service>
</resource>
|
# install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
# --- Dependencies --------------------------------------------------------
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
# NOTE(review): (re)installing TensorFlow on every run is a heavy side
# effect -- presumably intended for fresh cluster nodes; confirm.
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())
#data load and preprocess
{
# Read the event log and keep only cases 1001..2000 (by first appearance).
input = data.frame(read.csv('lp10k-0.1-0.csv', header=T))
input = input[which(is.element(input$Case, unique(input$Case)[1001:2000])),]
normal= input[which(input$anomaly_type =="normal"),]
anomaly= input[which(input$anomaly_type !="normal"),]
# Drop labelled-anomalous cases whose activity sequence also occurs among
# normal cases (ambiguous labels).
normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
input = input[which(!is.element(input$Case, delete_case)),]
input$Event = 1:nrow(input)
input$Event = as.factor(input$Event)
# NOTE(review): 'start' = within-case running count minus 1, with non-1
# values zeroed, so it is 1 at each case's SECOND event -- if the intent
# was to flag the FIRST event, the -1 offset is off by one; confirm.
one= rep(1, nrow(input))
input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
input[which(input$start !=1),'start'] =0
}
####
#functions
{
# Diagonal of the hat matrix H = X (X'X)^- X' for design matrix `x`,
# using the Moore-Penrose pseudoinverse (MASS::ginv) so rank-deficient
# encodings still work. Returns one leverage value per row of `x`.
fun_leverage = function(x) {
  xtx_pinv <- ginv(t(x) %*% x)
  proj <- x %*% xtx_pinv
  # diag(proj %*% t(x)) computed without forming the full n-by-n hat matrix.
  rowSums(proj * x)
}
# fun_embedding: build a small keras network whose first layer embeds the
# activity alphabet, then return that embedding's weight matrix as a data
# frame -- one row per activity level plus a leading "none" row for the
# padding index (input_dim is vocabulary size + 1).
#
# ActivityID       factor of activity labels (its levels define the vocabulary)
# embedding_size   dimensionality of the embedding vectors
#
# NOTE(review): the model is compiled but never fit, so the returned weights
# are the layer's random initialisation -- confirm this is intentional
# (random-projection-style encoding) rather than a missing fit() call.
# NOTE(review): `metric=` relies on partial matching of keras's `metrics`.
fun_embedding = function(ActivityID, embedding_size){
model <- keras_model_sequential()
model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
layer_flatten() %>%
layer_dense(units=40, activation = "relu") %>%
layer_dense(units=10, activation = "relu") %>%
layer_dense(units=1)
model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
layer <- get_layer(model, "embedding")
embeddings <- data.frame(layer$get_weights()[[1]])
embeddings$ActivityID <- c("none", levels(ActivityID) )
return(embeddings)
}
# One-hot encode the ActivityID factor (2nd column) of `data`.
# Column ActivityID1 indicates the first factor level; when there is more
# than one level, the remaining indicator columns come from model.matrix()
# (intercept dropped).
fun_onehot = function(data) {
  # Indicator of the first level: factor code 1 stays 1, everything else -> 0.
  first_level <- as.numeric(data[, 2])
  first_level[which(first_level != 1)] <- 0
  if (length(levels(data$ActivityID)) > 1) {
    mm <- model.matrix(~ActivityID, data = data)
    encoded <- cbind(ActivityID1 = first_level, mm[, -1])
  } else {
    encoded <- cbind(ActivityID1 = first_level)
  }
  as.data.frame(encoded)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
# fun_batch_remove_FALSE: streaming anomaly scoring via leverage, batch mode,
# WITHOUT removing previously-flagged cases.  For every incoming event the
# current window of events is re-encoded (one-hot or keras embedding) as
# prefix vectors, and the leverage (hat-matrix diagonal) of the newest prefix
# among same-length prefixes is stored as that event's anomaly score.
#
# Arguments:
#   input            event log; columns used: Case, Activity, timestamp,
#                    order, anomaly_type (schema inferred from usage -- confirm)
#   Min              number of case starts to accumulate before scoring begins
#   Max              cap on the number of active cases in the window
#                    (forced up to Min+1 when smaller)
#   until            how many events to score after warm-up; 0 = to the end
#   embedding_size_p if > 0, encode activities with a keras embedding of size
#                    round(num_activities * embedding_size_p); else one-hot
#
# Returns `pre` (the time-ordered input) augmented with per-event 'leverage',
# 'time' (seconds spent) and 'tn' (score > mean+sd flag) columns.
# NOTE(review): if start_index == last_index after windowing, the "#skip"
# branch below falls through and the function returns NULL, not `pre`.
fun_batch_remove_FALSE = function(input, Min, Max, until, embedding_size_p ){
#prepare data
# NOTE(review): 'start' = within-case running count minus 1, then non-1
# zeroed, so it is 1 at each case's SECOND event -- confirm the -1 offset.
pre<-input
pre= pre[ with(pre, order(Case,timestamp)),]
one= rep(1, nrow(pre))
pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
pre[which(pre$start !=1),'start'] =0
pre= pre[ with(pre, order(timestamp)),]
pre[,'Event'] = as.factor(1:nrow(pre))
pre[,'num_case'] = cumsum(pre$start)
pre[,'leverage'] = rep(-1, nrow(pre))
pre[,'t1'] = rep(0, nrow(pre))
pre[,'t2'] = rep(0, nrow(pre))
pre[,'t3'] = rep(0, nrow(pre))
pre[,'tn']= rep(0, nrow(pre))
pre[,'time'] = rep(0, nrow(pre))
event_num = nrow(pre)
case_num= length(unique(pre$Case))
# Warm-up: index of the first event at which Min+1 case-starts have been seen.
start_index = which(pre$num_case == Min +1)[1]
last_index = nrow(pre)
leverage_start <- Sys.time()
pre2 = pre[1:start_index,]
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
#basic: Max should be larger than Min or equal
if(Max< (Min+1)){
Max=Min+1
}
# Max option
# Trim the warm-up window to at most Max active cases (oldest cases dropped).
if(cur_len > Max ){
del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
pre = pre[which(!is.element(pre$Case, del_case)),]
pre[,'num_case'] = cumsum(pre$start)
event_num = nrow(pre)
case_num= length(unique(pre$Case))
last_index = nrow(pre)
pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
cur_len = sum(pre2$start)
start_index = nrow(pre2)
last_index = nrow(pre)
}
if(start_index == last_index){
#skip
}else{
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# deep embedding encoding
embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
# NOTE(review): `max` and `c` below shadow base::max / base::c in this scope.
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
c=unique(pre2[,c("Case","anomaly_type")])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
act_save = names(newdat) #change 1
newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*num_act)]
}
#Caculate leverage
# Leverage of the newest prefix among all prefixes of the same length.
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_leverage(x)
pre[start_index, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
pre[start_index, 'time'] = (leverage_end-leverage_start)
pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
#Set escape option
# Translate the relative `until` count into an absolute last event index.
if(until==0 | start_index+until>last_index){
until = last_index
}else{
until= start_index+until
}
#Start event steam
# Event stream: re-encode the window and score each arriving event.
for(i in (start_index+1):until){ # last_index
print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
leverage_start <- Sys.time()
pre2 = rbind(pre2, pre[i,])
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity",'order')]
names(data)[1:2] <- c("ID", "ActivityID")
# Max option
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case']
del_case = del_case[1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
data = data[which(!is.element(data[,1], del_case)),]
pre3= pre2[which(!is.element(pre2[,1], del_case)),]
label = as.character(pre3[,c("anomaly_type")])
}else{
label = as.character(pre2[,c("anomaly_type")])
}
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# embedding encoding
embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
{ # update event
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
}
# Max option
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
pre2 = pre2[which(!is.element(all3[,1], del_case)),]
newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
label= label[which(!is.element(all3[,1], del_case))]
prefixL= prefixL[which(!is.element(all3[,1], del_case))]
all3 = all3[which(!is.element(all3[,1], del_case)),]
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
act_save = names(newdat) #change 1
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*num_act)]
}
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_leverage(x)
pre[i, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[length(h_diag)],5), " (CaseID=",object_case,")" ,sep=''))
pre[i, 'time'] = (leverage_end-leverage_start)
pre[i, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
}
return(pre)
}
}
fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
# streaming_score: front-end dispatcher for the four scoring variants
# (batch / non-batch x with / without case removal). Times the run, prints
# the elapsed time, and returns the scored event data frame produced by the
# selected fun_* implementation.
#
# BUG FIX: the default `start_index = start_index` was self-referential; if a
# caller ever forced that argument without supplying it, R would raise
# "promise already under evaluation". The argument is not used in this body
# (and is not forwarded to the fun_* implementations), so NULL is a safe,
# backward-compatible default.
streaming_score = function(input, Min = 100, start_index = NULL, Max = 0, until = 0, batch = TRUE, embedding_size_p = 0, remove = TRUE, remove_threshold = 0.2){
  total_start <- Sys.time()
  if(remove == TRUE){
    if(batch == TRUE){
      pre = fun_batch_remove_TRUE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
    }else{
      pre = fun_remove_TRUE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
    }
  }else{
    if(batch == TRUE){
      pre = fun_batch_remove_FALSE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p )
    }else{
      pre = fun_remove_FALSE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p )
    }
  }
  total_end <- Sys.time()
  print(total_end - total_start)  # wall-clock runtime of the whole scoring pass
  return(pre)
}
}
#Result
{
# Score the loaded log with one-hot encoding (embedding_size_p=0), batch
# mode, no case removal; then persist the per-event scores as CSV.
output = streaming_score(input, Min=100, Max=100, until = 0, batch=TRUE, remove= FALSE, embedding_size_p=0) # onehot
setwd("~/extension_AD/evaluations/total_result/concept_drift_result")
write.csv(output, "result_model2_lp10k_100_late.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
| /concept_drift_code/lp10k_100_late.R | no_license | paai-lab/Online-Anomaly-Detection-Extension-2021 | R | false | false | 14,760 | r | # install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
# --- Dependencies --------------------------------------------------------
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
# NOTE(review): (re)installing TensorFlow on every run is a heavy side
# effect -- presumably intended for fresh cluster nodes; confirm.
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())
#data load and preprocess
{
# Read the event log and keep only cases 1001..2000 (by first appearance).
input = data.frame(read.csv('lp10k-0.1-0.csv', header=T))
input = input[which(is.element(input$Case, unique(input$Case)[1001:2000])),]
normal= input[which(input$anomaly_type =="normal"),]
anomaly= input[which(input$anomaly_type !="normal"),]
# Drop labelled-anomalous cases whose activity sequence also occurs among
# normal cases (ambiguous labels).
normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
input = input[which(!is.element(input$Case, delete_case)),]
input$Event = 1:nrow(input)
input$Event = as.factor(input$Event)
# NOTE(review): 'start' = within-case running count minus 1, with non-1
# values zeroed, so it is 1 at each case's SECOND event -- if the intent
# was to flag the FIRST event, the -1 offset is off by one; confirm.
one= rep(1, nrow(input))
input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
input[which(input$start !=1),'start'] =0
}
####
#functions
{
# Diagonal of the hat matrix H = X (X'X)^- X' for design matrix `x`,
# using the Moore-Penrose pseudoinverse (MASS::ginv) so rank-deficient
# encodings still work. Returns one leverage value per row of `x`.
fun_leverage = function(x) {
  xtx_pinv <- ginv(t(x) %*% x)
  proj <- x %*% xtx_pinv
  # diag(proj %*% t(x)) computed without forming the full n-by-n hat matrix.
  rowSums(proj * x)
}
# fun_embedding: build a small keras network whose first layer embeds the
# activity alphabet, then return that embedding's weight matrix as a data
# frame -- one row per activity level plus a leading "none" row for the
# padding index (input_dim is vocabulary size + 1).
#
# ActivityID       factor of activity labels (its levels define the vocabulary)
# embedding_size   dimensionality of the embedding vectors
#
# NOTE(review): the model is compiled but never fit, so the returned weights
# are the layer's random initialisation -- confirm this is intentional
# (random-projection-style encoding) rather than a missing fit() call.
# NOTE(review): `metric=` relies on partial matching of keras's `metrics`.
fun_embedding = function(ActivityID, embedding_size){
model <- keras_model_sequential()
model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
layer_flatten() %>%
layer_dense(units=40, activation = "relu") %>%
layer_dense(units=10, activation = "relu") %>%
layer_dense(units=1)
model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
layer <- get_layer(model, "embedding")
embeddings <- data.frame(layer$get_weights()[[1]])
embeddings$ActivityID <- c("none", levels(ActivityID) )
return(embeddings)
}
# One-hot encode the ActivityID factor (2nd column) of `data`.
# Column ActivityID1 indicates the first factor level; when there is more
# than one level, the remaining indicator columns come from model.matrix()
# (intercept dropped).
fun_onehot = function(data) {
  # Indicator of the first level: factor code 1 stays 1, everything else -> 0.
  first_level <- as.numeric(data[, 2])
  first_level[which(first_level != 1)] <- 0
  if (length(levels(data$ActivityID)) > 1) {
    mm <- model.matrix(~ActivityID, data = data)
    encoded <- cbind(ActivityID1 = first_level, mm[, -1])
  } else {
    encoded <- cbind(ActivityID1 = first_level)
  }
  as.data.frame(encoded)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
# fun_batch_remove_FALSE: streaming anomaly scoring via leverage, batch mode,
# WITHOUT removing previously-flagged cases.  For every incoming event the
# current window of events is re-encoded (one-hot or keras embedding) as
# prefix vectors, and the leverage (hat-matrix diagonal) of the newest prefix
# among same-length prefixes is stored as that event's anomaly score.
#
# Arguments:
#   input            event log; columns used: Case, Activity, timestamp,
#                    order, anomaly_type (schema inferred from usage -- confirm)
#   Min              number of case starts to accumulate before scoring begins
#   Max              cap on the number of active cases in the window
#                    (forced up to Min+1 when smaller)
#   until            how many events to score after warm-up; 0 = to the end
#   embedding_size_p if > 0, encode activities with a keras embedding of size
#                    round(num_activities * embedding_size_p); else one-hot
#
# Returns `pre` (the time-ordered input) augmented with per-event 'leverage',
# 'time' (seconds spent) and 'tn' (score > mean+sd flag) columns.
# NOTE(review): if start_index == last_index after windowing, the "#skip"
# branch below falls through and the function returns NULL, not `pre`.
fun_batch_remove_FALSE = function(input, Min, Max, until, embedding_size_p ){
#prepare data
# NOTE(review): 'start' = within-case running count minus 1, then non-1
# zeroed, so it is 1 at each case's SECOND event -- confirm the -1 offset.
pre<-input
pre= pre[ with(pre, order(Case,timestamp)),]
one= rep(1, nrow(pre))
pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
pre[which(pre$start !=1),'start'] =0
pre= pre[ with(pre, order(timestamp)),]
pre[,'Event'] = as.factor(1:nrow(pre))
pre[,'num_case'] = cumsum(pre$start)
pre[,'leverage'] = rep(-1, nrow(pre))
pre[,'t1'] = rep(0, nrow(pre))
pre[,'t2'] = rep(0, nrow(pre))
pre[,'t3'] = rep(0, nrow(pre))
pre[,'tn']= rep(0, nrow(pre))
pre[,'time'] = rep(0, nrow(pre))
event_num = nrow(pre)
case_num= length(unique(pre$Case))
# Warm-up: index of the first event at which Min+1 case-starts have been seen.
start_index = which(pre$num_case == Min +1)[1]
last_index = nrow(pre)
leverage_start <- Sys.time()
pre2 = pre[1:start_index,]
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
#basic: Max should be larger than Min or equal
if(Max< (Min+1)){
Max=Min+1
}
# Max option
# Trim the warm-up window to at most Max active cases (oldest cases dropped).
if(cur_len > Max ){
del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
pre = pre[which(!is.element(pre$Case, del_case)),]
pre[,'num_case'] = cumsum(pre$start)
event_num = nrow(pre)
case_num= length(unique(pre$Case))
last_index = nrow(pre)
pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
cur_len = sum(pre2$start)
start_index = nrow(pre2)
last_index = nrow(pre)
}
if(start_index == last_index){
#skip
}else{
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# deep embedding encoding
embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
# NOTE(review): `max` and `c` below shadow base::max / base::c in this scope.
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
c=unique(pre2[,c("Case","anomaly_type")])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
act_save = names(newdat) #change 1
newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*num_act)]
}
#Caculate leverage
# Leverage of the newest prefix among all prefixes of the same length.
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_leverage(x)
pre[start_index, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
pre[start_index, 'time'] = (leverage_end-leverage_start)
pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
#Set escape option
# Translate the relative `until` count into an absolute last event index.
if(until==0 | start_index+until>last_index){
until = last_index
}else{
until= start_index+until
}
#Start event steam
# Event stream: re-encode the window and score each arriving event.
for(i in (start_index+1):until){ # last_index
print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
leverage_start <- Sys.time()
pre2 = rbind(pre2, pre[i,])
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity",'order')]
names(data)[1:2] <- c("ID", "ActivityID")
# Max option
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case']
del_case = del_case[1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
data = data[which(!is.element(data[,1], del_case)),]
pre3= pre2[which(!is.element(pre2[,1], del_case)),]
label = as.character(pre3[,c("anomaly_type")])
}else{
label = as.character(pre2[,c("anomaly_type")])
}
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# embedding encoding
embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
{ # update event
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
}
# Max option
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
pre2 = pre2[which(!is.element(all3[,1], del_case)),]
newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
label= label[which(!is.element(all3[,1], del_case))]
prefixL= prefixL[which(!is.element(all3[,1], del_case))]
all3 = all3[which(!is.element(all3[,1], del_case)),]
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
act_save = names(newdat) #change 1
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*num_act)]
}
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_leverage(x)
pre[i, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[length(h_diag)],5), " (CaseID=",object_case,")" ,sep=''))
pre[i, 'time'] = (leverage_end-leverage_start)
pre[i, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
}
return(pre)
}
}
fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
# streaming_score: front-end dispatcher for the four scoring variants
# (batch / non-batch x with / without case removal). Times the run, prints
# the elapsed time, and returns the scored event data frame produced by the
# selected fun_* implementation.
#
# BUG FIX: the default `start_index = start_index` was self-referential; if a
# caller ever forced that argument without supplying it, R would raise
# "promise already under evaluation". The argument is not used in this body
# (and is not forwarded to the fun_* implementations), so NULL is a safe,
# backward-compatible default.
streaming_score = function(input, Min = 100, start_index = NULL, Max = 0, until = 0, batch = TRUE, embedding_size_p = 0, remove = TRUE, remove_threshold = 0.2){
  total_start <- Sys.time()
  if(remove == TRUE){
    if(batch == TRUE){
      pre = fun_batch_remove_TRUE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
    }else{
      pre = fun_remove_TRUE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p, remove_threshold=remove_threshold )
    }
  }else{
    if(batch == TRUE){
      pre = fun_batch_remove_FALSE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p )
    }else{
      pre = fun_remove_FALSE(input=input, Min=Min, Max=Max, until=until, embedding_size_p=embedding_size_p )
    }
  }
  total_end <- Sys.time()
  print(total_end - total_start)  # wall-clock runtime of the whole scoring pass
  return(pre)
}
}
#Result
{
# Score the loaded log with one-hot encoding (embedding_size_p=0), batch
# mode, no case removal; then persist the per-event scores as CSV.
output = streaming_score(input, Min=100, Max=100, until = 0, batch=TRUE, remove= FALSE, embedding_size_p=0) # onehot
setwd("~/extension_AD/evaluations/total_result/concept_drift_result")
write.csv(output, "result_model2_lp10k_100_late.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
|
#probhat: Multivariate Generalized Kernel Smoothing and Related Statistical Methods
#Copyright (C), Abby Spurdle, 2019 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
#PDF evaluator for a univariate continuous-kernel-smoothed (cks) estimate.
#`.` is the estimate object's own environment, recovered via .THAT ().
.pdfuv.cks.eval = function (x)
{	. = .THAT ()
#validate/transform the evaluation points against truncation flags and XLIM
	x = .val.u.uv (x, .$.any.trunc, .$.is.trunc, .$XLIM)
	if (.$is.spline)
#fast path: evaluate a precomputed spline approximation of the density
		.$spline.function (x)
	else
	{	data = .select.bdata (.$.any.trunc, .$trtype, .$data, .$.xpnd)
		y = .iterate.uv (.pdfuv.cks.eval.scalar, .$.internal.isw, .$kernel@f, .$bw, data$n, data$x, .$.internalw, u=x)
#rescale density mass when the distribution is truncated (non-"local")
		.scale.val (y, .$trtype, .$.any.trunc, .$.scalef)
	}
}
#CDF evaluator for a univariate continuous-kernel-smoothed (cks) estimate.
#Uses the kernel's antiderivative (kernel@F) instead of its density.
.cdfuv.cks.eval = function (x, ...)
{	. = .THAT ()
	x = .val.u.uv (x, .$.any.trunc, .$.is.trunc, .$XLIM)
	if (.$is.spline)
		.$spline.function (x)
	else
	{	data = .select.bdata (.$.any.trunc, .$trtype, .$data, .$.xpnd)
		y = .iterate.uv (.cdfuv.cks.eval.scalar, .$.internal.isw, .$kernel@F, .$bw,
			data$n, data$x, .$.internalw, .$.low, .$.constv, u=x)
#for non-local truncation with a lower bound, shift so F(lower) = 0
		if (.$trtype != "local" && .$.any.trunc.lower)
			y = y - .$.const.cdf.lower
		.scale.val (y, .$trtype, .$.any.trunc, .$.scalef)
	}
}
#Quantile function (univariate): probabilities are validated, then the
#inverse CDF is evaluated via a precomputed spline.
.qfuv.cks.eval = function (p)
{	. = .THAT ()
	.test.y.ok (p)
	.$spline.function (p)
}
#Multivariate PDF: iterate the multivariate scalar kernel over the data.
.pdfmv.cks.eval = function (x)
{	. = .THAT ()
	x = .val.u.mv (.$m, x, .$.any.trunc, .$.is.trunc, .$XLIM)
	.iterate.mv (.pdfmv.cks.eval.scalar, .$.internal.isw, .$kernel@f, .$m, .$bw,
		.$data$n, .$data$x, .$.internalw, u=x)
}
#Multivariate CDF: as above but with the kernel antiderivative (kernel@F)
#plus lower-bound/constant terms for truncation.
.cdfmv.cks.eval = function (x, ...)
{	. = .THAT ()
	x = .val.u.mv (.$m, x, .$.any.trunc, .$.is.trunc, .$XLIM)
	.iterate.mv (.cdfmv.cks.eval.scalar, .$.internal.isw, .$kernel@F, .$m, .$bw,
		.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
}
#Conditional PDF (one free variable, the .$m-th): truncation flags and
#limits are indexed by the free variable's row.
.pdfc.cks.eval = function (x)
{	. = .THAT ()
	x = .val.u.uv (x, .$.any.trunc, .$.is.trunc [.$m,], .$XLIM [.$m,])
	if (.$is.spline)
		.$spline.function (x)
	else
	{	.iterate.uv (.pdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@f, .$bw,
			.$data$n, .$data$x, .$.internalw, u=x)
	}
}
#Conditional CDF: same structure as the conditional PDF, using kernel@F.
.cdfc.cks.eval = function (x)
{	. = .THAT ()
	x = .val.u.uv (x, .$.any.trunc, .$.is.trunc [.$m,], .$XLIM [.$m,])
	if (.$is.spline)
		.$spline.function (x)
	else
	{	.iterate.uv (.cdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@F, .$bw,
			.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
	}
}
#Conditional quantile function: spline-based inverse of the conditional CDF.
.qfc.cks.eval = function (p)
{	. = .THAT ()
	.test.y.ok (p)
	.$spline.function (p)
}
#Multivariate conditional PDF: free variables are rows (ncon+1):m.
#NOTE(review): reuses the univariate-conditional scalar kernel
#(.pdfc.cks.eval.scalar) inside .iterate.mv — presumably intentional.
.pdfmvc.cks.eval = function (x)
{	. = .THAT ()
	J = (.$ncon + 1):(.$m)
	x = .val.u.mv (.$M, x, .$.any.trunc, .$.is.trunc [J,, drop=FALSE], .$XLIM [J,, drop=FALSE])
	.iterate.mv (.pdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@f, .$bw,
		.$data$n, .$data$x, .$.internalw, u=x)
}
#Multivariate conditional CDF: mirrors .pdfmvc.cks.eval with kernel@F.
.cdfmvc.cks.eval = function (x)
{	. = .THAT ()
	J = (.$ncon + 1):(.$m)
	x = .val.u.mv (.$M, x, .$.any.trunc, .$.is.trunc [J,, drop=FALSE], .$XLIM [J,, drop=FALSE])
	.iterate.mv (.cdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@F, .$bw,
		.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
}
#Chained quantile function: maps a matrix of probabilities to a matrix of
#quantiles (columns named after the variables), delegating to the external
#evaluator .chqf.cks.eval.ext.
.chqf.cks.eval = function (p)
{	this.f = .THIS ()
	p = .val.u.mv (this.f %$% "m", p)
	x = .chqf.cks.eval.ext (this.f, p)
	colnames (x) = this.f %$% "xnames"
	x
}
#Conditional CDF used internally by the chained quantile function.
.cdfc4chqf.cks.eval = function (x)
{	. = .THAT ()
	data = .$data
	.iterate.uv (.cdfc4chqf.cks.eval.scalar, .$ncon, .$is.weighted, .$conditions, .$kernel@f, .$kernel@F, .$bw, data$n, data$x, data$w, u=x)
}
#Conditional quantile function used internally by the chained quantile
#function; spline-only (no input validation beyond the spline's domain).
.qfc4chqf.cks.eval = function (p)
{	. = .THAT ()
	.$spline.function (p)
}
#Post-scale kernel-estimate values: multiply by the normalisation factor k
#when the estimate is truncated, except for "local" truncation, whose
#values are already correctly scaled.
.scale.val = function (y, trtype, trunc, k)
{	needs.scaling = trunc && trtype != "local"
	if (needs.scaling) k * y
	else y
}
#Pick the backing data for evaluation: the boundary-expanded copy (xpnd)
#when reflection-type truncation is active, otherwise the original data.
.select.bdata = function (trunc, trtype, data, xpnd)
{	use.expanded = trunc && trtype == "reflect"
	if (use.expanded) xpnd
	else data
}
| /R/ph_cks_eval1.r | no_license | cran/probhat | R | false | false | 4,109 | r | #probhat: Multivariate Generalized Kernel Smoothing and Related Statistical Methods
#Copyright (C), Abby Spurdle, 2019 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
.pdfuv.cks.eval = function (x)
{ . = .THAT ()
x = .val.u.uv (x, .$.any.trunc, .$.is.trunc, .$XLIM)
if (.$is.spline)
.$spline.function (x)
else
{ data = .select.bdata (.$.any.trunc, .$trtype, .$data, .$.xpnd)
y = .iterate.uv (.pdfuv.cks.eval.scalar, .$.internal.isw, .$kernel@f, .$bw, data$n, data$x, .$.internalw, u=x)
.scale.val (y, .$trtype, .$.any.trunc, .$.scalef)
}
}
.cdfuv.cks.eval = function (x, ...)
{ . = .THAT ()
x = .val.u.uv (x, .$.any.trunc, .$.is.trunc, .$XLIM)
if (.$is.spline)
.$spline.function (x)
else
{ data = .select.bdata (.$.any.trunc, .$trtype, .$data, .$.xpnd)
y = .iterate.uv (.cdfuv.cks.eval.scalar, .$.internal.isw, .$kernel@F, .$bw,
data$n, data$x, .$.internalw, .$.low, .$.constv, u=x)
if (.$trtype != "local" && .$.any.trunc.lower)
y = y - .$.const.cdf.lower
.scale.val (y, .$trtype, .$.any.trunc, .$.scalef)
}
}
.qfuv.cks.eval = function (p)
{ . = .THAT ()
.test.y.ok (p)
.$spline.function (p)
}
.pdfmv.cks.eval = function (x)
{ . = .THAT ()
x = .val.u.mv (.$m, x, .$.any.trunc, .$.is.trunc, .$XLIM)
.iterate.mv (.pdfmv.cks.eval.scalar, .$.internal.isw, .$kernel@f, .$m, .$bw,
.$data$n, .$data$x, .$.internalw, u=x)
}
.cdfmv.cks.eval = function (x, ...)
{ . = .THAT ()
x = .val.u.mv (.$m, x, .$.any.trunc, .$.is.trunc, .$XLIM)
.iterate.mv (.cdfmv.cks.eval.scalar, .$.internal.isw, .$kernel@F, .$m, .$bw,
.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
}
.pdfc.cks.eval = function (x)
{ . = .THAT ()
x = .val.u.uv (x, .$.any.trunc, .$.is.trunc [.$m,], .$XLIM [.$m,])
if (.$is.spline)
.$spline.function (x)
else
{ .iterate.uv (.pdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@f, .$bw,
.$data$n, .$data$x, .$.internalw, u=x)
}
}
.cdfc.cks.eval = function (x)
{ . = .THAT ()
x = .val.u.uv (x, .$.any.trunc, .$.is.trunc [.$m,], .$XLIM [.$m,])
if (.$is.spline)
.$spline.function (x)
else
{ .iterate.uv (.cdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@F, .$bw,
.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
}
}
.qfc.cks.eval = function (p)
{ . = .THAT ()
.test.y.ok (p)
.$spline.function (p)
}
.pdfmvc.cks.eval = function (x)
{ . = .THAT ()
J = (.$ncon + 1):(.$m)
x = .val.u.mv (.$M, x, .$.any.trunc, .$.is.trunc [J,, drop=FALSE], .$XLIM [J,, drop=FALSE])
.iterate.mv (.pdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@f, .$bw,
.$data$n, .$data$x, .$.internalw, u=x)
}
.cdfmvc.cks.eval = function (x)
{ . = .THAT ()
J = (.$ncon + 1):(.$m)
x = .val.u.mv (.$M, x, .$.any.trunc, .$.is.trunc [J,, drop=FALSE], .$XLIM [J,, drop=FALSE])
.iterate.mv (.cdfc.cks.eval.scalar, .$.constant, .$.v, .$M, .$ncon, .$.internal.isw, .$kernel@F, .$bw,
.$data$n, .$data$x, .$.internalw, .$.low, .$.constv, u=x)
}
.chqf.cks.eval = function (p)
{ this.f = .THIS ()
p = .val.u.mv (this.f %$% "m", p)
x = .chqf.cks.eval.ext (this.f, p)
colnames (x) = this.f %$% "xnames"
x
}
.cdfc4chqf.cks.eval = function (x)
{ . = .THAT ()
data = .$data
.iterate.uv (.cdfc4chqf.cks.eval.scalar, .$ncon, .$is.weighted, .$conditions, .$kernel@f, .$kernel@F, .$bw, data$n, data$x, data$w, u=x)
}
.qfc4chqf.cks.eval = function (p)
{ . = .THAT ()
.$spline.function (p)
}
#Post-scale kernel-estimate values: multiply by the normalisation factor k
#when the estimate is truncated, except for "local" truncation, whose
#values are already correctly scaled.
.scale.val = function (y, trtype, trunc, k)
{	needs.scaling = trunc && trtype != "local"
	if (needs.scaling) k * y
	else y
}
#Pick the backing data for evaluation: the boundary-expanded copy (xpnd)
#when reflection-type truncation is active, otherwise the original data.
.select.bdata = function (trunc, trtype, data, xpnd)
{	use.expanded = trunc && trtype == "reflect"
	if (use.expanded) xpnd
	else data
}
|
#' Is an object an expression?
#'
#' @description
#'
#' `is_expression()` tests for expressions, the set of objects that can be
#' obtained from parsing R code. An expression can be one of two
#' things: either a symbolic object (for which `is_symbolic()` returns
#' `TRUE`), or a syntactic literal (testable with
#' `is_syntactic_literal()`). Technically, calls can contain any R
#' object, not necessarily symbolic objects or syntactic
#' literals. However, this only happens in artificial
#' situations. Expressions as we define them only contain numbers,
#' strings, `NULL`, symbols, and calls: this is the complete set of R
#' objects that can be created when R parses source code (e.g. from
#' using [parse_expr()]).
#'
#' Note that we are using the term expression in its colloquial sense
#' and not to refer to [expression()] vectors, a data type that wraps
#' expressions in a vector and which isn't used much in modern R code.
#'
#' @details
#'
#' `is_symbolic()` returns `TRUE` for symbols and calls (objects with
#' type `language`). Symbolic objects are replaced by their value
#' during evaluation. Literals are the complement of symbolic
#' objects. They are their own value and return themselves during
#' evaluation.
#'
#' `is_syntactic_literal()` is a predicate that returns `TRUE` for the
#' subset of literals that are created by R when parsing text (see
#' [parse_expr()]): numbers, strings and `NULL`. Along with symbols,
#' these literals are the terminating nodes in an AST.
#'
#' Note that in the most general sense, a literal is any R object that
#' evaluates to itself and that can be evaluated in the empty
#' environment. For instance, `quote(c(1, 2))` is not a literal, it is
#' a call. However, the result of evaluating it in [base_env()] is a
#' literal (in this case an atomic vector).
#'
#' Pairlists are also a kind of language objects. However, since they
#' are mostly an internal data structure, `is_expression()` returns `FALSE`
#' for pairlists. You can use `is_pairlist()` to explicitly check for
#' them. Pairlists are the data structure for function arguments. They
#' usually do not arise from R code because subsetting a call is a
#' type-preserving operation. However, you can obtain the pairlist of
#' arguments by taking the CDR of the call object from C code. The
#' rlang function [node_cdr()] will do it from R. Another way in
#' which pairlist of arguments arise is by extracting the argument
#' list of a closure with [base::formals()] or [fn_fmls()].
#'
#' @param x An object to test.
#' @seealso [is_call()] for a call predicate.
#' @export
#' @examples
#' q1 <- quote(1)
#' is_expression(q1)
#' is_syntactic_literal(q1)
#'
#' q2 <- quote(x)
#' is_expression(q2)
#' is_symbol(q2)
#'
#' q3 <- quote(x + 1)
#' is_expression(q3)
#' is_call(q3)
#'
#'
#' # Atomic expressions are the terminating nodes of a call tree:
#' # NULL or a scalar atomic vector:
#' is_syntactic_literal("string")
#' is_syntactic_literal(NULL)
#'
#' is_syntactic_literal(letters)
#' is_syntactic_literal(quote(call()))
#'
#' # Parsable literals have the property of being self-quoting:
#' identical("foo", quote("foo"))
#' identical(1L, quote(1L))
#' identical(NULL, quote(NULL))
#'
#' # Like any literals, they can be evaluated within the empty
#' # environment:
#' eval_bare(quote(1L), empty_env())
#'
#' # Whereas it would fail for symbolic expressions:
#' # eval_bare(quote(c(1L, 2L)), empty_env())
#'
#'
#' # Pairlists are also language objects representing argument lists.
#' # You will usually encounter them with extracted formals:
#' fmls <- formals(is_expression)
#' typeof(fmls)
#'
#' # Since they are mostly an internal data structure, is_expression()
#' # returns FALSE for pairlists, so you will have to check explicitly
#' # for them:
#' is_expression(fmls)
#' is_pairlist(fmls)
is_expression <- function(x) {
  # An expression is either symbolic (a symbol or a call) or a literal
  # that the parser can produce (number, string, NULL).
  is_symbolic(x) || is_syntactic_literal(x)
}
#' @export
#' @rdname is_expression
is_syntactic_literal <- function(x) {
  # TRUE for values the R parser can emit directly: NULL, scalar logical /
  # integer / double / character, and scalar complex that is NA or purely
  # imaginary (e.g. 1i). Everything else (vectors, calls, symbols, lists)
  # is not a syntactic literal.
  type <- typeof(x)
  if (type == "NULL") {
    return(TRUE)
  }
  if (type %in% c("logical", "integer", "double", "character")) {
    return(length(x) == 1)
  }
  if (type == "complex") {
    if (length(x) != 1) {
      return(FALSE)
    }
    return(is_na(x) || Re(x) == 0)
  }
  FALSE
}
#' @export
#' @rdname is_expression
is_symbolic <- function(x) {
  # Symbols and calls ("language" objects) are replaced by their value
  # during evaluation; everything else evaluates to itself.
  type <- typeof(x)
  type == "language" || type == "symbol"
}
#' Turn an expression to a label
#'
#' @description
#'
#' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("questioning")}
#'
#' `expr_text()` turns the expression into a single string, which
#' might be multi-line. `expr_name()` is suitable for formatting
#' names. It works best with symbols and scalar types, but also
#' accepts calls. `expr_label()` formats the expression nicely for use
#' in messages.
#'
#' @param expr An expression to labellise.
#'
#' @section Life cycle:
#'
#' These functions are in the questioning stage because they are
#' redundant with the `quo_` variants and do not handle quosures.
#'
#' @examples
#' # To labellise a function argument, first capture it with
#' # substitute():
#' fn <- function(x) expr_label(substitute(x))
#' fn(x:y)
#'
#' # Strings are encoded
#' expr_label("a\nb")
#'
#' # Names and expressions are quoted with ``
#' expr_label(quote(x))
#' expr_label(quote(a + b + c))
#'
#' # Long expressions are collapsed
#' expr_label(quote(foo({
#' 1 + 2
#' print(x)
#' })))
#' @export
expr_label <- function(expr) {
  # Format an expression for user-facing messages: strings are quoted and
  # encoded, other atomics formatted plainly, symbols and calls wrapped in
  # backticks (long calls abbreviated via deparse_one()).
  if (is.character(expr)) {
    return(encodeString(expr, quote = '"'))
  }
  if (is.atomic(expr)) {
    return(format(expr))
  }
  if (is.name(expr)) {
    return(sprintf("`%s`", as.character(expr)))
  }
  sprintf("`%s`", deparse_one(expr))
}
#' @rdname expr_label
#' @export
expr_name <- function(expr) {
  # Format an expression as a short single-line name: symbols verbatim,
  # calls deparsed and truncated at the first newline, scalar atomics as
  # bare values; anything else is an error.
  switch_type(expr,
    NULL = "NULL",
    symbol = as_string(expr),
    quosure = ,
    language =
      # Data pronouns (e.g. .data$x) get their pronoun name; other calls
      # are abbreviated and cut at the first newline.
      if (is_data_pronoun(expr)) {
        data_pronoun_name(expr) %||% "<unknown>"
      } else {
        name <- deparse_one(expr)
        name <- gsub("\n.*$", "...", name)
        name
      },
    if (is_scalar_atomic(expr)) {
      # So 1L is translated to "1" and not "1L"
      as.character(expr)
    } else if (length(expr) == 1) {
      name <- expr_text(expr)
      name <- gsub("\n.*$", "...", name)
      name
    } else {
      abort("`expr` must quote a symbol, scalar, or call")
    }
  )
}
#' @rdname expr_label
#' @export
#' @param width Width of each line.
#' @param nlines Maximum number of lines to extract.
expr_text <- function(expr, width = 60L, nlines = Inf) {
  # Symbols bypass deparse() so unicode tags are translated (see sym_text()).
  if (is_symbol(expr)) {
    return(sym_text(expr))
  }
  str <- deparse(expr, width.cutoff = width, backtick = TRUE)
  # Truncate long output: keep nlines - 1 lines and append a "..." marker.
  if (length(str) > nlines) {
    str <- c(str[seq_len(nlines - 1)], "...")
  }
  paste0(str, collapse = "\n")
}
sym_text <- function(sym) {
  # Use as_string() to translate unicode tags
  text <- as_string(sym)
  # Wrap non-syntactic names in backticks so they print as parsable code.
  if (needs_backticks(text)) {
    text <- sprintf("`%s`", text)
  }
  text
}
deparse_one <- function(expr) {
  # Deparse an expression to (at most) one logical line, abbreviating
  # multi-line constructs with "...": function definitions lose their
  # body, braced blocks become "{ ... }", other calls keep only the callee.
  str <- deparse(expr, 60L)
  if (length(str) > 1) {
    if (is_call(expr, function_sym)) {
      # expr[[3]] is the function body; replace it and re-deparse.
      expr[[3]] <- quote(...)
      str <- deparse(expr, 60L)
    } else if (is_call(expr, brace_sym)) {
      str <- "{ ... }"
    } else if (is_call(expr)) {
      # Keep the callee, collapse all arguments to "...".
      str <- deparse(call2(expr[[1]], quote(...)), 60L)
    }
    str <- paste(str, collapse = "\n")
  }
  str
}
#' Set and get an expression
#'
#' These helpers are useful to make your function work generically
#' with quosures and raw expressions. First call `get_expr()` to
#' extract an expression. Once you're done processing the expression,
#' call `set_expr()` on the original object to update the expression.
#' You can return the result of `set_expr()`, either a formula or an
#' expression depending on the input type. Note that `set_expr()` does
#' not change its input, it creates a new object.
#'
#' @param x An expression, closure, or one-sided formula. In addition,
#'   `set_expr()` accepts frames.
#' @param value An updated expression.
#' @param default A default expression to return when `x` is not an
#' expression wrapper. Defaults to `x` itself.
#' @return The updated original input for `set_expr()`. A raw
#' expression for `get_expr()`.
#' @seealso [quo_get_expr()] and [quo_set_expr()] for versions of
#' [get_expr()] and [set_expr()] that only work on quosures.
#' @export
#' @examples
#' f <- ~foo(bar)
#' e <- quote(foo(bar))
#' frame <- identity(identity(ctxt_frame()))
#'
#' get_expr(f)
#' get_expr(e)
#' get_expr(frame)
#'
#' set_expr(f, quote(baz))
#' set_expr(e, quote(baz))
set_expr <- function(x, value) {
  # Replace the wrapped expression while preserving the wrapper type:
  # quosures via quo_set_expr(), formulas via their RHS, closures via their
  # body; any other input is replaced wholesale. Returns a new object
  # rather than modifying `x` in place.
  if (is_quosure(x)) {
    x <- quo_set_expr(x, value)
  } else if (is_formula(x)) {
    f_rhs(x) <- value
  } else if (is_closure(x)) {
    body(x) <- value
  } else {
    x <- value
  }
  x
}
#' @rdname set_expr
#' @export
get_expr <- function(x, default = x) {
  # Implemented in C (rlang_get_expression): unwraps expression wrappers
  # (quosures, formulas, closures, frames per the docs above), returning
  # `default` (x itself by default) for plain expressions.
  .Call(rlang_get_expression, x, default)
}
expr_type_of <- function(x) {
  # Classify an expression for dispatch: "missing", one of the language
  # types ("symbol", "language", "pairlist", "NULL"), or "literal" for
  # everything else.
  if (missing(x)) {
    return("missing")
  }
  switch(typeof(x),
    symbol = ,
    language = ,
    pairlist = ,
    "NULL" = typeof(x),
    "literal"
  )
}
switch_expr <- function(.x, ...) {
  # Dispatch on the expression type of .x ("missing", "symbol", "language",
  # "pairlist", "NULL", or "literal"); branches supplied via ...
  switch(expr_type_of(.x), ...)
}
#' Print an expression
#'
#' @description
#'
#' `expr_print()`, powered by `expr_deparse()`, is an alternative
#' printer for R expressions with a few improvements over the base R
#' printer.
#'
#' * It colourises [quosures][quotation] according to their environment.
#' Quosures from the global environment are printed normally while
#' quosures from local environments are printed in unique colour (or
#' in italic when all colours are taken).
#'
#' * It wraps inlined objects in angular brackets. For instance, an
#' integer vector unquoted in a function call (e.g.
#' `expr(foo(!!(1:3)))`) is printed like this: `foo(<int: 1L, 2L,
#' 3L>)` while by default R prints the code to create that vector:
#' `foo(1:3)` which is ambiguous.
#'
#' * It respects the width boundary (from the global option `width`)
#' in more cases.
#'
#' @param x An object or expression to print.
#' @param width The width of the deparsed or printed expression.
#' Defaults to the global option `width`.
#'
#' @export
#' @examples
#' # It supports any object. Non-symbolic objects are always printed
#' # within angular brackets:
#' expr_print(1:3)
#' expr_print(function() NULL)
#'
#' # Contrast this to how the code to create these objects is printed:
#' expr_print(quote(1:3))
#' expr_print(quote(function() NULL))
#'
#' # The main cause of non-symbolic objects in expressions is
#' # quasiquotation:
#' expr_print(expr(foo(!!(1:3))))
#'
#'
#' # Quosures from the global environment are printed normally:
#' expr_print(quo(foo))
#' expr_print(quo(foo(!!quo(bar))))
#'
#' # Quosures from local environments are colourised according to
#' # their environments (if you have crayon installed):
#' local_quo <- local(quo(foo))
#' expr_print(local_quo)
#'
#' wrapper_quo <- local(quo(bar(!!local_quo, baz)))
#' expr_print(wrapper_quo)
expr_print <- function(x, width = peek_option("width")) {
  # Deparse with the rlang quosure-aware printer and write to the console.
  cat_line(expr_deparse(x, width = width))
}
#' @rdname expr_print
#' @export
expr_deparse <- function(x, width = peek_option("width")) {
  # Build a fresh quosure-aware deparser for the requested width, then
  # deparse x with it.
  deparser <- new_quo_deparser(width = width)
  quo_deparse(x, deparser)
}
| /R/expr.R | no_license | COMODr/rlang | R | false | false | 11,253 | r | #' Is an object an expression?
#'
#' @description
#'
#' `is_expression()` tests for expressions, the set of objects that can be
#' obtained from parsing R code. An expression can be one of two
#' things: either a symbolic object (for which `is_symbolic()` returns
#' `TRUE`), or a syntactic literal (testable with
#' `is_syntactic_literal()`). Technically, calls can contain any R
#' object, not necessarily symbolic objects or syntactic
#' literals. However, this only happens in artificial
#' situations. Expressions as we define them only contain numbers,
#' strings, `NULL`, symbols, and calls: this is the complete set of R
#' objects that can be created when R parses source code (e.g. from
#' using [parse_expr()]).
#'
#' Note that we are using the term expression in its colloquial sense
#' and not to refer to [expression()] vectors, a data type that wraps
#' expressions in a vector and which isn't used much in modern R code.
#'
#' @details
#'
#' `is_symbolic()` returns `TRUE` for symbols and calls (objects with
#' type `language`). Symbolic objects are replaced by their value
#' during evaluation. Literals are the complement of symbolic
#' objects. They are their own value and return themselves during
#' evaluation.
#'
#' `is_syntactic_literal()` is a predicate that returns `TRUE` for the
#' subset of literals that are created by R when parsing text (see
#' [parse_expr()]): numbers, strings and `NULL`. Along with symbols,
#' these literals are the terminating nodes in an AST.
#'
#' Note that in the most general sense, a literal is any R object that
#' evaluates to itself and that can be evaluated in the empty
#' environment. For instance, `quote(c(1, 2))` is not a literal, it is
#' a call. However, the result of evaluating it in [base_env()] is a
#' literal (in this case an atomic vector).
#'
#' Pairlists are also a kind of language objects. However, since they
#' are mostly an internal data structure, `is_expression()` returns `FALSE`
#' for pairlists. You can use `is_pairlist()` to explicitly check for
#' them. Pairlists are the data structure for function arguments. They
#' usually do not arise from R code because subsetting a call is a
#' type-preserving operation. However, you can obtain the pairlist of
#' arguments by taking the CDR of the call object from C code. The
#' rlang function [node_cdr()] will do it from R. Another way in
#' which pairlist of arguments arise is by extracting the argument
#' list of a closure with [base::formals()] or [fn_fmls()].
#'
#' @param x An object to test.
#' @seealso [is_call()] for a call predicate.
#' @export
#' @examples
#' q1 <- quote(1)
#' is_expression(q1)
#' is_syntactic_literal(q1)
#'
#' q2 <- quote(x)
#' is_expression(q2)
#' is_symbol(q2)
#'
#' q3 <- quote(x + 1)
#' is_expression(q3)
#' is_call(q3)
#'
#'
#' # Atomic expressions are the terminating nodes of a call tree:
#' # NULL or a scalar atomic vector:
#' is_syntactic_literal("string")
#' is_syntactic_literal(NULL)
#'
#' is_syntactic_literal(letters)
#' is_syntactic_literal(quote(call()))
#'
#' # Parsable literals have the property of being self-quoting:
#' identical("foo", quote("foo"))
#' identical(1L, quote(1L))
#' identical(NULL, quote(NULL))
#'
#' # Like any literals, they can be evaluated within the empty
#' # environment:
#' eval_bare(quote(1L), empty_env())
#'
#' # Whereas it would fail for symbolic expressions:
#' # eval_bare(quote(c(1L, 2L)), empty_env())
#'
#'
#' # Pairlists are also language objects representing argument lists.
#' # You will usually encounter them with extracted formals:
#' fmls <- formals(is_expression)
#' typeof(fmls)
#'
#' # Since they are mostly an internal data structure, is_expression()
#' # returns FALSE for pairlists, so you will have to check explicitly
#' # for them:
#' is_expression(fmls)
#' is_pairlist(fmls)
is_expression <- function(x) {
is_symbolic(x) || is_syntactic_literal(x)
}
#' @export
#' @rdname is_expression
is_syntactic_literal <- function(x) {
switch(typeof(x),
NULL = {
TRUE
},
logical = ,
integer = ,
double = ,
character = {
length(x) == 1
},
complex = {
if (length(x) != 1) {
return(FALSE)
}
is_na(x) || Re(x) == 0
},
FALSE
)
}
#' @export
#' @rdname is_expression
is_symbolic <- function(x) {
typeof(x) %in% c("language", "symbol")
}
#' Turn an expression to a label
#'
#' @description
#'
#' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("questioning")}
#'
#' `expr_text()` turns the expression into a single string, which
#' might be multi-line. `expr_name()` is suitable for formatting
#' names. It works best with symbols and scalar types, but also
#' accepts calls. `expr_label()` formats the expression nicely for use
#' in messages.
#'
#' @param expr An expression to labellise.
#'
#' @section Life cycle:
#'
#' These functions are in the questioning stage because they are
#' redundant with the `quo_` variants and do not handle quosures.
#'
#' @examples
#' # To labellise a function argument, first capture it with
#' # substitute():
#' fn <- function(x) expr_label(substitute(x))
#' fn(x:y)
#'
#' # Strings are encoded
#' expr_label("a\nb")
#'
#' # Names and expressions are quoted with ``
#' expr_label(quote(x))
#' expr_label(quote(a + b + c))
#'
#' # Long expressions are collapsed
#' expr_label(quote(foo({
#' 1 + 2
#' print(x)
#' })))
#' @export
expr_label <- function(expr) {
if (is.character(expr)) {
encodeString(expr, quote = '"')
} else if (is.atomic(expr)) {
format(expr)
} else if (is.name(expr)) {
paste0("`", as.character(expr), "`")
} else {
chr <- deparse_one(expr)
paste0("`", chr, "`")
}
}
#' @rdname expr_label
#' @export
expr_name <- function(expr) {
switch_type(expr,
NULL = "NULL",
symbol = as_string(expr),
quosure = ,
language =
if (is_data_pronoun(expr)) {
data_pronoun_name(expr) %||% "<unknown>"
} else {
name <- deparse_one(expr)
name <- gsub("\n.*$", "...", name)
name
},
if (is_scalar_atomic(expr)) {
# So 1L is translated to "1" and not "1L"
as.character(expr)
} else if (length(expr) == 1) {
name <- expr_text(expr)
name <- gsub("\n.*$", "...", name)
name
} else {
abort("`expr` must quote a symbol, scalar, or call")
}
)
}
#' @rdname expr_label
#' @export
#' @param width Width of each line.
#' @param nlines Maximum number of lines to extract.
expr_text <- function(expr, width = 60L, nlines = Inf) {
if (is_symbol(expr)) {
return(sym_text(expr))
}
str <- deparse(expr, width.cutoff = width, backtick = TRUE)
if (length(str) > nlines) {
str <- c(str[seq_len(nlines - 1)], "...")
}
paste0(str, collapse = "\n")
}
sym_text <- function(sym) {
# Use as_string() to translate unicode tags
text <- as_string(sym)
if (needs_backticks(text)) {
text <- sprintf("`%s`", text)
}
text
}
deparse_one <- function(expr) {
str <- deparse(expr, 60L)
if (length(str) > 1) {
if (is_call(expr, function_sym)) {
expr[[3]] <- quote(...)
str <- deparse(expr, 60L)
} else if (is_call(expr, brace_sym)) {
str <- "{ ... }"
} else if (is_call(expr)) {
str <- deparse(call2(expr[[1]], quote(...)), 60L)
}
str <- paste(str, collapse = "\n")
}
str
}
#' Set and get an expression
#'
#' These helpers are useful to make your function work generically
#' with quosures and raw expressions. First call `get_expr()` to
#' extract an expression. Once you're done processing the expression,
#' call `set_expr()` on the original object to update the expression.
#' You can return the result of `set_expr()`, either a formula or an
#' expression depending on the input type. Note that `set_expr()` does
#' not change its input, it creates a new object.
#'
#' @param x An expression, closure, or one-sided formula. In addition,
#'   `set_expr()` accepts frames.
#' @param value An updated expression.
#' @param default A default expression to return when `x` is not an
#' expression wrapper. Defaults to `x` itself.
#' @return The updated original input for `set_expr()`. A raw
#' expression for `get_expr()`.
#' @seealso [quo_get_expr()] and [quo_set_expr()] for versions of
#' [get_expr()] and [set_expr()] that only work on quosures.
#' @export
#' @examples
#' f <- ~foo(bar)
#' e <- quote(foo(bar))
#' frame <- identity(identity(ctxt_frame()))
#'
#' get_expr(f)
#' get_expr(e)
#' get_expr(frame)
#'
#' set_expr(f, quote(baz))
#' set_expr(e, quote(baz))
set_expr <- function(x, value) {
if (is_quosure(x)) {
x <- quo_set_expr(x, value)
} else if (is_formula(x)) {
f_rhs(x) <- value
} else if (is_closure(x)) {
body(x) <- value
} else {
x <- value
}
x
}
#' @rdname set_expr
#' @export
get_expr <- function(x, default = x) {
.Call(rlang_get_expression, x, default)
}
expr_type_of <- function(x) {
if (missing(x)) {
return("missing")
}
type <- typeof(x)
if (type %in% c("symbol", "language", "pairlist", "NULL")) {
type
} else {
"literal"
}
}
switch_expr <- function(.x, ...) {
switch(expr_type_of(.x), ...)
}
#' Print an expression
#'
#' @description
#'
#' `expr_print()`, powered by `expr_deparse()`, is an alternative
#' printer for R expressions with a few improvements over the base R
#' printer.
#'
#' * It colourises [quosures][quotation] according to their environment.
#' Quosures from the global environment are printed normally while
#' quosures from local environments are printed in unique colour (or
#' in italic when all colours are taken).
#'
#' * It wraps inlined objects in angular brackets. For instance, an
#' integer vector unquoted in a function call (e.g.
#' `expr(foo(!!(1:3)))`) is printed like this: `foo(<int: 1L, 2L,
#' 3L>)` while by default R prints the code to create that vector:
#' `foo(1:3)` which is ambiguous.
#'
#' * It respects the width boundary (from the global option `width`)
#' in more cases.
#'
#' @param x An object or expression to print.
#' @param width The width of the deparsed or printed expression.
#' Defaults to the global option `width`.
#'
#' @export
#' @examples
#' # It supports any object. Non-symbolic objects are always printed
#' # within angular brackets:
#' expr_print(1:3)
#' expr_print(function() NULL)
#'
#' # Contrast this to how the code to create these objects is printed:
#' expr_print(quote(1:3))
#' expr_print(quote(function() NULL))
#'
#' # The main cause of non-symbolic objects in expressions is
#' # quasiquotation:
#' expr_print(expr(foo(!!(1:3))))
#'
#'
#' # Quosures from the global environment are printed normally:
#' expr_print(quo(foo))
#' expr_print(quo(foo(!!quo(bar))))
#'
#' # Quosures from local environments are colourised according to
#' # their environments (if you have crayon installed):
#' local_quo <- local(quo(foo))
#' expr_print(local_quo)
#'
#' wrapper_quo <- local(quo(bar(!!local_quo, baz)))
#' expr_print(wrapper_quo)
expr_print <- function(x, width = peek_option("width")) {
cat_line(expr_deparse(x, width = width))
}
#' @rdname expr_print
#' @export
expr_deparse <- function(x, width = peek_option("width")) {
deparser <- new_quo_deparser(width = width)
quo_deparse(x, deparser)
}
|
### ------------------------------------------------------
### WEBSCRAPPING DONNEES ELECTORALES MINISTERE INTERIEUR (2017)
### RESULTATS PAR COMMUNES EN IDF
### ------------------------------------------------------
library(xml2)
library(rvest)
# Links of the pages to read
# Base URL of the 2017 French presidential election results (round "011")
# published by the Ministry of the Interior.
site <- "http://elections.interieur.gouv.fr/presidentielle-2017/011"
# Ile-de-France departement codes (Paris plus inner/outer ring).
departements <- c("075","077","078","091","092","093","094","095")
# PARIS
# Paris ("075") is scraped first; its commune links seed `links`.
dep <- departements[1]
page <- paste(dep,"html",sep=".")
url <- paste (site,dep,page,sep="/")
webpage <- read_html(x = url)
# Collect every result-page link listed under the .offset2 container.
l <- webpage %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
# NOTE(review): "075" is hard-coded here and only valid because dep is
# Paris at this point.
l <- gsub("../../011/075/", "", l)
l <- paste(dep,l,sep="/")
links <- l
links
# OTHER DEPARTEMENTS
# Same scrape as for Paris, accumulating each departement's links into l2.
for(i in 2:length(departements)){
dep <- departements[i]
page <- paste(dep,"html",sep=".")
url <- paste (site,dep,page,sep="/")
webpage <- read_html(x = url)
l <- webpage %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
#l <- l[3:(length(l)-1)]
# NOTE(review): `tmp` is computed but never used below.
tmp <- paste("../../01/",dep,sep="")
l <- gsub(paste("../../011/",dep,"/",sep=""), "", l)
l <- paste(dep,l,sep="/")
if (i ==2){l2 <- l} else {l2 <- c(l2,l)}
}
# Visit each intermediate page in l2 and append its commune-level links
# (entries after the alphabetical index, which contains capital letters).
for (i in 1:length(l2)) {
url <- paste(site,l2[i],sep="/")
w <- read_html(x = url)
l <- w %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
#l <- l[4:(length(l)-1)]
l <- gsub("../../011/", "", l)
# Skip index entries containing capital letters; keep the rest.
start <- length(grep("[A-Z]", l))+1
stop <- length(l)
l <- l[start:stop]
#l <- strsplit(l2[i],"/")[[1]][1]
links <- c(links,l)
}
################################################
# Build one row per commune: Melenchon's vote count and share, plus the
# abstention and valid-vote ("exprimes") counts, then export as CSV.

# Parse a French-formatted integer such as "12 345" -> 12345.
parse_count <- function(x) as.numeric(gsub("[^0-9]", "", x))
# Parse a French-formatted percentage such as "19,58" or "19,58 %" -> 19.58.
# (More robust than the original gsub("\\D", ".", x), which produced NA
# whenever the cell contained more than one non-digit character.)
parse_pct <- function(x) as.numeric(sub(",", ".", gsub("[^0-9,]", "", x)))

rows <- vector("list", length(links))  # preallocate: avoids O(n^2) growth
for (i in seq_along(links)) {
  url <- paste(site, links[i], sep = "/")
  webpage <- read_html(x = url)
  name <- webpage %>% html_nodes(".row-fluid .pub-fil-ariane") %>% html_nodes("a") %>% html_text()
  results <- webpage %>% html_nodes("table") %>% html_table(header = TRUE)
  # Locate Melenchon's row in the candidate table (row order varies by
  # page); which() avoids silently reusing a stale index from the previous
  # iteration when the row is absent.
  index <- which(results[[2]][[1]] == "M. Jean-Luc MÉLENCHON")[1]
  rows[[i]] <- data.frame(
    link = links[i],
    name = name[4],
    nb_jlm2017 = parse_count(results[[2]][index, 2]),
    tx_jlm2017 = parse_pct(results[[2]][index, 4]),
    abstention = parse_count(results[[3]][2, 2]),
    exprimés = parse_count(results[[3]][6, 2]),
    stringsAsFactors = FALSE
  )
}
df <- do.call(rbind, rows)
# Export the file
write.csv(df, file = "data/results_comidf_2017.csv")
| /VoteJLM/Extract_Presidentielles_comidf_jlm2017.R | no_license | neocarto/ReproducibleCartography | R | false | false | 2,436 | r | ### ------------------------------------------------------
### WEBSCRAPPING DONNEES ELECTORALES MINISTERE INTERIEUR (2017)
### RESULTATS PAR COMMUNES EN IDF
### ------------------------------------------------------
library(xml2)
library(rvest)
# URLs of the pages to scrape (Ministry of the Interior results site,
# 2017 presidential election, contest code "011")
site <- "http://elections.interieur.gouv.fr/presidentielle-2017/011"
departements <- c("075","077","078","091","092","093","094","095")
# PARIS: handled separately because its hrefs carry a Paris-specific
# "../../011/075/" prefix that must be stripped before re-rooting on the
# departement code
dep <- departements[1]
page <- paste(dep,"html",sep=".")
url <- paste (site,dep,page,sep="/")
webpage <- read_html(x = url)
# collect every href found under the .offset2 container of the page
l <- webpage %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
l <- gsub("../../011/075/", "", l)
l <- paste(dep,l,sep="/")
links <- l
links
# OTHER DEPARTEMENTS: first gather the intermediate department-level links
# into l2, then (second loop) follow them down to the commune pages
for(i in 2:length(departements)){
dep <- departements[i]
page <- paste(dep,"html",sep=".")
url <- paste (site,dep,page,sep="/")
webpage <- read_html(x = url)
l <- webpage %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
#l <- l[3:(length(l)-1)]
# NOTE(review): `tmp` is computed but never used afterwards
tmp <- paste("../../01/",dep,sep="")
l <- gsub(paste("../../011/",dep,"/",sep=""), "", l)
l <- paste(dep,l,sep="/")
if (i ==2){l2 <- l} else {l2 <- c(l2,l)}
}
# follow each intermediate page and append its commune-level links
for (i in 1:length(l2)) {
url <- paste(site,l2[i],sep="/")
w <- read_html(x = url)
l <- w %>% html_nodes(".offset2") %>% html_nodes("a")%>% html_attr("href")
#l <- l[4:(length(l)-1)]
l <- gsub("../../011/", "", l)
# skip the leading hrefs that contain an upper-case letter and keep the
# rest -- NOTE(review): assumes commune hrefs are lower-case/digits and
# that the upper-case entries all come first; confirm against the site
start <- length(grep("[A-Z]", l))+1
stop <- length(l)
l <- l[start:stop]
#l <- strsplit(l2[i],"/")[[1]][1]
links <- c(links,l)
}
################################################"
# Build a first data frame of results, one row per commune link
df <- data.frame()
for (i in seq_along(links)) {
  df[i, "link"] <- links[i]
  url <- paste(site, links[i], sep = "/")
  webpage <- read_html(x = url)
  name <- webpage %>% html_nodes(".row-fluid .pub-fil-ariane") %>% html_nodes("a") %>% html_text()
  df[i, "name"] <- name[4]
  results <- webpage %>% html_nodes("table") %>% html_table(header = T)
  # Locate the Melenchon row by matching the candidate name in the first
  # column instead of scanning a hard-coded 1:11 range: the original loop
  # kept a stale `index` from the previous page whenever the name did not
  # appear in rows 1..11, silently recording the wrong candidate's numbers.
  index <- which(results[[2]][[1]] == "M. Jean-Luc MÉLENCHON")[1]
  df[i, "nb_jlm2017"] <- as.numeric(gsub("\\D", "", results[[2]][index, 2]))
  df[i, "tx_jlm2017"] <- as.numeric(gsub("\\D", ".", results[[2]][index, 4]))
  df[i, "abstention"] <- as.numeric(gsub("\\D", "", results[[3]][2, 2]))
  df[i, "exprimés"] <- as.numeric(gsub("\\D", "", results[[3]][6, 2]))
}
# Export du fichier
write.csv(df,file = "data/results_comidf_2017.csv")
|
## https://github.com/lme4/lme4/issues/59
library(lme4)
dat <- read.csv(system.file("testdata","dat20101314.csv",package="lme4"))
NMcopy <- lme4:::Nelder_Mead
cc <- capture.output(lmer(y ~ (1|Operator)+(1|Part)+(1|Part:Operator), data=dat,
control=
lmerControl("NMcopy",
optCtrl= list(iprint=20))))
## check that printing goes through step 140 twice and up to 240 once
## Count how many captured output lines in `str` report NM iteration `n`.
## FIX: the original ignored its `str` argument and grepped the global `cc`
## instead, so it only worked by accident when called as findStep(cc, n).
findStep <- function(str, n) sum(grepl(paste0("^\\(NM\\) ", n, ": "), str))
stopifnot(findStep(cc,140)==2 && findStep(cc,240)==1)
| /tests/testOptControl.R | no_license | jknowles/lme4 | R | false | false | 586 | r | ## https://github.com/lme4/lme4/issues/59
library(lme4)
dat <- read.csv(system.file("testdata","dat20101314.csv",package="lme4"))
NMcopy <- lme4:::Nelder_Mead
cc <- capture.output(lmer(y ~ (1|Operator)+(1|Part)+(1|Part:Operator), data=dat,
control=
lmerControl("NMcopy",
optCtrl= list(iprint=20))))
## check that printing goes through step 140 twice and up to 240 once
## Count how many captured output lines in `str` report NM iteration `n`.
## FIX: the original ignored its `str` argument and grepped the global `cc`
## instead, so it only worked by accident when called as findStep(cc, n).
findStep <- function(str, n) sum(grepl(paste0("^\\(NM\\) ", n, ": "), str))
stopifnot(findStep(cc,140)==2 && findStep(cc,240)==1)
|
library('shiny')
library('shinyWidgets')
ui <- fluidPage(
tags$head(
tags$style(HTML("
body {
background-color: white;
}"))),
align='center',
prettyRadioButtons(inputId="plot_type", label="What do you want to represent?", choices=c("Avg time to find Parking vs Length of stay","Avg distance from Parking to destination vs Length of stay"), selected ="Avg time to find Parking vs Length of stay"),
uiOutput("parking_plot")) | /Vis_ParkingLOS/ui.R | no_license | juangordyn/Jamsnot_Vis | R | false | false | 458 | r | library('shiny')
library('shinyWidgets')
ui <- fluidPage(
tags$head(
tags$style(HTML("
body {
background-color: white;
}"))),
align='center',
prettyRadioButtons(inputId="plot_type", label="What do you want to represent?", choices=c("Avg time to find Parking vs Length of stay","Avg distance from Parking to destination vs Length of stay"), selected ="Avg time to find Parking vs Length of stay"),
uiOutput("parking_plot")) |
#' @import dplyr
NULL
#' Connect to any database with a JDBC driver.
#'
#' Use \code{src_JDBC} to connect to an existing database with a JDBC driver,
#' and \code{tbl} to connect to tables within that database.
#' If you are running a local database, leave all parameters set as
#' their defaults to connect. If you're connecting to a remote database,
#' ask your database administrator for the values of these variables.
#'
#' @param driver location of the JDBC driver.
#' @param url JDBC connection url
#' @param create if \code{FALSE}, \code{path} must already exist. If
#' \code{TRUE}, will create a new SQlite3 database at \code{path}.
#' @param src a sqlite src created with \code{src_sqlite}.
#' @param from Either a string giving the name of table in database, or
#' \code{\link{sql}} described a derived table or compound join.
#' @param ... Included for compatibility with the generic, but otherwise
#' ignored.
#' @export
#' @examples
#' \dontrun{
#' # Connection basics ---------------------------------------------------------
#' # To connect to a database first create a src:
#' my_db <- src_sqlite(path = tempfile(), create = TRUE)
#' # Then reference a tbl within that src
#' my_tbl <- tbl(my_db, "my_table")
#' }
#'
#' # Here we'll use the Lahman database: to create your own local copy,
#' # run lahman_sqlite()
#'
#' \donttest{
#' if (require("RSQLite") && has_lahman("sqlite")) {
#' # Methods -------------------------------------------------------------------
#' batting <- tbl(lahman_sqlite(), "Batting")
#' dim(batting)
#' colnames(batting)
#' head(batting)
#'
#' # Data manipulation verbs ---------------------------------------------------
#' filter(batting, yearID > 2005, G > 130)
#' select(batting, playerID:lgID)
#' arrange(batting, playerID, desc(yearID))
#' summarise(batting, G = mean(G), n = n())
#' mutate(batting, rbi2 = 1.0 * R / AB)
#'
#' # note that all operations are lazy: they don't do anything until you
#' # request the data, either by `print()`ing it (which shows the first ten
#' # rows), by looking at the `head()`, or `collect()` the results locally.
#'
#' system.time(recent <- filter(batting, yearID > 2010))
#' system.time(collect(recent))
#'
#' # Group by operations -------------------------------------------------------
#' # To perform operations by group, create a grouped object with group_by
#' players <- group_by(batting, playerID)
#' group_size(players)
#'
#' # sqlite doesn't support windowed functions, which means that only
#' # grouped summaries are really useful:
#' summarise(players, mean_g = mean(G), best_ab = max(AB))
#'
#' # When you group by multiple level, each summarise peels off one level
#' per_year <- group_by(batting, playerID, yearID)
#' stints <- summarise(per_year, stints = max(stint))
#' filter(ungroup(stints), stints > 3)
#' summarise(stints, max(stints))
#'
#' # Joins ---------------------------------------------------------------------
#' player_info <- select(tbl(lahman_sqlite(), "Master"), playerID, hofID,
#' birthYear)
#' hof <- select(filter(tbl(lahman_sqlite(), "HallOfFame"), inducted == "Y"),
#' hofID, votedBy, category)
#'
#' # Match players and their hall of fame data
#' inner_join(player_info, hof)
#' # Keep all players, match hof data where available
#' left_join(player_info, hof)
#' # Find only players in hof
#' semi_join(player_info, hof)
#' # Find players not in hof
#' anti_join(player_info, hof)
#'
#' # Arbitrary SQL -------------------------------------------------------------
#' # You can also provide sql as is, using the sql function:
#' batting2008 <- tbl(lahman_sqlite(),
#' sql("SELECT * FROM Batting WHERE YearID = 2008"))
#' batting2008
#' }
#' }
src_JDBC <- function(driver, url = NULL, user = NULL, password = NULL, ...) {
  ## RJDBC is an optional dependency; require() (rather than requireNamespace)
  ## is kept on purpose because it also attaches rJava, which provides the
  ## .jcall() used below.
  if (!require("RJDBC")) {
    stop("RJDBC package required to connect to JDBC db", call. = FALSE)
  }
  ## Default missing credentials to empty strings (JDBC expects character,
  ## not NULL). The original redundantly applied `%||% ""` to `user` twice.
  user <- user %||% ""
  con <- dplyr:::dbi_connect(driver, url = url %||% "", user = user,
                             password = password %||% "", ...)
  ## Disable auto-commit on the underlying Java connection object.
  .jcall(con@jc, "V", "setAutoCommit", FALSE)
  info <- list(url = url, user = user, driver = .jstrVal(con@jc))
  src_sql("JDBC", con,
          info = info, disco = dplyr:::db_disconnector(con, "JDBC"))
}
#' @export
#' @rdname src_JDBC
tbl.src_JDBC <- function(src, from, ...) {
# Delegate to dplyr's tbl_sql() with the "JDBC" subclass; `from` is either a
# table name or a sql() expression (see the roxygen block above).
tbl_sql("JDBC", src = src, from = from, ...)
}
#' @export
# TODO: fix for JDBC
brief_desc.src_JDBC <- function(x) {
  ## Short human-readable label for a JDBC source: "JDBC <driver> [<url>]".
  src_info <- x[["info"]]
  paste0("JDBC ", src_info[["driver"]], " [", src_info[["url"]], "]")
}
#' @export
translate_env.src_JDBC <- function(x) {
# SQL translation environment for JDBC sources: default scalar and window
# translators, plus an aggregate translator where n() maps to COUNT(*).
sql_variant(
base_scalar,
sql_translator(.parent = base_agg,
n = function() sql("count(*)")
),
base_win
)
}
| /R/src-JDBC.r | permissive | jimhester/dplyrJDBC | R | false | false | 4,653 | r | #' @import dplyr
NULL
#' Connect to any database with a JDBC driver.
#'
#' Use \code{src_JDBC} to connect to an existing database with a JDBC driver,
#' and \code{tbl} to connect to tables within that database.
#' If you are running a local database, leave all parameters set as
#' their defaults to connect. If you're connecting to a remote database,
#' ask your database administrator for the values of these variables.
#'
#' @param driver location of the JDBC driver.
#' @param url JDBC connection url
#' @param create if \code{FALSE}, \code{path} must already exist. If
#' \code{TRUE}, will create a new SQlite3 database at \code{path}.
#' @param src a sqlite src created with \code{src_sqlite}.
#' @param from Either a string giving the name of table in database, or
#' \code{\link{sql}} described a derived table or compound join.
#' @param ... Included for compatibility with the generic, but otherwise
#' ignored.
#' @export
#' @examples
#' \dontrun{
#' # Connection basics ---------------------------------------------------------
#' # To connect to a database first create a src:
#' my_db <- src_sqlite(path = tempfile(), create = TRUE)
#' # Then reference a tbl within that src
#' my_tbl <- tbl(my_db, "my_table")
#' }
#'
#' # Here we'll use the Lahman database: to create your own local copy,
#' # run lahman_sqlite()
#'
#' \donttest{
#' if (require("RSQLite") && has_lahman("sqlite")) {
#' # Methods -------------------------------------------------------------------
#' batting <- tbl(lahman_sqlite(), "Batting")
#' dim(batting)
#' colnames(batting)
#' head(batting)
#'
#' # Data manipulation verbs ---------------------------------------------------
#' filter(batting, yearID > 2005, G > 130)
#' select(batting, playerID:lgID)
#' arrange(batting, playerID, desc(yearID))
#' summarise(batting, G = mean(G), n = n())
#' mutate(batting, rbi2 = 1.0 * R / AB)
#'
#' # note that all operations are lazy: they don't do anything until you
#' # request the data, either by `print()`ing it (which shows the first ten
#' # rows), by looking at the `head()`, or `collect()` the results locally.
#'
#' system.time(recent <- filter(batting, yearID > 2010))
#' system.time(collect(recent))
#'
#' # Group by operations -------------------------------------------------------
#' # To perform operations by group, create a grouped object with group_by
#' players <- group_by(batting, playerID)
#' group_size(players)
#'
#' # sqlite doesn't support windowed functions, which means that only
#' # grouped summaries are really useful:
#' summarise(players, mean_g = mean(G), best_ab = max(AB))
#'
#' # When you group by multiple level, each summarise peels off one level
#' per_year <- group_by(batting, playerID, yearID)
#' stints <- summarise(per_year, stints = max(stint))
#' filter(ungroup(stints), stints > 3)
#' summarise(stints, max(stints))
#'
#' # Joins ---------------------------------------------------------------------
#' player_info <- select(tbl(lahman_sqlite(), "Master"), playerID, hofID,
#' birthYear)
#' hof <- select(filter(tbl(lahman_sqlite(), "HallOfFame"), inducted == "Y"),
#' hofID, votedBy, category)
#'
#' # Match players and their hall of fame data
#' inner_join(player_info, hof)
#' # Keep all players, match hof data where available
#' left_join(player_info, hof)
#' # Find only players in hof
#' semi_join(player_info, hof)
#' # Find players not in hof
#' anti_join(player_info, hof)
#'
#' # Arbitrary SQL -------------------------------------------------------------
#' # You can also provide sql as is, using the sql function:
#' batting2008 <- tbl(lahman_sqlite(),
#' sql("SELECT * FROM Batting WHERE YearID = 2008"))
#' batting2008
#' }
#' }
src_JDBC <- function(driver, url = NULL, user = NULL, password = NULL, ...) {
  ## RJDBC is an optional dependency; require() (rather than requireNamespace)
  ## is kept on purpose because it also attaches rJava, which provides the
  ## .jcall() used below.
  if (!require("RJDBC")) {
    stop("RJDBC package required to connect to JDBC db", call. = FALSE)
  }
  ## Default missing credentials to empty strings (JDBC expects character,
  ## not NULL). The original redundantly applied `%||% ""` to `user` twice.
  user <- user %||% ""
  con <- dplyr:::dbi_connect(driver, url = url %||% "", user = user,
                             password = password %||% "", ...)
  ## Disable auto-commit on the underlying Java connection object.
  .jcall(con@jc, "V", "setAutoCommit", FALSE)
  info <- list(url = url, user = user, driver = .jstrVal(con@jc))
  src_sql("JDBC", con,
          info = info, disco = dplyr:::db_disconnector(con, "JDBC"))
}
#' @export
#' @rdname src_JDBC
tbl.src_JDBC <- function(src, from, ...) {
# Delegate to dplyr's tbl_sql() with the "JDBC" subclass; `from` is either a
# table name or a sql() expression (see the roxygen block above).
tbl_sql("JDBC", src = src, from = from, ...)
}
#' @export
# TODO: fix for JDBC
brief_desc.src_JDBC <- function(x) {
  ## Short human-readable label for a JDBC source: "JDBC <driver> [<url>]".
  src_info <- x[["info"]]
  paste0("JDBC ", src_info[["driver"]], " [", src_info[["url"]], "]")
}
#' @export
translate_env.src_JDBC <- function(x) {
# SQL translation environment for JDBC sources: default scalar and window
# translators, plus an aggregate translator where n() maps to COUNT(*).
sql_variant(
base_scalar,
sql_translator(.parent = base_agg,
n = function() sql("count(*)")
),
base_win
)
}
|
library(HH)
### Name: position
### Title: Find or assign the implied position for graphing the levels of a
### factor. A new class "positioned", which inherits from "ordered" and
### "factor", is defined.
### Aliases: position position<- is.numeric.positioned
### as.numeric.positioned as.position [.positioned as.positioned
### is.positioned is.na.positioned positioned print.positioned
### unique.positioned unpositioned
### Keywords: dplot
### ** Examples
## ordered with character levels defaults to
## integer position of specified levels
tmp <- ordered(c("mm","cm","m","m","mm","cm"),
levels=c("mm","cm","m")) ## size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## position is assigned to ordered in specified order
tmp <- ordered(c("cm","mm","m","m","mm","cm"),
levels=c("mm","cm","m")) ## size order
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## log10 assigned in size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## numeric stays numeric
tmp <- c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010)
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## factor with numeric levels, position is integer position in size order
tmp <- factor(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## ordered with numeric levels, position is numeric value in size order
tmp <- ordered(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## factor with numeric levels
## position is assigned in size order
tmp <- factor(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## log10 assigned in size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## boxplots coded by week
## Simulate 40 observations across 4 weekly groups, then place the boxes at
## the custom axis positions 1, 2, 4, 8 instead of the default 1:4.
tmp <- data.frame(Y=rnorm(40, rep(c(20,25,15,22), 10), 5),
week=ordered(rep(1:4, 10)))
position(tmp$week) <- c(1, 2, 4, 8)
## if.R() dispatches between R and S-PLUS: the R branch draws vertical
## boxplots, the S-PLUS branch draws horizontal ones and transposes the
## resulting trellis object with t().
if.R(r=
bwplot(Y ~ week, horizontal=FALSE,
scales=list(x=list(limits=c(0,9),
at=position(tmp$week),
labels=position(tmp$week))),
data=tmp, panel=panel.bwplot.intermediate.hh)
,s=
t(bwplot(week ~ Y, at=position(tmp$week),
scales=list(y=list(limits=c(0,9),
at=position(tmp$week), labels=position(tmp$week))),
data=tmp, panel=panel.bwplot.intermediate.hh))
)
#### You probably don't want to use the next two examples.
#### You need to be aware of their behavior.
##
## factor with character levels defaults to
## integer position of sorted levels.
## you probably DON'T want to do this!
tmp <- factor(c("cm","mm","m","m","mm","cm")) ## default alphabetic order
tmp
as.numeric(tmp)
levels(tmp) ## you probably DON'T want to do this!
position(tmp) ## you probably DON'T want to do this!
as.numeric(tmp)
##
## position is assigned to factor in default alphabetic order.
## you probably DON'T want to do this!
tmp <- factor(c("cm","mm","m","m","mm","cm"))
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## assigned in default alphabetic order
tmp
as.numeric(tmp)
levels(tmp) ## you probably DON'T want to do this!
position(tmp) ## you probably DON'T want to do this!
as.numeric(tmp)
| /data/genthat_extracted_code/HH/examples/position.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,719 | r | library(HH)
### Name: position
### Title: Find or assign the implied position for graphing the levels of a
### factor. A new class "positioned", which inherits from "ordered" and
### "factor", is defined.
### Aliases: position position<- is.numeric.positioned
### as.numeric.positioned as.position [.positioned as.positioned
### is.positioned is.na.positioned positioned print.positioned
### unique.positioned unpositioned
### Keywords: dplot
### ** Examples
## ordered with character levels defaults to
## integer position of specified levels
tmp <- ordered(c("mm","cm","m","m","mm","cm"),
levels=c("mm","cm","m")) ## size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## position is assigned to ordered in specified order
tmp <- ordered(c("cm","mm","m","m","mm","cm"),
levels=c("mm","cm","m")) ## size order
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## log10 assigned in size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## numeric stays numeric
tmp <- c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010)
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## factor with numeric levels, position is integer position in size order
tmp <- factor(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## ordered with numeric levels, position is numeric value in size order
tmp <- ordered(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## factor with numeric levels
## position is assigned in size order
tmp <- factor(c(0.010, 0.001, 1.000, 1.000, 0.001, 0.010))
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## log10 assigned in size order
tmp
as.numeric(tmp)
levels(tmp)
position(tmp)
as.position(tmp)
as.positioned(tmp)
positioned(tmp)
unpositioned(tmp)
unique(tmp)
## boxplots coded by week
tmp <- data.frame(Y=rnorm(40, rep(c(20,25,15,22), 10), 5),
week=ordered(rep(1:4, 10)))
position(tmp$week) <- c(1, 2, 4, 8)
if.R(r=
bwplot(Y ~ week, horizontal=FALSE,
scales=list(x=list(limits=c(0,9),
at=position(tmp$week),
labels=position(tmp$week))),
data=tmp, panel=panel.bwplot.intermediate.hh)
,s=
t(bwplot(week ~ Y, at=position(tmp$week),
scales=list(y=list(limits=c(0,9),
at=position(tmp$week), labels=position(tmp$week))),
data=tmp, panel=panel.bwplot.intermediate.hh))
)
#### You probably don't want to use the next two examples.
#### You need to be aware of their behavior.
##
## factor with character levels defaults to
## integer position of sorted levels.
## you probably DON'T want to do this!
tmp <- factor(c("cm","mm","m","m","mm","cm")) ## default alphabetic order
tmp
as.numeric(tmp)
levels(tmp) ## you probably DON'T want to do this!
position(tmp) ## you probably DON'T want to do this!
as.numeric(tmp)
##
## position is assigned to factor in default alphabetic order.
## you probably DON'T want to do this!
tmp <- factor(c("cm","mm","m","m","mm","cm"))
levels(tmp)
position(tmp) <- c(-3, -2, 0) ## assigned in default alphabetic order
tmp
as.numeric(tmp)
levels(tmp) ## you probably DON'T want to do this!
position(tmp) ## you probably DON'T want to do this!
as.numeric(tmp)
|
#Assignment 2.
## These functions cache and return the inverse of a matrix, using the '<<-' operator.
## The first function returns a list that does four things:
# 1. set the value of a matrix
# 2. get the value of a matrix
# 3. set the value of its inverse
# 4. get the value of its inverse
## Build a caching wrapper around a matrix. The returned list exposes
## set/get for the matrix itself and setinverse/getinverse for a cached
## inverse, which is invalidated whenever the matrix is replaced.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The second function returns the inverse of the defined in makeCacheMatrix(), by first checking to see if it is cached, and then computing, or "solving," if it is not.
## Return the inverse of the special "matrix" built by makeCacheMatrix():
## the cache is consulted first, and the inverse is only computed (then
## cached) when it is not already available.
## FIX: the original wrapped the whole body in a second, nested definition
## of cacheSolve that was never invoked, so calling cacheSolve(x) returned
## a function object instead of the inverse.
cacheSolve <- function(x, ...) {
  neg.m <- x$getinverse()
  if (!is.null(neg.m)) {
    message("getting cached data")
    return(neg.m)
  }
  m.data <- x$get()
  neg.m <- solve(m.data, ...)
  x$setinverse(neg.m)
  neg.m
}
## The test function demonstrates that the above functions return the inverse of "my_matrix"
## Demo helper: wrap a square matrix in a cache object and return its inverse.
test.fun <- function(sq_mat) {
  cache_obj <- makeCacheMatrix(sq_mat)
  cacheSolve(cache_obj)
}
my_matrix<-matrix(rnorm(25, 3, 1), 5, 5)
row.names(my_matrix)<-c("1","2","3","4","5")
colnames(my_matrix)<-c("A","B","C","D","E")
my_matrix
test.fun(my_matrix)
| /cachematrix.R | no_license | SammyShaw/ProgrammingAssignment2 | R | false | false | 1,627 | r | #Assignment 2.
## These functions cache and return the inverse of a matrix, using the '<<-' operator.
## The first function returns a list that does four things:
# 1. set the value of a matrix
# 2. get the value of a matrix
# 3. set the value of its inverse
# 4. get the value of its inverse
## Build a caching wrapper around a matrix. The returned list exposes
## set/get for the matrix itself and setinverse/getinverse for a cached
## inverse, which is invalidated whenever the matrix is replaced.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The second function returns the inverse of the defined in makeCacheMatrix(), by first checking to see if it is cached, and then computing, or "solving," if it is not.
## Return the inverse of the special "matrix" built by makeCacheMatrix():
## the cache is consulted first, and the inverse is only computed (then
## cached) when it is not already available.
## FIX: the original wrapped the whole body in a second, nested definition
## of cacheSolve that was never invoked, so calling cacheSolve(x) returned
## a function object instead of the inverse.
cacheSolve <- function(x, ...) {
  neg.m <- x$getinverse()
  if (!is.null(neg.m)) {
    message("getting cached data")
    return(neg.m)
  }
  m.data <- x$get()
  neg.m <- solve(m.data, ...)
  x$setinverse(neg.m)
  neg.m
}
## The test function demonstrates that the above functions return the inverse of "my_matrix"
## Demo helper: wrap a square matrix in a cache object and return its inverse.
test.fun <- function(sq_mat) {
  cache_obj <- makeCacheMatrix(sq_mat)
  cacheSolve(cache_obj)
}
my_matrix<-matrix(rnorm(25, 3, 1), 5, 5)
row.names(my_matrix)<-c("1","2","3","4","5")
colnames(my_matrix)<-c("A","B","C","D","E")
my_matrix
test.fun(my_matrix)
|
## ---------------------------------------------------- ##
## gen_data.R ----------------------------------------- ##
## Purpose: simulate data under a random slope, ------- ##
## random intercept model with normal errors ---------- ##
## Author: Peter Norwood, NCSU, Janssen --------------- ##
## ---------------------------------------------------- ##
## gen_data
## Purpose: generate correlated data with a given mean vector and covariance
## param mean_vec: vector of means (length d)
## param cov_mat: d x d covariance matrix (must be positive definite)
## param samples: number of samples to take
## return Y: a samples x d matrix; each row is one correlated draw
gen_data <- function(mean_vec, cov_mat, samples) {
  ## correlation matrix and the diagonal of the covariance matrix
  cor_mat <- cov2cor(cov_mat)
  cov_diag <- diag(diag(cov_mat))
  ## Cholesky factorization of the correlation matrix (lower triangular)
  A <- t(chol(cor_mat))
  ## mixing matrix: rescales the correlated factor back to the covariances
  B <- cov_diag^(0.5) %*% A
  Binv <- solve(B)
  ## mean of the independent (pre-mixing) variables
  mu_x <- Binv %*% mean_vec
  ## Draw independent normals column by column. The matrix is built
  ## explicitly so the result stays a samples x d matrix even when
  ## samples == 1: the original sapply() collapsed to a vector in that case
  ## and broke the t(B %*% t(X)) product. The rep(..., each = samples)
  ## ordering reproduces the original draw sequence under a fixed seed.
  d <- length(mu_x)
  X <- matrix(rnorm(samples * d, mean = rep(mu_x, each = samples), sd = 1),
              nrow = samples, ncol = d)
  ## mix the independent draws to obtain correlated responses
  Y <- t(B %*% t(X))
  return(Y)
}
## gen_data_mixed
## Purpose: generate correlated longitudinal data with
## random slopes and random intercepts
## param fixed_mean: matrix of fixed-effect parameters; row j is indexed as
##   fixed_mean[j,1] (intercept) and fixed_mean[j,2] (slope) for response j
## param G: covariance matrix for the random effects
## param R: covariance matrix for the random error across responses
## param time_points: vector of time points the responses are measured at
## param num_responses: number of responses
## param samples: number of subjects to simulate
## return dat: a matrix with the following columns: ID, y_type, time, y
gen_data_mixed <- function(fixed_mean,
G,R,
time_points,
num_responses,
samples){
## random effects are mean zero with covariance G; gen_data() returns one
## row per subject and one column per random-effect dimension
random_mean <- c(rep(0,ncol(G)))
random_coef <- gen_data(mean_vec=random_mean,
cov_mat=G,samples=samples)
## split the random-effect columns into consecutive blocks of
## num_responses columns each. NOTE(review): the blocks are later read as
## rand_list[[j]][i, 1:2] = (intercept, slope) of response j for subject i,
## which only lines up when ncol(G) == 2 * num_responses -- confirm with
## callers before reusing this with other dimensions.
rand_list <- list()
splits <- ncol(G)/num_responses
start <- 1
for(r in 1:splits){
#print(start:(start+(num_responses-1)))
rand_list[[r]] <- random_coef[,start:(start+(num_responses-1))]
start <- start+num_responses
}
## random errors: one samples x num_responses draw per time point, with
## cross-response covariance R
error_mean <- c(rep(0,ncol(R)))
error_list <- list()
for(r in 1:length(time_points)){
error_list[[r]] <- gen_data(mean_vec=error_mean,
cov_mat=R,samples=samples)
}
## add the slopes to the correct columns
## assemble the long-format output: one row per (subject, response, time)
dat <- matrix(NA,nrow=length(time_points)*num_responses*samples,ncol=4)
tick <- 1
## loop through all individuals
for(i in 1:samples){
## loop through different responses
for(j in 1:num_responses){
## loop through time points
for(k in 1:length(time_points)){
t <- time_points[k]
## int j-th response ## random int j-th response, i-th individual
y <- (fixed_mean[j,1] + rand_list[[j]][i,1]) +
## slope j-th response ## random slope j-th response, i-th individual
(fixed_mean[j,2] + rand_list[[j]][i,2])*t +
## random error k-th time point, j-th response, i-th individual
error_list[[k]][i,j]
dat[tick,] <- c(i,j,t,y)
tick=tick+1
}
}
}
return(dat)
}
| /simulation_code/gen_data.R | no_license | peterpnorwood/MultivariateLongitudinal | R | false | false | 3,242 | r | ## ---------------------------------------------------- ##
## gen_data.R ----------------------------------------- ##
## Purpose: simulate data under a random slope, ------- ##
## random intercept model with normal errors ---------- ##
## Author: Peter Norwood, NCSU, Janssen --------------- ##
## ---------------------------------------------------- ##
## gen_data
## Purpose: generate correlated data with a given mean vector and covariance
## param mean_vec: vector of means (length d)
## param cov_mat: d x d covariance matrix (must be positive definite)
## param samples: number of samples to take
## return Y: a samples x d matrix; each row is one correlated draw
gen_data <- function(mean_vec, cov_mat, samples) {
  ## correlation matrix and the diagonal of the covariance matrix
  cor_mat <- cov2cor(cov_mat)
  cov_diag <- diag(diag(cov_mat))
  ## Cholesky factorization of the correlation matrix (lower triangular)
  A <- t(chol(cor_mat))
  ## mixing matrix: rescales the correlated factor back to the covariances
  B <- cov_diag^(0.5) %*% A
  Binv <- solve(B)
  ## mean of the independent (pre-mixing) variables
  mu_x <- Binv %*% mean_vec
  ## Draw independent normals column by column. The matrix is built
  ## explicitly so the result stays a samples x d matrix even when
  ## samples == 1: the original sapply() collapsed to a vector in that case
  ## and broke the t(B %*% t(X)) product. The rep(..., each = samples)
  ## ordering reproduces the original draw sequence under a fixed seed.
  d <- length(mu_x)
  X <- matrix(rnorm(samples * d, mean = rep(mu_x, each = samples), sd = 1),
              nrow = samples, ncol = d)
  ## mix the independent draws to obtain correlated responses
  Y <- t(B %*% t(X))
  return(Y)
}
## gen_data_mixed
## Purpose: generate correlated longitudinal data with
## random slopes and random intercepts
## param fixed_mean: vector of means parameters,
## rows indicate different responses
## param G: covariance matrix for the random effects
## param R: covariance matrix for the random error
## param time_points: number of time points the responses have
## num_responses: number of responses
## param samples: number of samples to take
## return dat: a matrix with the following columns: ID, y_type, time, y
gen_data_mixed <- function(fixed_mean,
G,R,
time_points,
num_responses,
samples){
random_mean <- c(rep(0,ncol(G)))
random_coef <- gen_data(mean_vec=random_mean,
cov_mat=G,samples=samples)
rand_list <- list()
splits <- ncol(G)/num_responses
start <- 1
for(r in 1:splits){
#print(start:(start+(num_responses-1)))
rand_list[[r]] <- random_coef[,start:(start+(num_responses-1))]
start <- start+num_responses
}
error_mean <- c(rep(0,ncol(R)))
error_list <- list()
for(r in 1:length(time_points)){
error_list[[r]] <- gen_data(mean_vec=error_mean,
cov_mat=R,samples=samples)
}
## add the slopes to the correct columns
dat <- matrix(NA,nrow=length(time_points)*num_responses*samples,ncol=4)
tick <- 1
## loop through all individuals
for(i in 1:samples){
## loop through different responses
for(j in 1:num_responses){
## loop through time points
for(k in 1:length(time_points)){
t <- time_points[k]
## int j-th response ## random int j-th response, i-th individual
y <- (fixed_mean[j,1] + rand_list[[j]][i,1]) +
## slope j-th response ## random slope j-th response, i-th individual
(fixed_mean[j,2] + rand_list[[j]][i,2])*t +
## random error k-th time point, j-th response, i-th individual
error_list[[k]][i,j]
dat[tick,] <- c(i,j,t,y)
tick=tick+1
}
}
}
return(dat)
}
|
## These functions will cache the inverse of a matrix
## creates a special "matrix" object that can cache its inverse
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set/get for the matrix and
## setinverse/getinverse for the cached inverse (cleared on set()).
## FIX: the original list() wrote `set inverse = set inverse`, which is a
## syntax error in R (names cannot contain a bare space).
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## computes inverse of the results of the previous function
## if inverse is already found, cacheSolve will retrive the inverse from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing and caching it on first use and reusing the cache afterwards.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | sgussen969/ProgrammingAssignment2 | R | false | false | 784 | r | ## These functions will cache the inverse of a matrix
## creates a special "matrix" object that can cache its inverse
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set/get for the matrix and
## setinverse/getinverse for the cached inverse (cleared on set()).
## FIX: the original list() wrote `set inverse = set inverse`, which is a
## syntax error in R (names cannot contain a bare space).
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## computes inverse of the results of the previous function
## if inverse is already found, cacheSolve will retrive the inverse from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing and caching it on first use and reusing the cache afterwards.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
library(tidyverse)
library(nycflights13)
# Peek at the flights data.
head(flights)
print(flights, n = 10, width = Inf)
help("flights")
# Flights that arrived two or more hours late.
flights %>%
filter(arr_delay >= 120) %>%
count()
# Flights to Houston (either airport), written two equivalent ways.
flights %>%
filter(dest == "IAH" | dest == "HOU")
flights %>%
filter(dest %in% c("IAH","HOU"))
airlines
# Flights operated by United, American, or Delta.
flights %>%
filter(carrier %in% c("UA","AA","DL")) %>%
print(n = 10, width = Inf)
# Flights that departed in July, August, or September.
flights %>%
filter(month %in% 7:9)
# Arrived >= 2 hours late but did not leave late.
flights %>%
filter(arr_delay >= 120 , dep_delay <= 0 )
# Left no more than an hour late but made up over 30 minutes in flight.
flights %>%
filter(dep_delay <= 60, dep_delay - arr_delay > 30)
# Departed between midnight and 6am (2400 codes midnight in this data).
flights %>%
filter(dep_time <= 600 | dep_time == 2400)
# NOTE(review): is.na() is called with no argument, so this pipeline errors
# as written -- presumably a column (e.g. dep_time) was meant.
flights %>%
filter(is.na()) %>%
arrange()
# Most-delayed departures first.
flights %>%
arrange(desc(dep_delay))
| /R 2nd data analysis.R | no_license | Lekangi/R | R | false | false | 685 | r | library(tidyverse)
library(nycflights13)
head(flights)
print(flights, n = 10, width = Inf)
help("flights")
flights %>%
filter(arr_delay >= 120) %>%
count()
flights %>%
filter(dest == "IAH" | dest == "HOU")
flights %>%
filter(dest %in% c("IAH","HOU"))
airlines
flights %>%
filter(carrier %in% c("UA","AA","DL")) %>%
print(n = 10, width = Inf)
flights %>%
filter(month %in% 7:9)
flights %>%
filter(arr_delay >= 120 , dep_delay <= 0 )
flights %>%
filter(dep_delay <= 60, dep_delay - arr_delay > 30)
flights %>%
filter(dep_time <= 600 | dep_time == 2400)
flights %>%
filter(is.na()) %>%
arrange()
flights %>%
arrange(desc(dep_delay))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog_dat.R
\docType{data}
\name{sa32016}
\alias{sa32016}
\title{Statistical Area 3, 2016}
\format{An \code{sf} object with 12 variables:
#' \describe{
\item{\code{sa3_code_2016}}{The full 5 digit SA3 code numeric}
\item{\code{sa3_name_2016}}{The SA3 name character}
\item{\code{sa4_code_2016}}{The full 3 digit SA4 code numeric}
\item{\code{sa4_name_2016}}{The SA4 name character}
\item{\code{gcc_code_2016}}{The alphanumeric Greater Capital City (GCC) code numeric}
\item{\code{gcc_name_2016}}{The GCC name}
\item{\code{state_name_2016}}{The full state name}
\item{\code{albers_sqkm_2016}}{The area in square kilometres}
\item{\code{cent_lat}}{The latitude of the area's centroid}
\item{\code{cent_long}}{The longitude of the area's centroid}
\item{\code{geometry}}{A nested list containing the area's geometry (polygons)}
}}
\usage{
sa32016
}
\description{
Geospatial data provided by the ABS for Statistical Area 3 in 2016.
}
\keyword{datasets}
| /man/sa32016.Rd | no_license | srepho/absmapsdata | R | false | true | 1,025 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog_dat.R
\docType{data}
\name{sa32016}
\alias{sa32016}
\title{Statistical Area 3, 2016}
\format{An \code{sf} object with 12 variables:
\describe{
\item{\code{sa3_code_2016}}{The full 5 digit SA3 code numeric}
\item{\code{sa3_name_2016}}{The SA3 name character}
\item{\code{sa4_code_2016}}{The full 3 digit SA4 code numeric}
\item{\code{sa4_name_2016}}{The SA4 name character}
\item{\code{gcc_code_2016}}{The alphanumeric Greater Capital City (GCC) code numeric}
\item{\code{gcc_name_2016}}{The GCC name}
\item{\code{state_name_2016}}{The full state name}
\item{\code{albers_sqkm_2016}}{The area in square kilometres}
\item{\code{cent_lat}}{The latitude of the area's centroid}
\item{\code{cent_long}}{The longitude of the area's centroid}
\item{\code{geometry}}{A nested list containing the area's geometry (polygons)}
}}
\usage{
sa32016
}
\description{
Geospatial data provided by the ABS for Statistical Area 3 in 2016.
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{data-phenotypes}
\alias{data-phenotypes}
\title{Raw phenotype data used in thesis}
\usage{
phenotypes
}
\description{
Raw phenotype data used in thesis
}
\details{
The data comes from a field trial that is part of a cotton
breeding program. The trial was set up in 2012 across 7 locations in the
US Cotton Belt. At every location the same bi--parental BC_3F_2 was grown
together with a number of entries serving as checks. Yield performance
measurements were obtained per plot. The data frame contains the field design
information and the yield performance values averaged per plot. The data frame
holds information of 2310 observations and 9 features. The features are
detailed below and represent the columns in the data frame.
\describe{
\item{\code{GERMPLASM}:}{The entry names.}
\item{\code{LOCATION}:}{The name of the locations.}
\item{\code{RANGE}:}{The range coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{ROW}:}{The row coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{RANGEROW}:}{A combination of the range and row coordinates.}
\item{\code{LOCAL_ROW}:}{The coordinates for the rows linked to the locations.
Here the reference grid is the location itself.}
\item{\code{LOCAL_RANGE}:}{The coordinates for the ranges linked to the locations.
Here the reference grid is the location itself.}
\item{\code{PLOT}:}{The reference to the plot of the observation.}
\item{\code{YIELD}:}{The average yield performance measures of the plots for
the respective observations.}
}
}
\examples{
data(phenotypes)
head(phenotypes)
}
\author{
Ruud Derijcker
}
\keyword{phenotypes}
| /man/data-phenotypes.Rd | no_license | digiYozhik/msc_thesis | R | false | false | 1,866 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{data-phenotypes}
\alias{data-phenotypes}
\title{Raw phenotype data used in thesis}
\usage{
phenotypes
}
\description{
Raw phenotype data used in thesis
}
\details{
The data comes from a field trial that is part of a cotton
breeding program. The trial was set up in 2012 across 7 locations in the
US Cotton Belt. At every location the same bi--parental BC_3F_2 was grown
together with a number of entries serving as checks. Yield performance
measurements were obtained per plot. The data frame contains the field design
information and the yield performance values averaged per plot. The data frame
holds information of 2310 observations and 9 features. The features are
detailed below and represent the columns in the data frame.
\describe{
\item{\code{GERMPLASM}:}{The entry names.}
\item{\code{LOCATION}:}{The name of the locations.}
\item{\code{RANGE}:}{The range coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{ROW}:}{The row coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{RANGEROW}:}{A combination of the range and row coordinates.}
\item{\code{LOCAL_ROW}:}{The coordinates for the rows linked to the locations.
Here the reference grid is the location itself.}
\item{\code{LOCAL_RANGE}:}{The coordinates for the ranges linked to the locations.
Here the reference grid is the location itself.}
\item{\code{PLOT}:}{The reference to the plot of the observation.}
\item{\code{YIELD}:}{The average yield performance measures of the plots for
the respective observations.}
}
}
\examples{
data(phenotypes)
head(phenotypes)
}
\author{
Ruud Derijcker
}
\keyword{phenotypes}
|
# required libraries ------------------------------------------------------
library(data.table)
library(magrittr)
library(shiny)
library(leaflet)
library(leaflet.extras)
library(emojifont)
library(shinybusy)
library(RCurl)
# read geolocalized data --------------------------------------------------
d <- readRDS("shiny/escuelas_geolocalizado.rds")
d$cue_anexo <- as.integer(d$cue_anexo)
setDT(d)
# read corrections file to fix wrong geocoding ----------------------------
url <- getURL("https://raw.githubusercontent.com/canovasjm/escuelas/master/correcciones/correcciones.csv")
d_corrections <- read.csv(text = url)
setDT(d_corrections)
# replace wrong lat and lon with the corrections --------------------------
d <- d[d_corrections, on = "cue_anexo", c("lat", "lon") := list(i.lat, i.lon)]
# read labels for each sector ---------------------------------------------
label_estatal <- readRDS("shiny/label_estatal.rds")
label_privado <- readRDS("shiny/label_privado.rds")
# prepare data ------------------------------------------------------------
# filter data by sector
sector_estatal <- d[sector == "Estatal", ]
sector_privado <- d[sector == "Privado", ]
# general objects ---------------------------------------------------------
# define color palette
pal <- colorFactor(palette = c("darkred", "steelblue"),
levels = c("Estatal", "Privado"))
# shiny app ---------------------------------------------------------------
ui <- fluidPage(
tags$head(HTML('<link href="https://fonts.googleapis.com/css?family=Roboto+Mono" rel="stylesheet">')),
tags$head(HTML('<style>* {font-size: 100%; font-family: Roboto Mono;}</style>')),
h2("Establecimientos Educativos en Argentina", lapply(search_emoji("student"), emoji), emoji("argentina")),
fluidRow(
column(3,
h4(emoji("school"), strong("¿Qué hay acá?")),
HTML("<p> Un mapa con los establecimientos educativos (<span style= \"color: darkred;\">estatales</span> y
<span style= \"color: steelblue;\">privados</span>) de la República Argentina.</p>"),
br(),
h4(emoji("memo"), strong("Sobre los datos")),
HTML("<p> Provienen del <i>Padrón Oficial de Establecimientos Educativos</i>, que es el nomenclador unificado
de escuelas e incluye ofertas educativas de distintos programas, carreras y títulos; entre otras variables. <p>"),
br(),
h4(emoji("blue_book"), strong("Fuente")),
HTML("<p> Ministerio de Educación: <a href=https://www.argentina.gob.ar/educacion/planeamiento/info-estadistica/padron-establecimientos >
https://www.argentina.gob.ar/educacion/planeamiento/info-estadistica/padron-establecimientos </a>
Consultado: 2020-05-10 </p>"),
br(),
h4(emoji("nerd_face"), strong("Quiero saber más")),
HTML("<p> <a href=https://canovasjm.netlify.app >
https://canovasjm.netlify.app </a> </p>")
),
column(9,
add_busy_spinner(spin = "fading-circle"),
leafletOutput("map", height = "85vh")
)
)
)
# Shiny server: renders the single leaflet map with both school sectors.
# Relies on objects built at script top level: d (full data), sector_estatal /
# sector_privado (per-sector subsets), label_estatal / label_privado (HTML
# labels), and pal (sector color palette).
server <- function(input, output, session) {
output$map <- renderLeaflet({
# create base map
m <- d %>%
leaflet() %>%
addTiles(group = "OSM") %>%
addProviderTiles("Stamen.TonerLite", group = "Toner") %>%
addProviderTiles("CartoDB.DarkMatter", group = "Dark") %>%
addResetMapButton() %>%
# initial view centered on Argentina
setView(lat = -39.0147402, lng = -63.6698073, zoom = 4)
# add sectors to base map; markers are clustered until zoom level 8,
# then shown individually
m %>%
addCircleMarkers(
data = sector_estatal,
radius = 2,
color = ~ pal(sector),
label = lapply(label_estatal, htmltools::HTML),
group = "Estatal",
clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>%
addCircleMarkers(
data = sector_privado,
radius = 2,
color = ~ pal(sector),
label = lapply(label_privado, htmltools::HTML),
group = "Privado",
clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>%
# basemap switcher plus per-sector overlay toggles
addLayersControl(baseGroups = c("Toner", "Dark", "OSM"),
overlayGroups = c("Estatal", "Privado"),
position = "topleft",
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend(title = "Referencias", position = "bottomright", pal = pal, values = c("Estatal", "Privado"))
})
}
shinyApp(ui, server) | /app.R | permissive | nicolasboattini/escuelas | R | false | false | 4,513 | r | # required libraries ------------------------------------------------------
library(data.table)
library(magrittr)
library(shiny)
library(leaflet)
library(leaflet.extras)
library(emojifont)
library(shinybusy)
library(RCurl)
# read geolocalized data --------------------------------------------------
d <- readRDS("shiny/escuelas_geolocalizado.rds")
d$cue_anexo <- as.integer(d$cue_anexo)
setDT(d)
# read corrections file to fix wrong geocoding ----------------------------
url <- getURL("https://raw.githubusercontent.com/canovasjm/escuelas/master/correcciones/correcciones.csv")
d_corrections <- read.csv(text = url)
setDT(d_corrections)
# replace wrong lat and lon with the corrections --------------------------
d <- d[d_corrections, on = "cue_anexo", c("lat", "lon") := list(i.lat, i.lon)]
# read labels for each sector ---------------------------------------------
label_estatal <- readRDS("shiny/label_estatal.rds")
label_privado <- readRDS("shiny/label_privado.rds")
# prepare data ------------------------------------------------------------
# filter data by sector
sector_estatal <- d[sector == "Estatal", ]
sector_privado <- d[sector == "Privado", ]
# general objects ---------------------------------------------------------
# define color palette
pal <- colorFactor(palette = c("darkred", "steelblue"),
levels = c("Estatal", "Privado"))
# shiny app ---------------------------------------------------------------
ui <- fluidPage(
tags$head(HTML('<link href="https://fonts.googleapis.com/css?family=Roboto+Mono" rel="stylesheet">')),
tags$head(HTML('<style>* {font-size: 100%; font-family: Roboto Mono;}</style>')),
h2("Establecimientos Educativos en Argentina", lapply(search_emoji("student"), emoji), emoji("argentina")),
fluidRow(
column(3,
h4(emoji("school"), strong("¿Qué hay acá?")),
HTML("<p> Un mapa con los establecimientos educativos (<span style= \"color: darkred;\">estatales</span> y
<span style= \"color: steelblue;\">privados</span>) de la República Argentina.</p>"),
br(),
h4(emoji("memo"), strong("Sobre los datos")),
HTML("<p> Provienen del <i>Padrón Oficial de Establecimientos Educativos</i>, que es el nomenclador unificado
de escuelas e incluye ofertas educativas de distintos programas, carreras y títulos; entre otras variables. <p>"),
br(),
h4(emoji("blue_book"), strong("Fuente")),
HTML("<p> Ministerio de Educación: <a href=https://www.argentina.gob.ar/educacion/planeamiento/info-estadistica/padron-establecimientos >
https://www.argentina.gob.ar/educacion/planeamiento/info-estadistica/padron-establecimientos </a>
Consultado: 2020-05-10 </p>"),
br(),
h4(emoji("nerd_face"), strong("Quiero saber más")),
HTML("<p> <a href=https://canovasjm.netlify.app >
https://canovasjm.netlify.app </a> </p>")
),
column(9,
add_busy_spinner(spin = "fading-circle"),
leafletOutput("map", height = "85vh")
)
)
)
server <- function(input, output, session) {
output$map <- renderLeaflet({
# create base map
m <- d %>%
leaflet() %>%
addTiles(group = "OSM") %>%
addProviderTiles("Stamen.TonerLite", group = "Toner") %>%
addProviderTiles("CartoDB.DarkMatter", group = "Dark") %>%
addResetMapButton() %>%
setView(lat = -39.0147402, lng = -63.6698073, zoom = 4)
# add sectors to base map
m %>%
addCircleMarkers(
data = sector_estatal,
radius = 2,
color = ~ pal(sector),
label = lapply(label_estatal, htmltools::HTML),
group = "Estatal",
clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>%
addCircleMarkers(
data = sector_privado,
radius = 2,
color = ~ pal(sector),
label = lapply(label_privado, htmltools::HTML),
group = "Privado",
clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>%
addLayersControl(baseGroups = c("Toner", "Dark", "OSM"),
overlayGroups = c("Estatal", "Privado"),
position = "topleft",
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend(title = "Referencias", position = "bottomright", pal = pal, values = c("Estatal", "Privado"))
})
}
shinyApp(ui, server) |
#' fit.curves
#'
#' Fit dose-response curves using MLE
#'
#' @param X concentrations
#' @param Y response values
#' @param Fname filename for saving figures
#' @param Figname Title to appear on figures
#' @param FigDir Directory where figures will be saved
#' @param Ylab Ylabel for figures
#' @param Xlab Xlabel for figures
#' @param axis.font fontsize for figures
#' @param log.base What base used to transform concentrations (NA = no transformation)
#' @param log.factor value added to log-transformed data to avoid log(0)
#' @param FigW Figure width
#' @param FigH Figure height
#' @param SigInit 2-element vector with start values for sigmoidal function
#' @param Uni5Init 5-element vector with start values for 6-param unimodal
#' @param Uni6Init 6-element vector with start values for 5-param unimodal
#' @param reltol numerical tolerance for mle2
#'
#' @import bbmle
#'
#' @return baseline mle2 object for null model
#' @return sigmoidal mle2 object for sigmoid DR curve
#' @return linear mle2 object for linear DR curve
#' @return unimodal5 mle2 object for 5-param unimodal DR curve
#' @return unimodal6 mle2 object for 6-param unimodal DR curve
#' @return quadratic mle2 object for quadratic DR curve
#' @return AIC.table Table with AICc results for each model
#' @return ANOVA Table with likelihood ratio test results for each model
#' @return FN File name used for plotting
#' @return B Identity of 'best' model as judged by likelihood ratio test
#' @return Pval Table of likelihood ratio test p-values
#'
#' @export
fit.curves = function(X,Y,Fname = '',Figname='',FigDir='',Ylab='Y',Xlab='X',axis.font = 0.75,
                      log.base = NA, log.factor = 0,
                      FigW=7,FigH=7,SigInit=c(0,0),Uni6Init=c(0,-1,2,2,2,2),
                      Uni5Init=NA,reltol=1e-4){
  # Fit a suite of dose-response models by maximum likelihood (bbmle::mle2),
  # compare them with AICc and likelihood-ratio tests (LRT), plot the
  # best-supported curve and a normal QQ plot of its residuals, and return
  # the fitted objects together with the comparison tables.

  # --- Null (intercept-only) model ---
  mBase = mle2(Int, start = c(g = mean(Y), s = 1), data = list(Y = Y))

  # --- Linear ---
  # lm() supplies starting values (LS = MLE here, but refitting with mle2
  # keeps every result as an mle2 object for the comparisons below).
  mLin = mle2(Linear, start = c(b0 = summary(lm(Y ~ X))[[4]][[1]],
                                b1 = summary(lm(Y ~ X))[[4]][[2]],
                                s = 1), data = list(X = X, Y = Y))

  # --- Sigmoidal ---
  mSigm = mle2(SigmoidDR, start = c(g = mean(Y), h = mean(Y),
                                    a = SigInit[1], b = SigInit[2], s = 1),
               data = list(X = X, Y = Y))

  # --- Unimodal (6-parameter) ---
  mUni6 = mle2(uniDR6, start = c(g = Uni6Init[1], h = Uni6Init[2],
                                 a1 = Uni6Init[3], b1 = Uni6Init[4],
                                 a2 = Uni6Init[5], b2 = Uni6Init[6], s = 1),
               data = list(X = X, Y = Y),
               control = list(maxit = 5000, reltol = reltol))

  # --- Unimodal (5-parameter) ---
  # BUG FIX: the original tested is.na(Uni5Init) on a length-5 vector (invalid
  # as an if() condition) and used `==` instead of `=` when unpacking A2 and
  # B2, which left them undefined whenever the user supplied start values.
  if (length(Uni5Init) == 1 && is.na(Uni5Init)) {
    # Not user-specified: heuristic start values from the data
    G = max(Y) - min(Y)  # y-range of the data
    H = min(Y)           # minimum value
    B1 = 1               # rate of increase for the rising portion (must be > 1)
    A2 = 1               # intercept for the falling portion (larger shifts right)
    B2 = -1              # rate of decrease for the falling portion (must be < 1)
  } else {
    # User-specified start values
    G  = Uni5Init[1]
    H  = Uni5Init[2]
    B1 = Uni5Init[3]
    A2 = Uni5Init[4]
    B2 = Uni5Init[5]
  }
  mUni5 = mle2(uniDR5, start = c(g = G, h = H, b1 = B1, a2 = A2, b2 = B2, s = 1),
               data = list(X = X, Y = Y),
               control = list(maxit = 5000, reltol = reltol))

  # --- Quadratic ---
  mQuad = mle2(Quadratic, start = c(b0 = mean(Y), b1 = 1, b2 = mean(X), s = 1),
               data = list(X = X, Y = Y),
               control = list(maxit = 5000, reltol = reltol))

  # AICc comparison of all fitted models
  AIC.table = AICctab(mBase, mLin, mSigm, mUni6, mUni5, mQuad,
                      nobs = length(X), sort = F)

  # Likelihood ratio tests against the null model.  Higher-order fits can
  # fail to converge, leaving the alternative with a *lower* likelihood than
  # the null; a placeholder (1) is stored then to avoid a misleading p-value.
  # Ordering: 1 = linear, 2 = quadratic, 3 = sigmoidal,
  #           4 = unimodal-5, 5 = unimodal-6.
  ANOVA = list()
  ANOVA[[1]] = if (logLik(mBase) < logLik(mLin)) {anova(mBase, mLin)} else {1}
  ANOVA[[2]] = if (logLik(mBase) < logLik(mQuad)) {anova(mBase, mQuad)} else {1}
  ANOVA[[3]] = if (logLik(mBase) < logLik(mSigm)) {anova(mBase, mSigm)} else {1}
  ANOVA[[4]] = if (logLik(mBase) < logLik(mUni5)) {anova(mBase, mUni5)} else {1}
  ANOVA[[5]] = if (logLik(mBase) < logLik(mUni6)) {anova(mBase, mUni6)} else {1}

  # Collect p-values and deviances from the LRT tables
  Pval = rep(1, length(ANOVA))
  Dev = rep(NA, length(ANOVA))
  for (a in seq_along(ANOVA)) {
    if (length(ANOVA[[a]]) > 1) {
      Pval[a] = ANOVA[[a]][10]  # p-value
      Dev[a] = ANOVA[[a]][4]    # deviance (-2 * log likelihood)
    }
  }

  # Best model = smallest LRT p-value (AIC-based selection would be an
  # alternative: Best = match(min(AIC.table$dAICc), AIC.table$dAICc))
  Best = match(min(Pval), Pval)

  Title = Figname
  Xticks.at = unique(X)   # tick locations (in the analysis scale)
  if (is.na(log.base)) {  # arithmetic x-axis: label ticks with raw values
    Xticks = Xticks.at
  } else {                # log scale: back-transform labels to arithmetic units
    Xticks = round(log.base^(Xticks.at) - log.factor, digits = 1)
  }

  # Open an OS-appropriate graphics window of the requested size
  switch(Sys.info()[['sysname']],
         Windows = {windows(width = FigW, height = FigH)},
         Darwin = {quartz(width = FigW, height = FigH)},
         Linux = {x11(width = FigW, height = FigH)})
  plot(X, Y,
       ylab = Ylab, xlab = Xlab, main = Title,
       cex = 1, pch = 1,
       cex.lab = 1, cex.axis = axis.font,
       xaxt = 'n')  # suppress default ticks; custom labels added below
  axis(side = 1, at = Xticks.at, labels = Xticks, cex.axis = axis.font)
  X_temp = seq(-5, 5, length.out = 1000)  # dense grid for drawing the curve

  # Solid line if the best model beats the null at alpha = 0.05, else dashed
  if (Pval[Best] < 0.05) {Lty = 1} else {Lty = 2}

  # BUG FIX: the original plotted the 6-parameter unimodal for Best == 4 and
  # the 5-parameter for Best == 5, the reverse of the ANOVA ordering above.
  # It also computed residuals as Y - X rather than observed minus fitted
  # (Y - Pr); Pr was computed but never used.
  if (Best == 1) { # Linear
    B = coef(mLin)
    P = B[1] + B[2]*X_temp  # curve on the plotting grid
    Pr = B[1] + B[2]*X      # fitted values at the data points
    Resid = Y - Pr
    lines(X_temp, P, lty = Lty)
    File.name = paste(FigDir, Fname, "_linear_dose_response.pdf", sep = "")
    dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  }
  if (Best == 2) { # Quadratic
    B = coef(mQuad)
    P = B[1] + B[2]*(X_temp - B[3])^2
    Pr = B[1] + B[2]*(X - B[3])^2
    Resid = Y - Pr
    lines(X_temp, P, lty = Lty)
    File.name = paste(FigDir, Fname, "_quadratic_dose_response.pdf", sep = "")
    dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  }
  if (Best == 3) { # Sigmoidal
    B = coef(mSigm)
    P = (B[2] - B[1])/(1 + exp(B[3] + B[4]*X_temp)) + B[1]
    Pr = (B[2] - B[1])/(1 + exp(B[3] + B[4]*X)) + B[1]
    Resid = Y - Pr
    lines(X_temp, P, lty = Lty)
    File.name = paste(FigDir, Fname, "_sigmoidal_dose_response.pdf", sep = "")
    dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  }
  if (Best == 4) { # Unimodal, 5-parameter
    B = coef(mUni5)
    P = B[1] + B[2]*(1 + B[3]*X_temp)/(1 + exp(-(B[4] + B[5]*X_temp)))
    Pr = B[1] + B[2]*(1 + B[3]*X)/(1 + exp(-(B[4] + B[5]*X)))
    Resid = Y - Pr
    lines(X_temp, P, lty = Lty)
    File.name = paste(FigDir, Fname, "_unimodal5_dose_response.pdf", sep = "")
    dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  }
  if (Best == 5) { # Unimodal, 6-parameter
    B = coef(mUni6)
    P = B[1] + B[2]*(1 + exp(-(B[3] + B[4]*X_temp)))/(1 + exp(-(B[5] + B[6]*X_temp)))
    Pr = B[1] + B[2]*(1 + exp(-(B[3] + B[4]*X)))/(1 + exp(-(B[5] + B[6]*X)))
    Resid = Y - Pr
    lines(X_temp, P, lty = Lty)
    File.name = paste(FigDir, Fname, "_unimodal6_dose_response.pdf", sep = "")
    dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  }
  # Close the curve window (the original only closed it in one branch,
  # leaking graphics devices for the other four models)
  dev.off()

  # Normal QQ plot of the best model's residuals, in a fresh window
  switch(Sys.info()[['sysname']],
         Windows = {windows(width = FigW, height = FigH)},
         Darwin = {quartz(width = FigW, height = FigH)},
         Linux = {x11(width = FigW, height = FigH)})
  qqnorm(y = Resid)
  qqline(y = Resid)
  File.name = paste(FigDir, Fname, "_bestmodel_qqplot.pdf", sep = "")
  dev.print(device = pdf, file = File.name, useDingbats = FALSE)
  dev.off()

  return(list("baseline" = mBase, "sigmoidal" = mSigm, "linear" = mLin,
              "unimodal5" = mUni5, "unimodal6" = mUni6, "quadratic" = mQuad,
              "AIC" = AIC.table, "ANOVA" = ANOVA, 'FN' = File.name, 'B' = Best,
              "Pval" = Pval))
}
| /R/fit.curves.R | permissive | jwilsonwhite/DRcurves | R | false | false | 9,647 | r | #' fit.curves
#'
#' Fit dose-response curves using MLE
#'
#' @param X concentrations
#' @param Y response values
#' @param Fname filename for saving figures
#' @param Figname Title to appear on figures
#' @param FigDir Directory where figures will be saved
#' @param Ylab Ylabel for figures
#' @param Xlab Xlabel for figures
#' @param axis.font fontsize for figures
#' @param log.base What base used to transform concentrations (NA = no transformation)
#' @param log.factor value added to log-transformed data to avoid log(0)
#' @param FigW Figure width
#' @param FigH Figure height
#' @param SigInit 2-element vector with start values for sigmoidal function
#' @param Uni5Init 5-element vector with start values for 6-param unimodal
#' @param Uni6Init 6-element vector with start values for 5-param unimodal
#' @param reltol numerical tolerance for mle2
#'
#' @import bbmle
#'
#' @return baseline mle2 object for null model
#' @return sigmoidal mle2 object for sigmoid DR curve
#' @return linear mle2 object for linear DR curve
#' @return unimodal5 mle2 object for 5-param unimodal DR curve
#' @return unimodal6 mle2 object for 6-param unimodal DR curve
#' @return quadratic mle2 object for quadratic DR curve
#' @return AIC.table Table with AICc results for each model
#' @return ANOVA Table with likelihood ratio test results for each model
#' @return FN File name used for plotting
#' @return B Identity of 'best' model as judged by likelihood ratio test
#' @return Pval Table of likelihood ratio test p-values
#'
#' @export
fit.curves = function(X,Y,Fname = '',Figname='',FigDir='',Ylab='Y',Xlab='X',axis.font = 0.75,
log.base = NA, log.factor = 0,
FigW=7,FigH=7,SigInit=c(0,0),Uni6Init=c(0,-1,2,2,2,2),
Uni5Init=NA,reltol=1e-4){
# Fit a suite of models using mle2 numerical routine, using best guesses for initial conditions
# Intercept-only
mBase = mle2(Int,start=c(g = mean(Y),s=1),data=list(Y=Y))
# Linear
# Initial guess at slope using lm (yes this is silly because MLE = LS in this case, but this ensures all the results are a MLE2 object)
mLin = mle2(Linear,start=c(b0 = summary(lm(Y~X))[[4]][[1]],
b1=summary(lm(Y~X))[[4]][[2]],
s=1),data=list(X=X,Y=Y))
# Sigmoidal
mSigm = mle2(SigmoidDR,start=c(g=mean(Y),h=mean(Y),a=SigInit[1],b=SigInit[2],s=1),data=list(X=X,Y=Y))
# Unimodal (6-parameter)
# Find good starting values:
mUni6 = mle2(uniDR6,start=c(g=Uni6Init[1],h=Uni6Init[2],a1=Uni6Init[3],b1=Uni6Init[4],
a2=Uni6Init[5],b2=Uni6Init[6],s=1),data=list(X=X,Y=Y),
control=list(maxit=5000, reltol=reltol))
# Unimodal (5-parameter)
if (is.na(Uni5Init)){ # if it was not user-specified
G = max(Y)-min(Y) # 0 # Difference between maximum and minimum values (i.e., y-range of data
H = min(Y) #-1 # Minimum value
# parameters for the increasing portion:
B1 = 1 #2 # Rate of increase (must be > 1)
# parameters for the decreasing portion
A2 = 1 # Intercept (larger positive numbers move this to the right)
B2 = -1 # Rate of decrease (must be < 1)
}else{ # if user-specified
G = Uni5Init[1]
H= Uni5Init[2]
B1= Uni5Init[3]
A2== Uni5Init[4]
B2== Uni5Init[5]}
mUni5 = mle2(uniDR5,start=c(g=G,h=H,b1=B1,a2=A2,b2=B2,s=1),data=list(X=X,Y=Y),
control=list(maxit=5000, reltol=reltol))
# Unimodal (4-parameter, deprecated)
#G = 0 # Difference between maximum and minimum values (i.e., y-range of data
#H = -1 # Minimum value
# parameters for the increasing portion:
# B1 = 2 # Rate of increase (must be > 1)
# parameters for the decreasing portion
# B2 = -4 # Rate of decrease (must be < 1)
# mUni4 = mle2(uniDR4,start=c(g=G,h=H,b1=B1,b2=B2,s=1),data=list(X=X,Y=Y))
# Quadratic
mQuad = mle2(Quadratic,start=c(b0 = mean(Y),b1=1,b2=mean(X),s=1),data=list(X=X,Y=Y),
control=list(maxit=5000, reltol=reltol))
# Calculate AICs from mle objects
AIC.table = AICctab(mBase,mLin,mSigm,mUni6,mUni5,mQuad,nobs=length(X),sort=F)
# Likelihood ratio tests.
# Sometimes higher-order models fail to converge, violating the assumptions of the LRT (that the null model always has lower likelihood)
# A constraint is applied to avoid misleading p-values in that case
ANOVA = list()
ANOVA[[1]] = if (logLik(mBase) < logLik(mLin)){anova(mBase,mLin)}else{1}
ANOVA[[2]] = if (logLik(mBase) < logLik(mQuad)){anova(mBase,mQuad)}else{1}
ANOVA[[3]] = if (logLik(mBase) < logLik(mSigm)){anova(mBase,mSigm)}else{1}
ANOVA[[4]] = if (logLik(mBase) < logLik(mUni5)){anova(mBase,mUni5)}else{1}
ANOVA[[5]] = if (logLik(mBase) < logLik(mUni6)){anova(mBase,mUni6)}else{1}
# ANOVA.6 = anova(mBase,mUni3)
# Tally up p-values and deviances
Pval = rep(1,length(ANOVA)) # number of replicates
Dev = rep(NA,length(ANOVA)) # number of replicates
for (a in 1:length(ANOVA)){
if (length(ANOVA[[a]])>1){
Pval[a]=ANOVA[[a]][10] # pvalue
Dev[a]=ANOVA[[a]][4] # deviance (-2 * log likelihood)
}}
# Pick the best one and make a plot, based on Pvals
# Note: it might be better to do this based on AIC...
Best = match(min(Pval),Pval)
# Choose best model based on AIC:
#Best = match(min(AIC.table$dAICc),AIC.table$dAICc)
Title = Figname
Xticks.at = unique(X) # locations of xticks
if (is.na(log.base)){ # if the x-axis is arithmetic, no action needed
Xticks = Xticks.at} # if logarithmic, transform values to be expressed in arithmetic scale
else{
Xticks = round(log.base^(Xticks.at) - log.factor,digits=1)}
# This opens a new graphic window of a specific size, to be saved as a pdf file
# You can adjust these
switch(Sys.info()[['sysname']],
Windows= {windows(width=FigW,height=FigH)},
Darwin = {quartz(width=FigW,height=FigH)},
Linux = {x11(width=FigW,height=FigH)})
plot(X,Y, #xaxp=c(-3,3,12),
ylab=Ylab, xlab = Xlab, main = Title, # X & Y labels and Title
cex = 1, pch = 1, # size & symbol type for the markers
cex.lab = 1, cex.axis = axis.font, # size for axes labels and tick marks
xaxt = 'n') # turns off default xtick labels
axis(side = 1,at=Xticks.at,labels=Xticks,cex.axis=axis.font) # plot xtick lables where the actual concentrations are
X_temp = seq(-5,5,length.out=1000) # dummy x-values for plotting the curve
# Choose the correct line type
# the code below is appropriate if basing results on an AIC table.
#if (Best > 1){ # Best == 1 corresponds to intercept-only model which should always be dashed
#if (Pval[Best-1]<0.05){Lty = 1}else{Lty=2} }
# the code below is appropriate if basing results on the LRT ('ANOVA') table
if (Pval[Best]<0.05){Lty = 1}else{Lty=2}
# Note that the numbers corresponding to each model depend on whether you use AIC or LRT to determine which ones to plot.
# Currently configured for LRT
# if (Best==1){ # Baseline
# lines(c(X_temp[1],X_temp[1000]),c(mean(Y),mean(Y)),lty=2)
# Resid = Y-mean(Y)
# File.name = paste("dose_response_figures/",Fname,"_",Cycle,"_baseline_dose_response.pdf",sep="")
# dev.print(device=pdf,file=File.name,useDingbats=FALSE)
#}
if (Best==1){ # Linear
B = coef(mLin)
P = B[1]+B[2]*X_temp
Pr = B[1]+B[2]*X
Resid = Y-X
lines(X_temp,P,lty=Lty)
File.name = paste(FigDir,Fname,"_linear_dose_response.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
}
if (Best==2){ # Quadratic
B = coef(mQuad)
P = B[1]+B[2]*(X_temp-B[3])^2
Pr = B[1]+B[2]*(X-B[3])^2
Resid = Y-X
lines(X_temp,P,lty=Lty)
File.name = paste(FigDir,Fname,"_quadratic_dose_response.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
}
if (Best==3){ # Sigmoidal
B = coef(mSigm)
P = (B[2]-B[1])/(1+exp(B[3] + B[4]*X_temp))+B[1]
Pr = (B[2]-B[1])/(1+exp(B[3] + B[4]*X))+B[1]
Resid = Y-X
lines(X_temp,P,lty=Lty)
File.name = paste(FigDir,Fname,"_sigmoidal_dose_response.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
}
if (Best==4){ # Uni6
B = coef(mUni6)
P = B[1]+B[2]*(1 + exp(-(B[3]+B[4]*X_temp)))/(1+exp(-(B[5] + B[6]*X_temp)))
Pr = B[1]+B[2]*(1 + exp(-(B[3]+B[4]*X)))/(1+exp(-(B[5] + B[6]*X)))
Resid = Y-X
lines(X_temp,P,lty=Lty)
File.name = paste(FigDir,Fname,"_unimodal6_dose_response.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
}
if (Best==5){ # Uni5
B = coef(mUni5)
P = B[1]+B[2]*(1 + B[3]*X_temp)/(1+exp(-(B[4] + B[5]*X_temp)))
Pr = B[1]+B[2]*(1 + B[3]*X)/(1+exp(-(B[4] + B[5]*X)))
Resid = Y-X
lines(X_temp,P,lty=Lty)
File.name = paste(FigDir,Fname,"_unimodal5_dose_response.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
dev.off()
}
# Also create normal QQ plot
# Collect residuals...
# This opens a new graphic window of a specific size, to be saved as a pdf file
# You can adjust these
switch(Sys.info()[['sysname']],
Windows= {windows(width=FigW,height=FigH)},
Darwin = {quartz(width=FigW,height=FigH)},
Linux = {x11(width=FigW,height=FigH)})
qqnorm(y=Resid)
qqline(y=Resid)
File.name = paste(FigDir,Fname,"_bestmodel_qqplot.pdf",sep="")
dev.print(device=pdf,file=File.name,useDingbats=FALSE)
dev.off()
return(list("baseline"=mBase,"sigmoidal"=mSigm,"linear"=mLin,
"unimodal5"=mUni5,"unimodal6"=mUni6,"quadratic"=mQuad,
"AIC"=AIC.table,"ANOVA"=ANOVA,'FN'=File.name,'B'=Best,
"Pval"=Pval))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myfunc.R
\name{myfunc}
\alias{myfunc}
\title{Function designed purely to show unit testing}
\usage{
myfunc(a = 1, b = 2, c = "blah")
}
\arguments{
\item{a}{numeric}
\item{b}{numeric}
\item{c}{character}
}
\description{
Function designed purely to show unit testing
}
| /man/myfunc.Rd | no_license | jchenpku/Rtraining | R | false | true | 348 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myfunc.R
\name{myfunc}
\alias{myfunc}
\title{Function designed purely to show unit testing}
\usage{
myfunc(a = 1, b = 2, c = "blah")
}
\arguments{
\item{a}{numeric}
\item{b}{numeric}
\item{c}{character}
}
\description{
Function designed purely to show unit testing
}
|
#
# icpe13_datamill_xy.R, 8 Jan 16
#
# Data from:
# DataMill: Rigorous Performance Evaluation Made Easy
# Augusto Born de Oliveira and Jean-Christophe Petkovich and Thomas Reidemeister and Sebastian Fischmeister
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("plyr")
# Plot one host's benchmark measurements onto the current plot: raw
# task-clock points plus a line through the per-optimization-level means.
# Intended as a d_ply() callback (one call per hostname group).
#
# df - subset of the benchmark data for a single host; must contain
#      opt_flag (numeric-coded optimization level) and task_clock columns.
#
# Relies on pal_col (color palette) and col_num (shared color index)
# existing in the calling environment; col_num is advanced via <<- so
# successive hosts are drawn in successive colors.
plot_perf=function(df)
{
points(df$opt_flag, df$task_clock, col=pal_col[col_num])
# mean clock time per optimization level; ddply returns it in column V1
t=ddply(df, .(opt_flag), function(df) mean(df$task_clock))
lines(t$opt_flag, t$V1, col=pal_col[col_num])
col_num <<- col_num+1
}
bench=read.csv(paste0(ESEUR_dir, "benchmark/icpe13_datamill_xy.csv.xz"), as.is=TRUE)
# 129.97.68.195 A 1.6GHz Nano X2
# 129.97.68.196 B 1.5GHz Xeon
# 129.97.68.204 C 600MHz ARM
# 129.97.68.206 D 600MHz ARM
# 129.97.68.208 E 3.2GHz P4
# 129.97.68.213 F 3.4GHz i7
# 129.97.68.214 G 3.3GHz i5
# 129.97.69.162 H 3.2GHz P4
# 129.97.69.168 I 1.6GHz P4
# 129.97.69.182 J 3.0GHz Pentium D
# 129.97.69.195 K 1.6GHz P4
# 129.97.69.198 L 150MHz Celeron
processors=c("1.6GHz Nano X2","600MHz ARM","3.2GHz P4","3.4GHz i7","3.3GHz i5","1.6GHz P4","1.6GHz P4")
opt_levels=c("-O0", "-O1", "-Os", "-O2", "-O3")
bench$opt_flag=mapvalues(bench$opt_flag, opt_levels, 1:length(opt_levels))
xsub = subset(bench, type=="xz")
pal_col=rainbow(length(unique(xsub$hostname)))
ybounds=range(xsub$task_clock)
plot(1, type="n",
xlim=c(1, length(opt_levels)), ylim=ybounds,
xaxt="n",
xlab="Optimization level", ylab="Clock time (ms)\n")
axis(1, at=1:length(opt_levels), labels=opt_levels)
col_num=1
d_ply(xsub, .(hostname), plot_perf)
legend(x="topright", legend=processors, bty="n", fill=pal_col, cex=1.2)
| /benchmark/icpe13_datamill_xy.R | no_license | alanponce/ESEUR-code-data | R | false | false | 1,641 | r | #
# icpe13_datamill_xy.R, 8 Jan 16
#
# Data from:
# DataMill: Rigorous Performance Evaluation Made Easy
# Augusto Born de Oliveira and Jean-Christophe Petkovich and Thomas Reidemeister and Sebastian Fischmeister
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("plyr")
plot_perf=function(df)
{
points(df$opt_flag, df$task_clock, col=pal_col[col_num])
t=ddply(df, .(opt_flag), function(df) mean(df$task_clock))
lines(t$opt_flag, t$V1, col=pal_col[col_num])
col_num <<- col_num+1
}
bench=read.csv(paste0(ESEUR_dir, "benchmark/icpe13_datamill_xy.csv.xz"), as.is=TRUE)
# 129.97.68.195 A 1.6GHz Nano X2
# 129.97.68.196 B 1.5GHz Xeon
# 129.97.68.204 C 600MHz ARM
# 129.97.68.206 D 600MHz ARM
# 129.97.68.208 E 3.2GHz P4
# 129.97.68.213 F 3.4GHz i7
# 129.97.68.214 G 3.3GHz i5
# 129.97.69.162 H 3.2GHz P4
# 129.97.69.168 I 1.6GHz P4
# 129.97.69.182 J 3.0GHz Pentium D
# 129.97.69.195 K 1.6GHz P4
# 129.97.69.198 L 150MHz Celeron
processors=c("1.6GHz Nano X2","600MHz ARM","3.2GHz P4","3.4GHz i7","3.3GHz i5","1.6GHz P4","1.6GHz P4")
opt_levels=c("-O0", "-O1", "-Os", "-O2", "-O3")
bench$opt_flag=mapvalues(bench$opt_flag, opt_levels, 1:length(opt_levels))
xsub = subset(bench, type=="xz")
pal_col=rainbow(length(unique(xsub$hostname)))
ybounds=range(xsub$task_clock)
plot(1, type="n",
xlim=c(1, length(opt_levels)), ylim=ybounds,
xaxt="n",
xlab="Optimization level", ylab="Clock time (ms)\n")
axis(1, at=1:length(opt_levels), labels=opt_levels)
col_num=1
d_ply(xsub, .(hostname), plot_perf)
legend(x="topright", legend=processors, bty="n", fill=pal_col, cex=1.2)
|
\name{survfitJM}
\alias{survfitJM}
\alias{survfitJM.JMbayes}
\title{Prediction in Joint Models}
\description{
This function computes the conditional probability of surviving later times than the last observed time for which a
longitudinal measurement was available.
}
\usage{
survfitJM(object, newdata, \dots)
\method{survfitJM}{JMbayes}(object, newdata,
type = c("SurvProb", "Density"), idVar = "id",
simulate = TRUE, survTimes = NULL, last.time = NULL,
LeftTrunc_var = NULL, M = 200L,
CI.levels = c(0.025, 0.975), log = FALSE, scale = 1.6,
weight = rep(1, nrow(newdata)),
init.b = NULL, seed = 1L, \dots)
}
\arguments{
\item{object}{an object inheriting from class \code{JMbayes}.}
\item{newdata}{a data frame that contains the longitudinal and covariate information for the subjects for which prediction
of survival probabilities is required. The names of the variables in this data frame must be the same as in the data frames that
were used to fit the linear mixed effects model (using \code{lme()}) and the survival model (using \code{coxph()})
that were supplied as the first two arguments of \code{\link{jointModelBayes}}. In addition, this data frame should contain a variable
that identifies the different subjects (see also argument \code{idVar}).}
\item{type}{character string indicating what to compute, i.e., survival probabilities or the log conditional density.}
\item{idVar}{the name of the variable in \code{newdata} that identifies the different subjects.}
\item{simulate}{logical; if \code{TRUE}, a Monte Carlo approach is used to estimate survival probabilities. If \code{FALSE},
a first order estimator is used instead. (see \bold{Details})}
\item{survTimes}{a numeric vector of times for which prediction survival probabilities are to be computed.}
\item{last.time}{a numeric vector or character string. This specifies the known time at which each of the subjects in \code{newdata}
was known to be alive. If \code{NULL}, then this is automatically taken as the last time each subject provided a longitudinal
measurement. If a numeric vector, then it is assumed to contain this last time point for each subject. If a character string, then
it should be a variable in the data frame \code{newdata}.}
\item{LeftTrunc_var}{character string indicating the name of the variable in \code{newdata} that denotes the left-truncation
time.}
\item{M}{integer denoting how many Monte Carlo samples to use -- see \bold{Details}.}
\item{CI.levels}{a numeric vector of length two that specifies which quantiles to use for the calculation of confidence interval for the
predicted probabilities -- see \bold{Details}.}
\item{log}{logical, should results be returned in the log scale.}
\item{scale}{a numeric scalar that controls the acceptance rate of the Metropolis-Hastings algorithm -- see \bold{Details}.}
\item{weight}{a numeric vector of weights to be applied to the predictions of each subject.}
\item{init.b}{a numeric matrix of initial values for the random effects. These are used in the optimization procedure that finds the
mode of the posterior distribution described in Step 2 below.}
\item{seed}{numeric scalar, the random seed used to produce the results.}
\item{\dots}{additional arguments; currently none is used.}
}
\details{
Based on a fitted joint model (represented by \code{object}), and a history of longitudinal responses
\eqn{\tilde{y}_i(t) = \{y_i(s), 0 \leq s \leq t\}}{tilde{y_i}(t) = {y_i(s), 0 \leq s \leq t}} and a covariates vector \eqn{x_i} (stored in
\code{newdata}), this function provides estimates of \eqn{Pr(T_i > u | T_i > t, \tilde{y}_i(t), x_i)}{Pr(T_i > u | T_i > t,
tilde{y}_i(t), x_i)}, i.e., the conditional probability of surviving time \eqn{u} given that subject \eqn{i}, with covariate information
\eqn{x_i}, has survived up to time \eqn{t} and has provided the longitudinal measurements \eqn{\tilde{y}_i(t)}{tilde{y}_i(t)}.
To estimate \eqn{Pr(T_i > u | T_i > t, \tilde{y}_i(t), x_i)}{Pr(T_i > u | T_i > t, tilde{y}_i(t), x_i)} and if \code{simulate = TRUE}, a
Monte Carlo procedure is followed with the following steps:
\describe{
\item{Step 1:}{Take randomly a realization, say \eqn{\theta^*} from the MCMC sample of posterior of the joint model represented by \code{object}.}
\item{Step 2:}{Simulate random effects values, say \eqn{b_i^*}, from their posterior distribution given survival up to time \eqn{t},
the vector of longitudinal responses \eqn{\tilde{y}_i(t)} and \eqn{\theta^*}. This is achieved using a Metropolis-Hastings algorithm with
independent proposals from a properly centered and scaled multivariate \eqn{t} distribution. The \code{scale} argument controls the
acceptance rate for this algorithm.}
\item{Step 3:}{Using \eqn{\theta^*} and \eqn{b_i^*}, compute \eqn{Pr(T_i > u | T_i > t, b_i^*, x_i; \theta^*)}{Pr(T_i >
u | T_i > t, b_i^*, x_i; \theta^*)}.}
\item{Step 4:}{Repeat Steps 1-3 \code{M} times.}
}
Based on the \code{M} estimates of the conditional probabilities, we compute useful summary statistics, such as their mean, median, and
percentiles (to produce a confidence interval).
If \code{simulate = FALSE}, then survival probabilities are estimated using the formula \deqn{Pr(T_i > u | T_i > t, \hat{b}_i, x_i;
\hat{\theta}),}{Pr(T_i > u | T_i > t, hat{b}_i, x_i; hat{\theta}),} where \eqn{\hat{\theta}} denotes the posterior means for the parameters,
and \eqn{\hat{b}_i} denotes the posterior means for the random effects.
}
\value{
A list of class \code{survfit.JMbayes} with components:
\item{summaries}{a list with elements numeric matrices with numeric summaries of the predicted probabilities for each subject.}
\item{survTimes}{a copy of the \code{survTimes} argument.}
\item{last.time}{a numeric vector with the time of the last available longitudinal measurement of each subject.}
\item{obs.times}{a list with elements numeric vectors denoting the timings of the longitudinal measurements for each subject.}
\item{y}{a list with elements numeric vectors denoting the longitudinal responses for each subject.}
\item{full.results}{a list with elements numeric matrices with predicted probabilities for each subject in each replication of the Monte Carlo
scheme described above.}
\item{success.rate}{a numeric vector with the success rates of the Metropolis-Hastings algorithm described above for each subject.}
\item{scale}{a copy of the \code{scale} argument.}
}
\references{
Rizopoulos, D. (2012) \emph{Joint Models for Longitudinal and Time-to-Event Data: with
Applications in R}. Boca Raton: Chapman and Hall/CRC.
Rizopoulos, D. (2011). Dynamic predictions and prospective accuracy in joint models for longitudinal and time-to-event data.
\emph{Biometrics} \bold{67}, 819--829.
}
\author{Dimitris Rizopoulos \email{d.rizopoulos@erasmusmc.nl}}
\seealso{\code{\link{plot.survfit.JMbayes}}, \code{\link{predict.JMbayes}},
\code{\link{aucJM}}, \code{\link{dynCJM}}, \code{\link{prederrJM}}, \code{\link{jointModelBayes}}}
\examples{
\dontrun{
# we construct the composite event indicator (transplantation or death)
pbc2$status2 <- as.numeric(pbc2$status != "alive")
pbc2.id$status2 <- as.numeric(pbc2.id$status != "alive")
# we fit the joint model using splines for the subject-specific
# longitudinal trajectories and a spline-approximated baseline
# risk function
lmeFit <- lme(log(serBilir) ~ ns(year, 2), data = pbc2,
random = ~ ns(year, 2) | id)
survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)
jointFit <- jointModelBayes(lmeFit, survFit, timeVar = "year")
# we will compute survival probabilities for Subject 2 in a dynamic manner,
# i.e., after each longitudinal measurement is recorded
ND <- pbc2[pbc2$id == 2, ] # the data of Subject 2
survPreds <- vector("list", nrow(ND))
for (i in 1:nrow(ND)) {
survPreds[[i]] <- survfitJM(jointFit, newdata = ND[1:i, ])
}
survPreds
# run Shiny app
if (require("shiny")) {
shiny::runApp(file.path(.Library, "JMbayes/demo"))
}
}
}
\keyword{methods}
| /man/survfitJM.Rd | no_license | TobiasPolak/JMbayes | R | false | false | 8,160 | rd | \name{survfitJM}
\alias{survfitJM}
\alias{survfitJM.JMbayes}
\title{Prediction in Joint Models}
\description{
This function computes the conditional probability of surviving later times than the last observed time for which a
longitudinal measurement was available.
}
\usage{
survfitJM(object, newdata, \dots)
\method{survfitJM}{JMbayes}(object, newdata,
type = c("SurvProb", "Density"), idVar = "id",
simulate = TRUE, survTimes = NULL, last.time = NULL,
LeftTrunc_var = NULL, M = 200L,
CI.levels = c(0.025, 0.975), log = FALSE, scale = 1.6,
weight = rep(1, nrow(newdata)),
init.b = NULL, seed = 1L, \dots)
}
\arguments{
\item{object}{an object inheriting from class \code{JMbayes}.}
\item{newdata}{a data frame that contains the longitudinal and covariate information for the subjects for which prediction
of survival probabilities is required. The names of the variables in this data frame must be the same as in the data frames that
were used to fit the linear mixed effects model (using \code{lme()}) and the survival model (using \code{coxph()})
that were supplied as the first two arguments of \code{\link{jointModelBayes}}. In addition, this data frame should contain a variable
that identifies the different subjects (see also argument \code{idVar}).}
\item{type}{character string indicating what to compute, i.e., survival probabilities or the log conditional density.}
\item{idVar}{the name of the variable in \code{newdata} that identifies the different subjects.}
\item{simulate}{logical; if \code{TRUE}, a Monte Carlo approach is used to estimate survival probabilities. If \code{FALSE},
a first order estimator is used instead. (see \bold{Details})}
\item{survTimes}{a numeric vector of times for which prediction survival probabilities are to be computed.}
\item{last.time}{a numeric vector or character string. This specifies the known time at which each of the subjects in \code{newdata}
was known to be alive. If \code{NULL}, then this is automatically taken as the last time each subject provided a longitudinal
measurement. If a numeric vector, then it is assumed to contain this last time point for each subject. If a character string, then
it should be a variable in the data frame \code{newdata}.}
\item{LeftTrunc_var}{character string indicating the name of the variable in \code{newdata} that denotes the left-truncation
time.}
\item{M}{integer denoting how many Monte Carlo samples to use -- see \bold{Details}.}
\item{CI.levels}{a numeric vector of length two that specifies which quantiles to use for the calculation of confidence interval for the
predicted probabilities -- see \bold{Details}.}
\item{log}{logical, should results be returned in the log scale.}
\item{scale}{a numeric scalar that controls the acceptance rate of the Metropolis-Hastings algorithm -- see \bold{Details}.}
\item{weight}{a numeric vector of weights to be applied to the predictions of each subject.}
\item{init.b}{a numeric matrix of initial values for the random effects. These are used in the optimization procedure that finds the
mode of the posterior distribution described in Step 2 below.}
\item{seed}{numeric scalar, the random seed used to produce the results.}
\item{\dots}{additional arguments; currently none is used.}
}
\details{
Based on a fitted joint model (represented by \code{object}), and a history of longitudinal responses
\eqn{\tilde{y}_i(t) = \{y_i(s), 0 \leq s \leq t\}}{tilde{y_i}(t) = {y_i(s), 0 \leq s \leq t}} and a covariates vector \eqn{x_i} (stored in
\code{newdata}), this function provides estimates of \eqn{Pr(T_i > u | T_i > t, \tilde{y}_i(t), x_i)}{Pr(T_i > u | T_i > t,
tilde{y}_i(t), x_i)}, i.e., the conditional probability of surviving time \eqn{u} given that subject \eqn{i}, with covariate information
\eqn{x_i}, has survived up to time \eqn{t} and has provided the longitudinal measurements \eqn{\tilde{y}_i(t)}{tilde{y}_i(t)}.
To estimate \eqn{Pr(T_i > u | T_i > t, \tilde{y}_i(t), x_i)}{Pr(T_i > u | T_i > t, tilde{y}_i(t), x_i)} and if \code{simulate = TRUE}, a
Monte Carlo procedure is followed with the following steps:
\describe{
\item{Step 1:}{Take randomly a realization, say \eqn{\theta^*} from the MCMC sample of posterior of the joint model represented by \code{object}.}
\item{Step 2:}{Simulate random effects values, say \eqn{b_i^*}, from their posterior distribution given survival up to time \eqn{t},
the vector of longitudinal responses \eqn{\tilde{y}_i(t)} and \eqn{\theta^*}. This is achieved using a Metropolis-Hastings algorithm with
independent proposals from a properly centered and scaled multivariate \eqn{t} distribution. The \code{scale} argument controls the
acceptance rate for this algorithm.}
\item{Step 3:}{Using \eqn{\theta^*} and \eqn{b_i^*}, compute \eqn{Pr(T_i > u | T_i > t, b_i^*, x_i; \theta^*)}{Pr(T_i >
u | T_i > t, b_i^*, x_i; \theta^*)}.}
\item{Step 4:}{Repeat Steps 1-3 \code{M} times.}
}
Based on the \code{M} estimates of the conditional probabilities, we compute useful summary statistics, such as their mean, median, and
percentiles (to produce a confidence interval).
If \code{simulate = FALSE}, then survival probabilities are estimated using the formula \deqn{Pr(T_i > u | T_i > t, \hat{b}_i, x_i;
\hat{\theta}),}{Pr(T_i > u | T_i > t, hat{b}_i, x_i; hat{\theta}),} where \eqn{\hat{\theta}} denotes the posterior means for the parameters,
and \eqn{\hat{b}_i} denotes the posterior means for the random effects.
}
\value{
A list of class \code{survfit.JMbayes} with components:
\item{summaries}{a list with elements numeric matrices with numeric summaries of the predicted probabilities for each subject.}
\item{survTimes}{a copy of the \code{survTimes} argument.}
\item{last.time}{a numeric vector with the time of the last available longitudinal measurement of each subject.}
\item{obs.times}{a list with elements numeric vectors denoting the timings of the longitudinal measurements for each subject.}
\item{y}{a list with elements numeric vectors denoting the longitudinal responses for each subject.}
\item{full.results}{a list with elements numeric matrices with predicted probabilities for each subject in each replication of the Monte Carlo
scheme described above.}
\item{success.rate}{a numeric vector with the success rates of the Metropolis-Hastings algorithm described above for each subject.}
\item{scale}{a copy of the \code{scale} argument.}
}
\references{
Rizopoulos, D. (2012) \emph{Joint Models for Longitudinal and Time-to-Event Data: with
Applications in R}. Boca Raton: Chapman and Hall/CRC.
Rizopoulos, D. (2011). Dynamic predictions and prospective accuracy in joint models for longitudinal and time-to-event data.
\emph{Biometrics} \bold{67}, 819--829.
}
\author{Dimitris Rizopoulos \email{d.rizopoulos@erasmusmc.nl}}
\seealso{\code{\link{plot.survfit.JMbayes}}, \code{\link{predict.JMbayes}},
\code{\link{aucJM}}, \code{\link{dynCJM}}, \code{\link{prederrJM}}, \code{\link{jointModelBayes}}}
\examples{
\dontrun{
# we construct the composite event indicator (transplantation or death)
pbc2$status2 <- as.numeric(pbc2$status != "alive")
pbc2.id$status2 <- as.numeric(pbc2.id$status != "alive")
# we fit the joint model using splines for the subject-specific
# longitudinal trajectories and a spline-approximated baseline
# risk function
lmeFit <- lme(log(serBilir) ~ ns(year, 2), data = pbc2,
random = ~ ns(year, 2) | id)
survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)
jointFit <- jointModelBayes(lmeFit, survFit, timeVar = "year")
# we will compute survival probabilities for Subject 2 in a dynamic manner,
# i.e., after each longitudinal measurement is recorded
ND <- pbc2[pbc2$id == 2, ] # the data of Subject 2
survPreds <- vector("list", nrow(ND))
for (i in 1:nrow(ND)) {
survPreds[[i]] <- survfitJM(jointFit, newdata = ND[1:i, ])
}
survPreds
# run Shiny app
if (require("shiny")) {
shiny::runApp(file.path(.Library, "JMbayes/demo"))
}
}
}
\keyword{methods}
|
##' Split the org file by nodes.
##'
##' \code{split_orgfile} partitions the lines of an org file into one
##' chunk per headline, using the headline positions as split points.
##' Any lines that precede the first headline are dropped.
##' @param x org object as character vector.
##' @return the nodes of an org file as a character list.
split_orgfile <-
    function(x) {
        ## positions of the lines that are headlines
        headline_ids <- which(complete.cases(extract_raw_headlines(x)))
        ## the running count of headlines seen so far assigns every line to
        ## a node; group 0 (if present) is the preamble before any headline
        node_group <- cumsum(seq_along(x) %in% headline_ids)
        nodes <- unname(split(x, node_group))
        ## discard the preamble group when content precedes the first headline
        if (length(headline_ids) > 0 && headline_ids[1] != 1) {
            nodes <- nodes[2:length(nodes)]
        }
        nodes
    }
| /R/split_orgfile.r | no_license | lwjohnst86/orgclockr | R | false | false | 746 | r | ##' Split the org file by nodes.
##'
##' \code{split_orgfile} splits the org file by the index of the
##' headlines.
##' @param x org object as character vector.
##' @return the nodes of an org file as a character list.
split_orgfile <-
function(x) {
## line numbers of the headlines within x
headline_ids <-
x %>%
extract_raw_headlines() %>%
complete.cases() %>%
which()
## group lines by the running count of headlines seen so far, so each
## group starts at a headline (group 0 holds any preamble lines)
split_file <-
x %>%
(function(x) {
unname(split(x, cumsum(seq_along(x) %in% headline_ids)))
})
## drop the preamble group when content precedes the first headline
if (length(headline_ids) > 0 && headline_ids[1] != 1) {
return(split_file[2:length(split_file)])
} else {
return(split_file)
}
}
|
# Import the dataframe
df<- read.csv("Salary_simple_linear_regression.csv")
head(df)
# Splitting the data in training and test
# Install the "caTools" library
#install.packages("caTools")
library(caTools)
set.seed(123)
# split on the outcome (Salary) only, choosing the TRAINING proportion
split = sample.split(df$Salary, SplitRatio = 2/3)
# Create the training and test sets
train_set = subset(df, split == TRUE)
test_set = subset(df, split == FALSE)
# Simple linear regression does NOT need feature scaling
# Fit the linear regression on the training set
regressor = lm(formula = Salary ~ YearsExperience,
data=train_set)
# Summary statistics for the fitted regressor
summary(regressor)
# Predict the results for the TEST set:
y_pred = predict(regressor, newdata = test_set)
# Visualization: with GGPLOT2
# TRAINING SET (TRAIN_SET)
library(ggplot2)
ggplot() +
geom_point(aes(x=train_set$YearsExperience, y=train_set$Salary),
colour='red') +
geom_line(aes(x=train_set$YearsExperience, y=predict(regressor, newdata = train_set)),
colour="blue") +
ggtitle('Salary vs Experience (TRAINING Set)') +
xlab('Years of Experience') +
ylab("Salary")
# TEST SET (TEST_SET): test points against the line fitted on the training set
ggplot() +
geom_point(aes(x=test_set$YearsExperience, y=test_set$Salary),
colour='red') +
geom_line(aes(x=train_set$YearsExperience, y=predict(regressor, newdata = train_set)),
colour="blue") +
ggtitle('Salary vs Experience (TEST Set)') +
xlab('Years of Experience') +
ylab("Salary")
| /linear-regression-simple.R | no_license | MathAugusto/R-estudos-machine-learning | R | false | false | 1,503 | r |
# Import the dataframe
df<- read.csv("Salary_simple_linear_regression.csv")
head(df)
# Splitting the data in training and test
# Install the "caTools" library
#install.packages("caTools")
library(caTools)
set.seed(123)
# split on the outcome (Salary) only, choosing the TRAINING proportion
split = sample.split(df$Salary, SplitRatio = 2/3)
# Create the training and test sets
train_set = subset(df, split == TRUE)
test_set = subset(df, split == FALSE)
# Simple linear regression does NOT need feature scaling
# Fit the linear regression on the training set
regressor = lm(formula = Salary ~ YearsExperience,
data=train_set)
# Summary statistics for the fitted regressor
summary(regressor)
# Predict the results for the TEST set:
y_pred = predict(regressor, newdata = test_set)
# Visualization: with GGPLOT2
# TRAINING SET (TRAIN_SET)
library(ggplot2)
ggplot() +
geom_point(aes(x=train_set$YearsExperience, y=train_set$Salary),
colour='red') +
geom_line(aes(x=train_set$YearsExperience, y=predict(regressor, newdata = train_set)),
colour="blue") +
ggtitle('Salary vs Experience (TRAINING Set)') +
xlab('Years of Experience') +
ylab("Salary")
# TEST SET (TEST_SET): test points against the line fitted on the training set
ggplot() +
geom_point(aes(x=test_set$YearsExperience, y=test_set$Salary),
colour='red') +
geom_line(aes(x=train_set$YearsExperience, y=predict(regressor, newdata = train_set)),
colour="blue") +
ggtitle('Salary vs Experience (TEST Set)') +
xlab('Years of Experience') +
ylab("Salary")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.